// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/init_task.h>

#define __HAVE_ARCH_PGD_FREE
#include <asm/pgalloc.h>

#define FIRST_KERNEL_PGD_NR	(USER_PTRS_PER_PGD)

/*
 * Allocate one page for a process's level-1 (PGD) table.  Entries below
 * FIRST_KERNEL_PGD_NR cover user space; the rest mirror the kernel
 * mappings and are filled in from init_mm's table below.
 */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	int i;

	new_pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 0);
	if (!new_pgd)
		return NULL;
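	/*
	 * Pre-fill every slot with 1, the marker this port's pmd_none()
	 * treats as an empty entry, so nothing is mapped until a pte
	 * page is actually attached.
	 */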
	for (i = 0; i < PTRS_PER_PGD; i++) {
		(*new_pgd) = 1;
		new_pgd++;
	}
	new_pgd -= PTRS_PER_PGD;

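	/* Copy the shared kernel-space entries from init_mm's table. */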
	init_pgd = pgd_offset_k(0);

	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

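	/*
	 * The hardware table walker fetches PGD entries from memory, not
	 * from the D-cache, so write the freshly built table back before
	 * the MMU can consume it.
	 */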
	cpu_dcache_wb_range((unsigned long)new_pgd,
			    (unsigned long)new_pgd +
			    PTRS_PER_PGD * sizeof(pgd_t));
	inc_zone_page_state(virt_to_page((unsigned long *)new_pgd),
			    NR_PAGETABLE);

	return new_pgd;
}
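
/*
 * A minimal sketch of how these hooks are reached (core mm code in
 * kernel/fork.c, not part of this file): mm_init() allocates the table
 * through mm_alloc_pgd(), which is essentially
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (unlikely(!mm->pgd))
 *		return -ENOMEM;
 *
 * and mm_free_pgd() later hands the table back via pgd_free(mm, mm->pgd).
 */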

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

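	/*
	 * The two-level layout folds the pmd into the pgd, so the first
	 * user-space entry can be examined through the pmd accessors.
	 */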
	pmd = (pmd_t *) pgd;
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

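	/*
	 * Detach the pte page this entry points at and undo the
	 * NR_PAGETABLE and mm accounting before freeing both pages.
	 */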
	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
	pmd_free(mm, pmd);
free:
	free_pages((unsigned long)pgd, 0);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long pmdval;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

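	/*
	 * Overwrite each user-space entry with an identity section
	 * mapping: pmdval carries only the section base address, so
	 * entry i maps virtual section i straight to physical section i.
	 * The p4d/pud levels are folded on this port, so "pud + i"
	 * selects the i-th PGD entry without an explicit pgd_offset().
	 * E.g. with PGDIR_SHIFT == 22 (4 MiB sections), iteration i = 1
	 * writes __pmd(0x00400000) into entry 1.
	 */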
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		pmdval = (i << PGDIR_SHIFT);
		p4d = p4d_offset(pgd, i << PGDIR_SHIFT);
		pud = pud_offset(p4d, i << PGDIR_SHIFT);
		pmd = pmd_offset(pud + i, i << PGDIR_SHIFT);
		set_pmd(pmd, __pmd(pmdval));
	}
}