xref: /OK3568_Linux_fs/kernel/arch/csky/include/asm/pgalloc.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #ifndef __ASM_CSKY_PGALLOC_H
5*4882a593Smuzhiyun #define __ASM_CSKY_PGALLOC_H
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/highmem.h>
8*4882a593Smuzhiyun #include <linux/mm.h>
9*4882a593Smuzhiyun #include <linux/sched.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
12*4882a593Smuzhiyun #include <asm-generic/pgalloc.h>
13*4882a593Smuzhiyun 
/*
 * Install a kernel page-table page into a pmd entry: the pmd slot
 * stores the physical address of the pte page.
 */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
					pte_t *pte)
{
	unsigned long pa = __pa(pte);

	set_pmd(pmd, __pmd(pa));
}
19*4882a593Smuzhiyun 
/*
 * Install a user page-table page (passed as a struct page) into a pmd
 * entry; translate the page to its kernel virtual address first, then
 * store its physical address in the pmd slot.
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
					pgtable_t pte)
{
	void *va = page_address(pte);

	set_pmd(pmd, __pmd(__pa(va)));
}
25*4882a593Smuzhiyun 
/* Recover the struct page of the pte page referenced by a pmd entry. */
#define pmd_pgtable(pmd) pmd_page(pmd)

/* Fill a freshly allocated pgd page with invalid entries (mm/init.c). */
extern void pgd_init(unsigned long *p);
29*4882a593Smuzhiyun 
pte_alloc_one_kernel(struct mm_struct * mm)30*4882a593Smuzhiyun static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
31*4882a593Smuzhiyun {
32*4882a593Smuzhiyun 	pte_t *pte;
33*4882a593Smuzhiyun 	unsigned long i;
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	pte = (pte_t *) __get_free_page(GFP_KERNEL);
36*4882a593Smuzhiyun 	if (!pte)
37*4882a593Smuzhiyun 		return NULL;
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 	for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
40*4882a593Smuzhiyun 		(pte + i)->pte_low = _PAGE_GLOBAL;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	return pte;
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun 
/*
 * Allocate and initialize a page global directory for a new mm.
 *
 * The user half (indices 0 .. USER_PTRS_PER_PGD-1) is set to invalid
 * entries by pgd_init(); the kernel half is copied verbatim from
 * init_mm so kernel mappings are shared by every process.
 *
 * Returns the new pgd, or NULL on allocation failure.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;
	pgd_t *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long *)ret);
		/* Copy the kernel-space entries from the reference pgd. */
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		/* prevent out-of-order execution */
		smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
		/*
		 * Hardware page-table walks read physical memory, so the
		 * freshly written entries must be flushed from the dcache.
		 */
		dcache_wb_range((unsigned int)ret,
				(unsigned int)(ret + PTRS_PER_PGD));
#endif
	}

	return ret;
}
66*4882a593Smuzhiyun 
/*
 * Free a pte page during an mmu_gather teardown: run the page-table
 * page destructor, then queue the page for TLB-safe freeing.
 */
#define __pte_free_tlb(tlb, pte, address)		\
do {							\
	pgtable_pte_page_dtor(pte);			\
	tlb_remove_page(tlb, pte);			\
} while (0)

/* Early boot setup hooks, defined in arch/csky mm/trap code. */
extern void pagetable_init(void);
extern void pre_mmu_init(void);
extern void pre_trap_init(void);
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun #endif /* __ASM_CSKY_PGALLOC_H */
78