/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/asm-xtensa/pgalloc.h
 *
 * Copyright (C) 2001-2007 Tensilica Inc.
 */

#ifndef _XTENSA_PGALLOC_H
#define _XTENSA_PGALLOC_H

#ifdef CONFIG_MMU
#include <linux/highmem.h>
#include <linux/slab.h>

#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h>

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_populate_kernel(mm, pmdp, ptep)				     \
	(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page)					     \
	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
	return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
}

static inline void ptes_clear(pte_t *ptep)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		pte_clear(NULL, 0, ptep + i);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *ptep;

	ptep = (pte_t *)__pte_alloc_one_kernel(mm);
	if (!ptep)
		return NULL;
	ptes_clear(ptep);
	return ptep;
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *page;

	page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
	if (!page)
		return NULL;
	ptes_clear(page_address(page));
	return page;
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */

#endif /* _XTENSA_PGALLOC_H */
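
/*
 * Illustrative sketch only (not part of the original header): roughly how
 * generic mm code pairs pte_alloc_one() with pmd_populate() when a faulting
 * address needs a new PTE page.  The function name example_pte_install() is
 * hypothetical, and the locking done by the real core (mm/memory.c) is only
 * noted in a comment.  Kept under "#if 0" so it never affects the build.
 */
#if 0
static int example_pte_install(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t new = pte_alloc_one(mm);	/* PTE page with all entries cleared */

	if (!new)
		return -ENOMEM;			/* assumes <linux/errno.h> is visible */

	/*
	 * The real caller re-checks pmd_none(*pmdp) under the page-table
	 * lock before this step and frees 'new' with pte_free() if it lost
	 * the race; omitted here for brevity.
	 */
	pmd_populate(mm, pmdp, new);		/* point the pmd entry at the PTE page */
	return 0;
}
#endif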