/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE   8  // size: 8B <<  8 = 2KB, maps 2^8  x 64KB = 16MB
#define H_PMD_INDEX_SIZE  10  // size: 8B << 10 = 8KB, maps 2^10 x 16MB = 16GB
#define H_PUD_INDEX_SIZE  10  // size: 8B << 10 = 8KB, maps 2^10 x 16GB = 16TB
#define H_PGD_INDEX_SIZE   8  // size: 8B <<  8 = 2KB, maps 2^8  x 16TB =  4PB
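
/*
 * Worked example: with 64K base pages (PAGE_SHIFT = 16), the four levels
 * above resolve 16 + 8 + 10 + 10 + 8 = 52 bits of effective address per
 * page table, i.e. the 4PB noted for the PGD level.
 */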

/*
 * If we store section details in page->flags we can't increase
 * MAX_PHYSMEM_BITS: if we increase SECTIONS_WIDTH we will not store node
 * details in page->flags, and page_to_nid() does a page->section->node
 * lookup instead. Hence only increase it for VMEMMAP. Further, depending
 * on SPARSEMEM_EXTREME, reduce the memory requirements for a large
 * number of sections.
 * 51 bits is the max physical real address on POWER9.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
#define H_MAX_PHYSMEM_BITS	51
#else
#define H_MAX_PHYSMEM_BITS	46
#endif

/*
 * Each context is 512TB in size. SLB misses for the first
 * (default) context are handled in the hotpath.
 */
#define MAX_EA_BITS_PER_CONTEXT		49
#define REGION_SHIFT		MAX_EA_BITS_PER_CONTEXT

/*
 * We use one context for each MAP area.
 */
#define H_KERN_MAP_SIZE		(1UL << MAX_EA_BITS_PER_CONTEXT)

/*
 * Define the address range of the kernel non-linear virtual area (2PB).
 */
#define H_KERN_VIRT_START	ASM_CONST(0xc008000000000000)

/*
 * A 64K-aligned address frees up a few of the lower bits of the RPN for
 * us; we steal those here. For more details look at pte_pfn/pfn_pte().
 */
#define H_PAGE_COMBO	_RPAGE_RPN0 /* this is a combo 4k page */
#define H_PAGE_4K_PFN	_RPAGE_RPN1 /* PFN is for a single 4k page */
#define H_PAGE_BUSY	_RPAGE_RSV1	/* software: PTE & hash are busy */
#define H_PAGE_HASHPTE	_RPAGE_RPN43	/* PTE has associated HPTE */

/* memory key bits. */
#define H_PTE_PKEY_BIT4		_RPAGE_PKEY_BIT4
#define H_PTE_PKEY_BIT3		_RPAGE_PKEY_BIT3
#define H_PTE_PKEY_BIT2		_RPAGE_PKEY_BIT2
#define H_PTE_PKEY_BIT1		_RPAGE_PKEY_BIT1
#define H_PTE_PKEY_BIT0		_RPAGE_PKEY_BIT0

/*
 * We need to differentiate between explicit huge pages and THP huge
 * pages, since a THP huge page also needs to track real subpage
 * details.
 */
#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We use a 2K PTE page fragment and another 2K for storing the
 * real_pte_t hash index: 8 bytes for each PTE entry and another 8
 * bytes for storing the slot details.
 */
#define H_PTE_FRAG_SIZE_SHIFT  (H_PTE_INDEX_SIZE + 3 + 1)
#define H_PTE_FRAG_NR	(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_FRAG_SIZE_SHIFT  (H_PMD_INDEX_SIZE + 3 + 1)
#else
#define H_PMD_FRAG_SIZE_SHIFT  (H_PMD_INDEX_SIZE + 3)
#endif
#define H_PMD_FRAG_NR	(PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
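
/*
 * Worked example: H_PTE_FRAG_SIZE_SHIFT = 8 + 3 + 1 = 12, so a PTE
 * fragment is 4KB: 2^8 entries x 8 bytes = 2KB of PTEs plus 2KB of
 * slot details. With a 64KB PAGE_SIZE, H_PTE_FRAG_NR = 64KB >> 12 = 16
 * fragments per page.
 */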

#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;

	/*
	 * Ensure that we do not read the hidx before we read the PTE. The
	 * writer side is expected to finish writing the hidx first, followed
	 * by the PTE, using smp_wmb(); pte_set_hidx() ensures that.
	 */
	smp_rmb();

	hidxp = (unsigned long *)(ptep + offset);
	rpte.hidx = *hidxp;
	return rpte;
}

/*
 * Shift the hidx representation by one (modulo 16); i.e. hidx 0 is
 * represented as 1, 1 as 2, ..., and 0xf as 0. This convention lets us
 * represent an invalid hidx 0xf with the 0x0 bit value. PTEs are zeroed
 * when allocated anyway; we don't have to zero them again, thus saving
 * on the initialization.
 */
#define HIDX_UNSHIFT_BY_ONE(x) ((x + 0xfUL) & 0xfUL) /* shift backward by one */
#define HIDX_SHIFT_BY_ONE(x) ((x + 0x1UL) & 0xfUL)   /* shift forward by one */
#define HIDX_BITS(x, index)  (x << (index << 2))
#define BITS_TO_HIDX(x, index)  ((x >> (index << 2)) & 0xfUL)
#define INVALID_RPTE_HIDX  0x0UL

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	return HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, index));
}
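
/*
 * Worked example of the shift-by-one encoding: storing hidx 3 for
 * subpage index 2 writes HIDX_BITS(HIDX_SHIFT_BY_ONE(3), 2) =
 * 4 << 8 = 0x400. Reading it back, BITS_TO_HIDX(0x400, 2) = 4 and
 * HIDX_UNSHIFT_BY_ONE(4) = 3. On a freshly zeroed PTE the stored nibble
 * is 0, which unshifts to 0xf, i.e. an invalid hidx.
 */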

/*
 * Commit the hidx and return the PTE bits that need to be modified.
 * The caller is expected to modify the PTE bits accordingly and commit
 * the PTE to memory.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
					 unsigned int subpg_index,
					 unsigned long hidx, int offset)
{
	unsigned long *hidxp = (unsigned long *)(ptep + offset);

	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);

	/*
	 * Anyone reading the PTE must ensure the hidx bits are read after
	 * reading the PTE, by using the read-side barrier smp_rmb().
	 * __real_pte() can be used for that.
	 */
	smp_wmb();

	/* No PTE bits to be modified, return 0x0UL */
	return 0x0UL;
}
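
/*
 * The intended pairing, as a sketch (the real writer lives in the hash
 * page-fault path; new_pte/subpg_index names here are only illustrative):
 *
 *	writer:
 *		pte_set_hidx(ptep, rpte, subpg_index, hidx, offset);
 *		// the smp_wmb() inside pte_set_hidx() orders the hidx
 *		// store before the PTE store below
 *		*ptep = __pte(new_pte | H_PAGE_HASHPTE);
 *
 *	reader:
 *		rpte = __real_pte(*ptep, ptep, offset);
 *		// the smp_rmb() inside orders the PTE read before hidx
 *		hidx = __rpte_to_hidx(rpte, subpg_index);
 */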

#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64k, which happens to work for a 16M page
 * as well, since we want only one iteration in that case.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
		if (!__split || __rpte_sub_valid(rpte, index))

#define pte_iterate_hashed_end()  } } while(0)
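
/*
 * Usage sketch, modeled on the hash flush path (flush_hash_page() is
 * one real user of this iterator):
 *
 *	unsigned long vpn, index, shift, hash, hidx;
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		hidx = __rpte_to_hidx(rpte, index);
 *		// ... invalidate or update the HPTE for this subpage
 *	} pte_iterate_hashed_end();
 */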

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				 unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}
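
/*
 * A minimal, hypothetical use from a driver's mmap handler (foo_mmap
 * and foo_pfn are made-up names), mapping a single 4K HW page even
 * though the kernel PAGE_SIZE is 64K:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return hash__remap_4k_pfn(vma, vma->vm_start, foo_pfn,
 *					  vma->vm_page_prot);
 *	}
 *
 * Callers normally go through the generic remap_4k_pfn() wrapper rather
 * than calling the hash variant directly.
 */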

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) +	\
				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
#else
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#endif
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hindex is stored in the pgtable whose address is in the
	 * second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}
/*
 * The linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t. The stashed pgtable_t contains the
 * hpte bits: [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ]. We
 * use one byte for each HPTE entry. With a 16MB hugepage and 64K HPTEs
 * we need 256 entries, and with 4K HPTEs we need 4096 entries. Both
 * will fit in a 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. This memory
 * location is also used as a normal page PTE pointer. So if we have any
 * pointers left around while we collapse a hugepage, we need to make
 * sure the _PAGE_PRESENT bit of that PTE is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}
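
/*
 * Worked example: mark_hpte_slot_valid(array, i, 0x5) stores
 * (0x5 << 1) | 0x1 = 0x0b at array[i]. Reading back, hpte_valid()
 * returns 0x0b & 0x1 = 1 and hpte_hash_index() returns 0x0b >> 1 = 0x5
 * (the secondary bit plus the 3-bit hidx, per the layout above).
 */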

/*
 * For core kernel code, by design pmd_trans_huge() is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths
 * are totally separated from the core VM paths, and they're
 * differentiated by VM_HUGETLB being set on vm_flags well before any
 * pmd_trans_huge() could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in that case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP,
 * because for THP we also track the subpage details at the pmd level.
 * We don't do that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp,
					   unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					 pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
}
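
/*
 * Note that a PMD made huge with hash__pmd_mkdevmap() does not count as
 * a THP PMD: hash__pmd_trans_huge() masks with _PAGE_DEVMAP included
 * but compares against _PAGE_PTE | H_PAGE_THP_HUGE only, so the set
 * _PAGE_DEVMAP bit makes the comparison fail.
 */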

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */