xref: /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/book3s/64/hugetlb.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
3*4882a593Smuzhiyun #define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
4*4882a593Smuzhiyun /*
5*4882a593Smuzhiyun  * For radix we want generic code to handle hugetlb. But then if we want
6*4882a593Smuzhiyun  * both hash and radix to be enabled together we need to workaround the
7*4882a593Smuzhiyun  * limitations.
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
10*4882a593Smuzhiyun void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
11*4882a593Smuzhiyun extern unsigned long
12*4882a593Smuzhiyun radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
13*4882a593Smuzhiyun 				unsigned long len, unsigned long pgoff,
14*4882a593Smuzhiyun 				unsigned long flags);
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
17*4882a593Smuzhiyun 						unsigned long addr, pte_t *ptep,
18*4882a593Smuzhiyun 						pte_t old_pte, pte_t pte);
19*4882a593Smuzhiyun 
hstate_get_psize(struct hstate * hstate)20*4882a593Smuzhiyun static inline int hstate_get_psize(struct hstate *hstate)
21*4882a593Smuzhiyun {
22*4882a593Smuzhiyun 	unsigned long shift;
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun 	shift = huge_page_shift(hstate);
25*4882a593Smuzhiyun 	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
26*4882a593Smuzhiyun 		return MMU_PAGE_2M;
27*4882a593Smuzhiyun 	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
28*4882a593Smuzhiyun 		return MMU_PAGE_1G;
29*4882a593Smuzhiyun 	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
30*4882a593Smuzhiyun 		return MMU_PAGE_16M;
31*4882a593Smuzhiyun 	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
32*4882a593Smuzhiyun 		return MMU_PAGE_16G;
33*4882a593Smuzhiyun 	else {
34*4882a593Smuzhiyun 		WARN(1, "Wrong huge page shift\n");
35*4882a593Smuzhiyun 		return mmu_virtual_psize;
36*4882a593Smuzhiyun 	}
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun #define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
gigantic_page_runtime_supported(void)40*4882a593Smuzhiyun static inline bool gigantic_page_runtime_supported(void)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun 	/*
43*4882a593Smuzhiyun 	 * We used gigantic page reservation with hypervisor assist in some case.
44*4882a593Smuzhiyun 	 * We cannot use runtime allocation of gigantic pages in those platforms
45*4882a593Smuzhiyun 	 * This is hash translation mode LPARs.
46*4882a593Smuzhiyun 	 */
47*4882a593Smuzhiyun 	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
48*4882a593Smuzhiyun 		return false;
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	return true;
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun /* hugepd entry valid bit */
54*4882a593Smuzhiyun #define HUGEPD_VAL_BITS		(0x8000000000000000UL)
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
57*4882a593Smuzhiyun extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
58*4882a593Smuzhiyun 					 unsigned long addr, pte_t *ptep);
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
61*4882a593Smuzhiyun extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
62*4882a593Smuzhiyun 					 unsigned long addr, pte_t *ptep,
63*4882a593Smuzhiyun 					 pte_t old_pte, pte_t new_pte);
64*4882a593Smuzhiyun /*
65*4882a593Smuzhiyun  * This should work for other subarchs too. But right now we use the
66*4882a593Smuzhiyun  * new format only for 64bit book3s
67*4882a593Smuzhiyun  */
hugepd_page(hugepd_t hpd)68*4882a593Smuzhiyun static inline pte_t *hugepd_page(hugepd_t hpd)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	BUG_ON(!hugepd_ok(hpd));
71*4882a593Smuzhiyun 	/*
72*4882a593Smuzhiyun 	 * We have only four bits to encode, MMU page size
73*4882a593Smuzhiyun 	 */
74*4882a593Smuzhiyun 	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
75*4882a593Smuzhiyun 	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
76*4882a593Smuzhiyun }
77*4882a593Smuzhiyun 
/*
 * Extract the MMU page-size index from a hugepd entry; the index is
 * stored shifted left by 2 within the HUGEPD_SHIFT_MASK field (see
 * hugepd_populate()).
 */
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	unsigned long field = hpd_val(hpd) & HUGEPD_SHIFT_MASK;

	return field >> 2;
}
82*4882a593Smuzhiyun 
/* Page shift of the huge pages mapped under this hugepd entry. */
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	unsigned int psize = hugepd_mmu_psize(hpd);

	return mmu_psize_to_shift(psize);
}
/*
 * Flush the TLB entry covering one hugetlb page of @vma at @vmaddr.
 *
 * Only radix needs an explicit flush here; for hash this is a no-op in
 * the visible code (presumably handled via the hash page-table update
 * paths — confirm against the hash flush implementation).
 *
 * Note: the original used "return radix__flush_hugetlb_page(...)" —
 * returning a void expression from a void function is a GNU extension
 * and an ISO C constraint violation (C11 6.8.6.4p1); a plain call is
 * the conforming, equivalent form.
 */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_hugetlb_page(vma, vmaddr);
}
93*4882a593Smuzhiyun 
hugepte_offset(hugepd_t hpd,unsigned long addr,unsigned int pdshift)94*4882a593Smuzhiyun static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
95*4882a593Smuzhiyun 				    unsigned int pdshift)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun 	unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	return hugepd_page(hpd) + idx;
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun 
/*
 * Install a hugepd entry: physical address of the hugepte page, the
 * valid bit, and the MMU page-size index (stored shifted left by 2;
 * hugepd_mmu_psize() undoes this encoding).
 */
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
	unsigned long val = __pa(new);

	val |= HUGEPD_VAL_BITS;
	val |= shift_to_mmu_psize(pshift) << 2;
	*hpdp = __hugepd(val);
}
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
108*4882a593Smuzhiyun 
check_and_get_huge_psize(int shift)109*4882a593Smuzhiyun static inline int check_and_get_huge_psize(int shift)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun 	int mmu_psize;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	if (shift > SLICE_HIGH_SHIFT)
114*4882a593Smuzhiyun 		return -EINVAL;
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	mmu_psize = shift_to_mmu_psize(shift);
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	/*
119*4882a593Smuzhiyun 	 * We need to make sure that for different page sizes reported by
120*4882a593Smuzhiyun 	 * firmware we only add hugetlb support for page sizes that can be
121*4882a593Smuzhiyun 	 * supported by linux page table layout.
122*4882a593Smuzhiyun 	 * For now we have
123*4882a593Smuzhiyun 	 * Radix: 2M and 1G
124*4882a593Smuzhiyun 	 * Hash: 16M and 16G
125*4882a593Smuzhiyun 	 */
126*4882a593Smuzhiyun 	if (radix_enabled()) {
127*4882a593Smuzhiyun 		if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
128*4882a593Smuzhiyun 			return -EINVAL;
129*4882a593Smuzhiyun 	} else {
130*4882a593Smuzhiyun 		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
131*4882a593Smuzhiyun 			return -EINVAL;
132*4882a593Smuzhiyun 	}
133*4882a593Smuzhiyun 	return mmu_psize;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun #endif
137