// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

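/*
 * A slice is a fixed-size window of the address space mapped with a
 * single base page size. On the book3s64 hash MMU, low slices are
 * 256MB each (SLICE_LOW_SHIFT) and cover the range below SLICE_LOW_TOP
 * (4GB); high slices are 1TB each (SLICE_HIGH_SHIFT) and cover
 * everything above it. Platforms with a different slice geometry
 * define these constants in their own slice headers. A struct
 * slice_mask holds one bit per low slice in low_slices and one bit
 * per high slice in high_slices.
 */
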
static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
			(int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
			(int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

static inline notrace bool slice_addr_is_low(unsigned long addr)
{
	u64 tmp = (u64)addr;

	return tmp < SLICE_LOW_TOP;
}

static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}
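
/*
 * Worked example for the low_slices arithmetic above, assuming the
 * book3s64 geometry (256MB low slices): with start = 0x10000000 and
 * len = 0x20000000, GET_LOW_SLICE_INDEX(start) = 1 and
 * GET_LOW_SLICE_INDEX(mend) = 2, so low_slices becomes
 * (1u << 3) - (1u << 1) = 0b0110, i.e. slices 1 and 2 are set.
 */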

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/*
	 * Hack: so that each address is controlled by exactly one of
	 * the high or low area bitmaps, the first high area starts at
	 * 4GB, not at 0.
	 */
	if (start == 0)
		start = (unsigned long)SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

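/*
 * Build the mask of slices containing no VMAs at all. High slices are
 * only scanned up to high_limit; beyond it the bits stay clear, so the
 * result is always relative to the current address-space limit.
 */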
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (slice_addr_is_low(high_limit - 1))
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
			   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

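/*
 * On 64-bit hash, SLB entries cache the segment's page size, so after
 * a slice changes size every CPU currently running this mm must flush
 * its SLB. This is the IPI handler for that flush; it is a no-op on
 * any CPU that is running a different mm.
 */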
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_restore_bolted();
	local_irq_restore(flags);
#endif
}

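/*
 * Rewrite the page-size nibbles of every slice selected in @mask to
 * @psize, and move those slices from their old size's cached
 * slice_mask to the new one. Copro SLBs are flushed here; CPU SLBs
 * are flushed by the caller via slice_flush_segments().
 */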
static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(&mm->context, psize);

	/*
	 * We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

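	/*
	 * Page sizes are stored as 4-bit nibbles, two slices per byte:
	 * slice 2n sits in the low nibble of byte n and slice 2n+1 in
	 * the high nibble, hence index = i >> 1 and mask_index = i & 1
	 * in the loops below. For example, psize values (4, 7) for
	 * slices 0 and 1 pack into the single byte 0x74.
	 */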
	lpsizes = mm_ctx_low_slices(&mm->context);
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm_ctx_high_slices(&mm->context);
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm_ctx_low_slices(&mm->context),
		  (unsigned long)mm_ctx_high_slices(&mm->context));

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;

	if (slice_addr_is_low(addr)) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}

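/*
 * Walk the address space bottom-up, coalescing each run of contiguous
 * available slices into one [low_limit, high_limit) window and asking
 * vm_unmapped_area() for a fit in that window before moving on.
 */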
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Scan up to the maximum address allowed for this mmap request.
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;
	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. Apply this only to
	 * requests whose high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

	while (addr > min_addr) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < min_addr)
			addr = min_addr;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

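/*
 * Find a free area of @len bytes for pages of size @psize, honouring
 * an optional hint in @addr and MAP_FIXED in @flags, searching
 * top-down or bottom-up according to @topdown. Free slices that end
 * up being used are converted to @psize as a side effect.
 */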
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * slice mask cache to be recalculated because it should
		 * be already initialised beyond the old address limit.
		 */
		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = ALIGN(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/*
	 * First make up a "good" mask of slices that already have the
	 * right page size.
	 */
	maskp = slice_mask_for_size(&mm->context, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/*
		 * Check if we fit in the good mask. If we do, we just
		 * return, nothing else to do.
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/*
		 * Now let's see if we can find something in the existing
		 * slices for that size.
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/*
			 * Found within the good mask: no conversion is
			 * needed, so return directly.
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/*
	 * If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/*
	 * Now let's see if we can find something in the existing slices
	 * for that size plus free slices.
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
	    psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

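/*
 * Generic arch hooks: the core mmap code calls these to place new
 * mappings, using the mm's base page size and choosing bottom-up or
 * top-down placement.
 */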
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 1);
}

unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (slice_addr_is_low(addr)) {
		psizes = mm_ctx_low_slices(&mm->context);
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm_ctx_high_slices(&mm->context);
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

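/*
 * Because two 4-bit psizes pack into each byte, the psize arrays hold
 * SLICE_NUM_LOW/2 and SLICE_NUM_HIGH/2 bytes, and filling every slice
 * with the default size is a memset of (psize << 4) | psize, which
 * replicates the nibble into both halves of each byte.
 */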
void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
	mm_ctx_set_user_psize(&mm->context, psize);

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm_ctx_low_slices(&mm->context);
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm_ctx_high_slices(&mm->context);
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(&mm->context, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_BOOK3S_64
void slice_setup_new_exec(void)
{
	struct mm_struct *mm = current->mm;

	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

	if (!is_32bit_task())
		return;

	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
#endif

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm_ctx_user_psize(&mm->context);

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(&mm->context, psize);

	/* We need to account for 4k slices too */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}

	return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif