// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT   31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be compiled in always nowadays
 * and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include <mm/mmu_decl.h>

/*
 * The MPC8xx has only 16 contexts. We rotate through them on each task switch.
 * A better way would be to keep track of tasks that own contexts, and implement
 * an LRU usage. That way very active tasks don't always have to pay the TLB
 * reload overhead. The kernel pages are mapped shared, so the kernel can run on
 * behalf of any task that makes a kernel entry. Shared does not mean they are
 * not protected, just that the ASID comparison is not performed. -- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these as a way of
 * "switching" contexts. If the TID of the TLB is zero, the PID/TID comparison
 * is disabled, so we can use a TID of zero to represent all kernel pages as
 * shared among all contexts. -- Dan
 *
 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We should
 * normally never have to steal though the facility is present if needed.
 * -- BenH
 */
#define FIRST_CONTEXT 1
#ifdef DEBUG_CLAMP_LAST_CONTEXT
#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT
#elif defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
#else
#define LAST_CONTEXT 255
#endif
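/*
 * Derived values (illustrative, not from the original sources): with
 * FIRST_CONTEXT == 1, the number of usable contexts is
 * LAST_CONTEXT - FIRST_CONTEXT + 1, i.e. 16 on the 8xx, 65535 on the
 * 47x and 255 otherwise; context 0 is set aside for the kernel in
 * mmu_context_init() below.
 */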

static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
#ifdef CONFIG_SMP
static unsigned long *stale_map[NR_CPUS];
#endif
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

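/*
 * Worked example (assuming a 64-bit build, i.e. BITS_PER_LONG == 64 and
 * sizeof(unsigned long) == 8): with the default LAST_CONTEXT of 255,
 * CTX_MAP_SIZE below is 8 * (255 / 64 + 1) = 32 bytes, i.e. one bit per
 * context, rounded up to whole longs.
 */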
#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1))


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = LAST_CONTEXT - FIRST_CONTEXT;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > LAST_CONTEXT)
				id = FIRST_CONTEXT;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
#endif
	unsigned int id;

	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
		if (id != FIRST_CONTEXT) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
#ifdef DEBUG_MAP_CONSISTENCY
			mm->context.active = 0;
#endif
		}
#ifdef CONFIG_SMP
		__clear_bit(id, stale_map[cpu]);
#endif
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT;

	return FIRST_CONTEXT;
}

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
#endif

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
#ifdef CONFIG_SMP
	__clear_bit(id, stale_map[cpu]);
#endif

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (FIRST_CONTEXT > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

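/*
 * Usage sketch (illustrative assumption, not part of this file): the generic
 * powerpc switch_mm() path ends up calling switch_mmu_context() with
 * interrupts off when switching to the next task's mm, roughly:
 *
 *	void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 *				struct task_struct *tsk)
 *	{
 *		...
 *		switch_mmu_context(prev, next, tsk);
 *	}
 */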
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int id;
#ifdef CONFIG_SMP
	unsigned int i, cpu = smp_processor_id();
#endif
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > LAST_CONTEXT)
		id = FIRST_CONTEXT;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		if (IS_ENABLED(CONFIG_PPC_8xx))
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, LAST_CONTEXT+1, id);
		if (id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
#ifdef CONFIG_SMP
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}
#endif

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	/*
	 * We have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 */
	if (mm->context.id == 0)
		slice_init_new_context_exec(mm);
	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;
	pte_frag_set(&mm->context, NULL);
	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
	/* We don't touch the CPU 0 map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
	return 0;
}

static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
	kfree(stale_map[cpu]);
	stale_map[cpu] = NULL;

	/* We also clear the cpu_vm_mask bits of CPUs going away */
	clear_tasks_mm_cpumask(cpu);
#endif
	return 0;
}

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
	if (!context_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      CTX_MAP_SIZE);
	context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
				    SMP_CACHE_BYTES);
	if (!context_mm)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(void *) * (LAST_CONTEXT + 1));
#ifdef CONFIG_SMP
	stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
	if (!stale_map[boot_cpuid])
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      CTX_MAP_SIZE);

	cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
				  "powerpc/mmu/ctx:prepare",
				  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)),
	       LAST_CONTEXT - FIRST_CONTEXT + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_context = FIRST_CONTEXT;
	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}