/*
 * Switch a MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/slab.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/dsemul.h>
#include <asm/ginvt.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

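/*
 * Point the hardware page table walker (HTW) at a new page directory.
 * On cores with an HTW, the CP0 PWBase register holds the base address
 * the walker uses when refilling the TLB, so it must track the active PGD.
 */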
#define htw_set_pwbase(pgd)						\
do {									\
	if (cpu_has_htw) {						\
		write_c0_pwbase(pgd);					\
		back_to_back_c0_hazard();				\
	}								\
} while (0)

extern void tlbmiss_handler_setup_pgd(unsigned long);
extern char tlbmiss_handler_setup_pgd_end[];

/* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
#define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
do {									\
	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
	htw_set_pwbase((unsigned long)pgd);				\
} while (0)

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

#define TLBMISS_HANDLER_RESTORE()					\
	write_c0_xcontext((unsigned long) smp_processor_id() <<	\
			  SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP()						\
	do {								\
		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);		\
		TLBMISS_HANDLER_RESTORE();				\
	} while (0)

#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor. Also, the proc. id is stuffed
 * into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_RESTORE()					\
	write_c0_context((unsigned long) smp_processor_id() <<		\
			 SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP()						\
	TLBMISS_HANDLER_RESTORE();					\
	back_to_back_c0_hazard();					\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The ginvt instruction will invalidate wired entries when its type field
 * targets anything other than the entire TLB. That means that if we were to
 * allow the kernel to create wired entries with the MMID of current->active_mm
 * then those wired entries could be invalidated when we later use ginvt to
 * invalidate TLB entries with that MMID.
 *
 * In order to prevent ginvt from trashing wired entries, we reserve one MMID
 * for use by the kernel when creating wired entries. This MMID will never be
 * assigned to a struct mm, and we'll never target it with a ginvt instruction.
 */
#define MMID_KERNEL_WIRED	0

/*
 * All upper bits not used by the hardware ASID/MMID are treated as a
 * software ASID version extension.
 */
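/*
 * Example (illustrative only): with an 8-bit hardware ASID, cpu_asid_mask()
 * is 0xff, so asid_version_mask() is ~0xffULL and asid_first_version() is
 * 0x100 - the version counter lives in the bits above the hardware ASID
 * and advances in steps of asid_mask + 1.
 */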
static inline u64 asid_version_mask(unsigned int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);

	return ~(u64)(asid_mask | (asid_mask - 1));
}

static inline u64 asid_first_version(unsigned int cpu)
{
	return ~asid_version_mask(cpu) + 1;
}

static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
{
	if (cpu_has_mmid)
		return atomic64_read(&mm->context.mmid);

	return mm->context.asid[cpu];
}

static inline void set_cpu_context(unsigned int cpu,
				   struct mm_struct *mm, u64 ctx)
{
	if (cpu_has_mmid)
		atomic64_set(&mm->context.mmid, ctx);
	else
		mm->context.asid[cpu] = ctx;
}

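/*
 * asid_cache() is the per-CPU running allocation counter used by
 * get_new_mmu_context(); cpu_asid() strips the software version bits,
 * leaving only the value that is actually programmed into the hardware
 * (the EntryHi ASID, or the MemoryMapID on MMID-capable cores).
 */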
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm) \
	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

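/* ASID/MMID allocation and validation, implemented in arch/mips/mm/context.c. */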
extern void get_new_mmu_context(struct mm_struct *mm);
extern void check_mmu_context(struct mm_struct *mm);
extern void check_switch_mmu_context(struct mm_struct *mm);

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	if (cpu_has_mmid) {
		set_cpu_context(0, mm, 0);
	} else {
		for_each_possible_cpu(i)
			set_cpu_context(i, mm, 0);
	}

	mm->context.bd_emupage_allocmap = NULL;
	spin_lock_init(&mm->context.bd_emupage_lock);
	init_waitqueue_head(&mm->context.bd_emupage_queue);

	return 0;
}

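/*
 * switch_mm() runs at context switch time: check_switch_mmu_context()
 * activates next's ASID/MMID on this CPU (allocating a fresh one if the
 * stored context is no longer valid), and the cpumask update keeps
 * mm_cpumask accurate so TLB flush IPIs only target CPUs using the mm.
 */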
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	local_irq_save(flags);

	htw_stop();
	check_switch_mmu_context(next);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));
	htw_start();

	local_irq_restore(flags);
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	dsemul_mm_cleanup(mm);
}

#define activate_mm(prev, next)	switch_mm(prev, next, current)
#define deactivate_mm(tsk, mm)	do { } while (0)

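/*
 * Invalidate this CPU's view of mm's context. Three cases:
 *  - mm has no context on this CPU yet: nothing to do;
 *  - MMID-capable CPUs: globally invalidate the TLB entries tagged with
 *    mm's MMID using ginvt, which is cheaper than allocating a new MMID
 *    (MMIDs are global and may be in active use on other CPUs);
 *  - classic ASIDs: if mm is active on this CPU, bump its ASID; otherwise
 *    clear the stored context so a new one is allocated on the next switch.
 */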
static inline void
drop_mmu_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u32 old_mmid;
	u64 ctx;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ctx = cpu_context(cpu, mm);

	if (!ctx) {
		/* no-op */
	} else if (cpu_has_mmid) {
		/*
		 * Globally invalidating TLB entries associated with the MMID
		 * is pretty cheap using the GINVT instruction, so we'll do
		 * that rather than incur the overhead of allocating a new
		 * MMID. The latter would be especially difficult since MMIDs
		 * are global & other CPUs may be actively using ctx.
		 */
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
		mtc0_tlbw_hazard();
		ginvt_mmid();
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		/*
		 * mm is currently active, so we can't really drop it.
		 * Instead we bump the ASID.
		 */
		htw_stop();
		get_new_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		htw_start();
	} else {
		/* will get a new context next time */
		set_cpu_context(cpu, mm, 0);
	}

	local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */