/* SPDX-License-Identifier: GPL-2.0 */
/* arch/powerpc/include/asm/mmu_context.h */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line.
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
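
/*
 * Illustrative usage sketch, not kernel code: a VFIO SPAPR TCE style user
 * of this API pre-registers a chunk of userspace memory once, translates
 * userspace addresses while the region is mapped, and finally drops the
 * registration. Error handling is elided and all names except the
 * mm_iommu_* helpers are hypothetical.
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	mm_iommu_new(mm, ua, entries, &mem);
 *	mm_iommu_mapped_inc(mem);
 *	mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa);
 *	mm_iommu_mapped_dec(mem);
 *	mm_iommu_put(mm, mem);
 */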
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}

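/*
 * Illustrative sketch, not kernel code: on a hash-MMU fault against a
 * high effective address, the fault path asks whether that address range
 * already has a context id and allocates one on demand. Everything here
 * except the two helpers is pseudocode.
 *
 *	if (need_extra_context(mm, ea)) {
 *		if (alloc_extended_context(mm, ea) < 0)
 *			return -EFAULT;
 *	}
 */
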
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* this should never be called on non-Book3S-64 */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global, so they
	 * propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}

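/*
 * Illustrative pairing, not kernel code: a coprocessor driver attaching a
 * device to an mm brackets the attachment with these helpers so that TLB
 * invalidations stay global while the nMMU may hold translations. The
 * attach/detach functions are hypothetical names.
 *
 *	mm_context_add_copro(mm);
 *	attach_device_to_mm(mm);	// nMMU can now cache translations
 *	...
 *	detach_device_from_mm(mm);
 *	mm_context_remove_copro(mm);	// flushes, then drops the count
 */
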
/*
 * vas_windows counter shows number of open windows in the mm
 * context. During context switch, use this counter to clear the
 * foreign real address mapping (CP_ABORT) for the thread / process
 * that intends to use COPY/PASTE. When a process closes all windows,
 * disable CP_ABORT which is expensive to run.
 *
 * For user context, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for user
 * space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
	atomic_inc(&mm->context.vas_windows);
	mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
	int v;

	mm_context_remove_copro(mm);
	v = atomic_dec_if_positive(&mm->context.vas_windows);

	/* Detect imbalance between add and remove */
	WARN_ON(v < 0);
}
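
/*
 * Illustrative sketch, not kernel code: the user-space VAS window
 * open/close paths are expected to pair these calls once per window,
 * e.g.
 *
 *	// when opening a user-space send/receive window
 *	mm_context_add_vas_window(current->mm);
 *	...
 *	// when the window is closed
 *	mm_context_remove_vas_window(win_mm);
 *
 * where 'win_mm' is a hypothetical name for the mm the window was
 * opened against.
 */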
216*4882a593Smuzhiyun #else
inc_mm_active_cpus(struct mm_struct * mm)217*4882a593Smuzhiyun static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
dec_mm_active_cpus(struct mm_struct * mm)218*4882a593Smuzhiyun static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
mm_context_add_copro(struct mm_struct * mm)219*4882a593Smuzhiyun static inline void mm_context_add_copro(struct mm_struct *mm) { }
mm_context_remove_copro(struct mm_struct * mm)220*4882a593Smuzhiyun static inline void mm_context_remove_copro(struct mm_struct *mm) { }
221*4882a593Smuzhiyun #endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

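/*
 * A minimal sketch, not kernel code: callers that already run with
 * interrupts disabled (such as the scheduler's context-switch path) can
 * call switch_mm_irqs_off() directly and skip the save/restore that
 * switch_mm() performs.
 *
 *	// interrupts are known to be off here
 *	switch_mm_irqs_off(prev, next, tsk);
 */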

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

extern void arch_exit_mmap(struct mm_struct *mm);

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	/* Forget the VDSO base if the VDSO is unmapped from this mm */
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */