// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  arch/powerpc/mm/mmu_context.c
 *
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
}
#else
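/*
 * The remaining case is Book3S 64-bit, which needs no per-CPU copy of
 * the PGD: anything that wants it can take mm->pgd from the mm itself,
 * and the hardware reaches translations via the context (the SLB and
 * hash table for hash, or the PID-indexed process table for radix).
 */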
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent operation which allows this CPU to begin loading
		 * translations for next.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is the first load from slb_cache in
		 * switch_slb() and/or the store of paca->mm_ctx_id in
		 * copy_mm_to_paca().
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix, which orders earlier stores that clear the PTEs vs
		 * the load of mm_cpumask; pte_xchg() does the same for hash.
		 *
		 * This full barrier is also needed by membarrier when
		 * switching between processes, after the store to rq->curr
		 * and before user-space memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}
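
	/*
	 * A sketch of the pairing described above (simplified; the flush
	 * side stands for mm/tlb-radix.c on radix or pte_xchg() on hash):
	 *
	 *   switching CPU                        flushing CPU
	 *   -------------------------------      -------------------------
	 *   cpumask_set_cpu(.., mm_cpumask)      stores clearing PTEs
	 *   smp_mb()                             barrier
	 *   loads translations for next          load of mm_cpumask
	 *
	 * If the flushing CPU does not see this CPU in mm_cpumask, this
	 * CPU is guaranteed to observe the cleared PTEs once it starts
	 * loading translations, so the flush may safely skip it.
	 */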

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all AltiVec streams before changing the HW
	 * context.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile (PPC_DSSALL);

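	/*
	 * On first use of this context on this CPU, the smp_mb() above
	 * already provides the ordering membarrier needs, so only the
	 * radix KVM prefetch workaround is required; otherwise
	 * membarrier_arch_switch_mm() must supply that ordering itself.
	 */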
	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);
	else
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures; it is kept out of line for now.
	 */
	switch_mmu_context(prev, next, tsk);
}
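
/*
 * A minimal sketch, not part of the upstream file: callers normally
 * reach switch_mm_irqs_off() through a wrapper that disables interrupts
 * around it, which is what powerpc's switch_mm() in <asm/mmu_context.h>
 * does. The example_ name below is hypothetical, shown for context only.
 */
static inline void example_switch_mm(struct mm_struct *prev,
				     struct mm_struct *next,
				     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);	/* honour the _irqs_off contract */
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}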

#ifndef CONFIG_PPC_BOOK3S_64
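/*
 * Called from exit_mmap() when an address space is torn down. On these
 * platforms PTE pages are carved out of a larger page as "fragments",
 * and the most recently used fragment is cached in mm->context; free
 * any remainder here. Book3S 64-bit is excluded because it tears down
 * its page-table caches in its own context-destruction path.
 */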
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif