/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

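/*
 * With CONFIG_PID_IN_CONTEXTIDR, publish the incoming task's PID in
 * CONTEXTIDR_EL1 on every context switch so that external debug and trace
 * tools can identify the running thread. Otherwise this is a no-op.
 */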
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

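/*
 * Install @pgd as the TTBR0 translation table for @mm on this CPU. TTBR0 is
 * pointed at the reserved tables first so that no stale pgd/ASID pairing can
 * be walked while the switch is in progress. swapper_pg_dir must never be
 * installed in TTBR0_EL1, hence the BUG_ON().
 */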
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

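/*
 * True if the ID map needs a larger input address range than the regular
 * kernel VA space, i.e. idmap_t0sz differs from the runtime default.
 */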
static inline bool __cpu_uses_extended_idmap(void)
{
	return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

/*
 * Set TCR.T0SZ to the given value. This only has an effect when the ID map
 * uses an extended T0SZ; otherwise TCR.T0SZ never needs to change from its
 * default value (based on VA_BITS).
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	tcr = read_sysreg(tcr_el1);
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)
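
/*
 * cpu_{uninstall,install}_idmap() below use these helpers to switch T0SZ
 * between the default value and the (possibly smaller) idmap_t0sz.
 */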

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}
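
/*
 * cpu_install_idmap()/cpu_uninstall_idmap() bracket code that must run from
 * the identity mapping, such as the TTBR1 switch in cpu_replace_ttbr1()
 * below. cpu_uninstall_idmap() is also used when a CPU enters the kernel with
 * the ID map active (e.g. early secondary boot) and must move to the pgd of
 * the active mm.
 */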

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used when there's a boot CPU
		 * up (i.e. cpufeature framework is not up yet) and
		 * later only when we enable CNP via cpufeature's
		 * enable() callback.
		 * Also we rely on the cpu_hwcap bit being set before
		 * calling the enable() function.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(ttbr1);
	cpu_uninstall_idmap();
}
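
/*
 * Typical usage (a sketch only; tmp_pg_dir is illustrative): switch to a
 * scratch set of TTBR1 tables, rewrite swapper_pg_dir, then switch back:
 *
 *	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
 *	... modify swapper_pg_dir ...
 *	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 *
 * Note that the CNP bit above is only set when swapper_pg_dir itself is
 * being installed.
 */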

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while(0)
void check_and_switch_context(struct mm_struct *mm);

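/*
 * A new mm starts with ASID 0, meaning "no ASID allocated yet"; a real ASID
 * is assigned by check_and_switch_context() the first time the mm is
 * scheduled in. The pinned count tracks users that need the ASID to remain
 * stable across rollover (see arm64_mm_context_get()).
 */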
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}

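/*
 * With software TTBR0 PAN, the user TTBR0 value (pgd plus ASID) is cached in
 * thread_info so the uaccess helpers can restore it when userspace access is
 * enabled; init_mm maps to the reserved tables instead.
 */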
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * reserved (empty) page tables.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, current)

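/*
 * On systems where only a subset of CPUs can execute 32-bit EL0 code, a
 * compat (AArch32) task may only run on those CPUs; everything else can use
 * the full cpu_possible_mask.
 */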
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpu_possible_mask;

	if (!is_compat_thread(task_thread_info(p)))
		return cpu_possible_mask;

	return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask	task_cpu_possible_mask

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

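/*
 * Pin/unpin the ASID of an mm so that it survives rollover and stays stable
 * while shared with another agent (e.g. an SMMU doing shared virtual
 * addressing). A sketch of the intended pairing, not a definitive contract:
 *
 *	asid = arm64_mm_context_get(mm);	// pin and retrieve the ASID
 *	... program the ASID into the external agent ...
 *	arm64_mm_context_put(mm);		// drop the pin when done
 */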
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */