// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
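
/*
 * Worked example of the layout implied by the macros above, assuming
 * asid_bits == 16 (illustrative numbers only): context.id 0x00021234
 * encodes generation 0x2 in the bits above bit 15 and hardware ASID
 * 0x1234 in the low 16 bits; ASID_MASK is then ~0xffffUL and
 * ASID_FIRST_VERSION is 0x10000, so bumping the generation never
 * disturbs the ASID field.
 */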

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		fallthrough;
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * we support fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
			smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(unsigned long *map)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * In case of KPTI kernel/user ASIDs are allocated in
	 * pairs, the bottom bit distinguishes the two: if it
	 * is set, then the ASID will map only userspace. Thus
	 * mark even as reserved for kernel.
	 */
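	/*
	 * A note on the constant below: 0xaa is 0b10101010, so every
	 * odd-numbered bit in the map ends up set and the allocator only
	 * ever hands out even ASIDs; the odd sibling (asid | 1) of each
	 * allocated ASID is the userspace-only half of the pair.
	 */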
	memset(map, 0xaa, len);
}

static void set_reserved_asid_bits(void)
{
	if (pinned_asid_map)
		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
	else if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits(asid_map);
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
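
/*
 * Example, assuming asid_bits == 16 (illustrative values): context.id
 * 0x00021234 matches generation 0x00020000 because everything above bit 15
 * is equal; once a rollover bumps asid_generation to 0x00030000 the same id
 * no longer matches and check_and_switch_context() takes the slow path.
 */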

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * If it is pinned, we can keep using it. Note that reserved
		 * takes priority, because even if it is also pinned, we need
		 * to update the generation into the reserved_asids.
		 */
		if (refcount_read(&mm->context.pinned))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
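
/*
 * Illustrative values only, assuming asid_bits == 16: if the current
 * generation is 0x00020000 and the bitmap search above lands on index 6,
 * new_context() returns 0x00020006; under KPTI the hardware additionally
 * uses the odd sibling 0x0007 while the kernel is unmapped at EL0.
 */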

void check_and_switch_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
	if (old_active_asid && asid_gen_match(asid) &&
	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	cpu = smp_processor_id();
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	if (!pinned_asid_map)
		return 0;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	asid = atomic64_read(&mm->context.id);

	if (refcount_inc_not_zero(&mm->context.pinned))
		goto out_unlock;

	if (nr_pinned_asids >= max_pinned_asids) {
		asid = 0;
		goto out_unlock;
	}

	if (!asid_gen_match(asid)) {
		/*
		 * We went through one or more rollover since that ASID was
		 * used. Ensure that it is still valid, or generate a new one.
		 */
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	nr_pinned_asids++;
	__set_bit(asid2idx(asid), pinned_asid_map);
	refcount_set(&mm->context.pinned, 1);

out_unlock:
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	asid &= ~ASID_MASK;

	/* Set the equivalent of USER_ASID_BIT */
	if (asid && arm64_kernel_unmapped_at_el0())
		asid |= 1;

	return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);

void arm64_mm_context_put(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid = atomic64_read(&mm->context.id);

	if (!pinned_asid_map)
		return;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	if (refcount_dec_and_test(&mm->context.pinned)) {
		__clear_bit(asid2idx(asid), pinned_asid_map);
		nr_pinned_asids--;
	}

	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
EXPORT_SYMBOL_GPL(arm64_mm_context_put);

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	write_sysreg(ttbr1, ttbr1_el1);
	isb();
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}

static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0()) {
		num_available_asids /= 2;
		if (pinned_asid_map)
			set_kpti_asid_bits(pinned_asid_map);
	}
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);

	/*
	 * There must always be an ASID available after rollover. Ensure that,
	 * even if all CPUs have a reserved ASID and the maximum number of
	 * ASIDs are pinned, there still is at least one empty slot in the
	 * ASID map.
	 */
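	/*
	 * Worked example (illustrative numbers): with 16-bit ASIDs and KPTI,
	 * num_available_asids is 65536 / 2 == 32768; on a machine with 8
	 * possible CPUs this permits up to 32768 - 8 - 2 == 32758 pinned
	 * ASIDs.
	 */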
	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
	return 0;
}
arch_initcall(asids_update_limit);

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
				  sizeof(*pinned_asid_map), GFP_KERNEL);
	nr_pinned_asids = 0;

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits(asid_map);
	return 0;
}
early_initcall(asids_init);