// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * MMUOPS callbacks + TLB flushing
 *
 * This file handles mmu notifier callbacks from the core kernel. The
 * callbacks are used to update the TLB in the GRU as a result of changes in
 * the state of a process address space. This file also handles TLB
 * invalidates from the GRU driver.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/hugetlb.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/srcu.h>
#include <asm/processor.h>
#include "gru.h"
#include "grutables.h"
#include <asm/uv/uv_hub.h>

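/*
 * Cheap pseudo-random number source; the cycle counter is adequate for
 * spreading off-blade flushes across the remote TGHs (see
 * get_off_blade_tgh() below).
 */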
#define gru_random()	get_cycles()

/* ---------------------------------- TLB Invalidation functions --------
 * get_tgh_handle
 *
 * Find a TGH to use for issuing a TLB invalidate. For GRUs that are on the
 * local blade, use a fixed TGH that is a function of the blade-local cpu
 * number. Normally, this TGH is private to the cpu & no contention occurs for
 * the TGH. For offblade GRUs, select a random TGH in the range above the
 * private TGHs. A spinlock is required to access this TGH & the lock must be
 * released when the invalidate completes. This sucks, but it is the best we
 * can do.
 *
 * Note that the spinlock is IN the TGH handle so locking does not involve
 * additional cache lines.
 *
 */
static inline int get_off_blade_tgh(struct gru_state *gru)
{
	int n;

	n = GRU_NUM_TGH - gru->gs_tgh_first_remote;
	n = gru_random() % n;
	n += gru->gs_tgh_first_remote;
	return n;
}

static inline int get_on_blade_tgh(struct gru_state *gru)
{
	return uv_blade_processor_id() >> gru->gs_tgh_local_shift;
}

static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
							 *gru)
{
	struct gru_tlb_global_handle *tgh;
	int n;

	preempt_disable();
	if (uv_numa_blade_id() == gru->gs_blade_id)
		n = get_on_blade_tgh(gru);
	else
		n = get_off_blade_tgh(gru);
	tgh = get_tgh_by_index(gru, n);
	lock_tgh_handle(tgh);

	return tgh;
}

static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
{
	unlock_tgh_handle(tgh);
	preempt_enable();
}
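/*
 * Illustrative use of the helpers above (a sketch only; gru_flush_tlb_range()
 * and gru_flush_all_tlb() below are the real callers):
 *
 *	tgh = get_lock_tgh_handle(gru);	// pick & lock a TGH, preempt disabled
 *	tgh_invalidate(tgh, ...);	// issue the invalidate request
 *	get_unlock_tgh_handle(tgh);	// unlock, reenable preemption
 */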

/*
 * gru_flush_tlb_range
 *
 * General purpose TLB invalidation function. This function scans every GRU in
 * the ENTIRE system (partition) looking for GRUs where the specified MM has
 * been accessed by the GRU. For each GRU found, the TLB must be invalidated OR
 * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
 * on the next fault. This effectively flushes the ENTIRE TLB for the MM at the
 * cost of (possibly) a large number of future TLBmisses.
 *
 * The current algorithm is optimized based on the following (somewhat true)
 * assumptions:
 *	- GRU contexts are not loaded into a GRU unless a reference is made to
 *	  the data segment or control block (this is true, not an assumption).
 *	  If a DS/CB is referenced, the user will also issue instructions that
 *	  cause TLBmisses. It is not necessary to optimize for the case where
 *	  contexts are loaded but no instructions cause TLB misses. (I know
 *	  this will happen but I'm not optimizing for it).
 *	- GRU instructions to invalidate TLB entries are SLOOOOWWW - normally
 *	  a few usec but in unusual cases, it could be longer. Avoid if
 *	  possible.
 *	- intrablade process migration between cpus is not frequent but is
 *	  common.
 *	- a GRU context is not typically migrated to a different GRU on the
 *	  blade because of intrablade migration
 *	- interblade migration is rare. Processes migrate their GRU context to
 *	  the new blade.
 *	- if interblade migration occurs, migration back to the original blade
 *	  is very very rare (ie., no optimization for this case)
 *	- most GRU instructions operate on a subset of the user REGIONS. Code
 *	  & shared library regions are not likely targets of GRU instructions.
 *
 * To help improve the efficiency of TLB invalidation, the GMS data
 * structure is maintained for EACH address space (MM struct). The GMS is
 * also the structure that contains the pointer to the mmu callout
 * functions. This structure is linked to the mm_struct for the address space
 * using the mmu "register" function. The mmu interfaces are used to
 * provide the callbacks for TLB invalidation. The GMS contains:
 *
 *	- asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
 *	  loaded into the GRU.
 *	- asidmap[maxgrus]. bitmap to make it easier to find non-zero asids in
 *	  the above array
 *	- ctxbitmap[maxgrus]. Indicates the contexts that are currently active
 *	  in the GRU for the address space. This bitmap must be passed to the
 *	  GRU to do an invalidate.
 *
 * The current algorithm for invalidating TLBs is:
 *	- scan the asidmap for GRUs where the context has been loaded, ie,
 *	  asid is non-zero.
 *	- for each gru found:
 *		- if the ctxtmap is non-zero, there are active contexts in the
 *		  GRU. TLB invalidate instructions must be issued to the GRU.
 *		- if the ctxtmap is zero, no context is active. Set the ASID to
 *		  zero to force a full TLB invalidation. This is fast but will
 *		  cause a lot of TLB misses if the context is reloaded onto the
 *		  GRU.
 *
 */

void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
			 unsigned long len)
{
	struct gru_state *gru;
	struct gru_mm_tracker *asids;
	struct gru_tlb_global_handle *tgh;
	unsigned long num;
	int grupagesize, pagesize, pageshift, gid, asid;

	/* ZZZ TODO - handle huge pages */
	pageshift = PAGE_SHIFT;
	pagesize = (1UL << pageshift);
	grupagesize = GRU_PAGESIZE(pageshift);
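	/*
	 * Pages to invalidate, clamped to the TGH per-request maximum; a
	 * clamped request appears to act as a full flush (cf. the
	 * GRUMAXINVAL - 1 count used by gru_flush_all_tlb() below).
	 */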
	num = min(((len + pagesize - 1) >> pageshift), GRUMAXINVAL);

	STAT(flush_tlb);
	gru_dbg(grudev, "gms %p, start 0x%lx, len 0x%lx, asidmap 0x%lx\n", gms,
		start, len, gms->ms_asidmap[0]);

	spin_lock(&gms->ms_asid_lock);
	for_each_gru_in_bitmap(gid, gms->ms_asidmap) {
		STAT(flush_tlb_gru);
		gru = GID_TO_GRU(gid);
		asids = gms->ms_asids + gid;
		asid = asids->mt_asid;
		if (asids->mt_ctxbitmap && asid) {
			STAT(flush_tlb_gru_tgh);
			asid = GRUASID(asid, start);
			gru_dbg(grudev,
				" FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
				gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
			tgh = get_lock_tgh_handle(gru);
			tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
				       num - 1, asids->mt_ctxbitmap);
			get_unlock_tgh_handle(tgh);
		} else {
			STAT(flush_tlb_gru_zero_asid);
			asids->mt_asid = 0;
			__clear_bit(gru->gs_gid, gms->ms_asidmap);
			gru_dbg(grudev,
				" CLEARASID gruid %d, asid 0x%x, cbtmap 0x%x, asidmap 0x%lx\n",
				gid, asid, asids->mt_ctxbitmap,
				gms->ms_asidmap[0]);
		}
	}
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Flush the entire TLB on a chiplet.
 */
void gru_flush_all_tlb(struct gru_state *gru)
{
	struct gru_tlb_global_handle *tgh;

	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	tgh = get_lock_tgh_handle(gru);
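	/*
	 * All pages, all ASIDs; the ctxbitmap of 0xffff presumably selects
	 * every context on the chiplet (assumption: the context count is
	 * not defined in this file).
	 */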
	tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
	get_unlock_tgh_handle(tgh);
}

/*
 * MMUOPS notifier callout functions
 */
static int gru_invalidate_range_start(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range)
{
	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
						 ms_notifier);

	STAT(mmu_invalidate_range);
	atomic_inc(&gms->ms_range_active);
	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
		range->start, range->end, atomic_read(&gms->ms_range_active));
	gru_flush_tlb_range(gms, range->start, range->end - range->start);

	return 0;
}

static void gru_invalidate_range_end(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
						 ms_notifier);

	/* ..._and_test() provides needed barrier */
	(void)atomic_dec_and_test(&gms->ms_range_active);

	wake_up_all(&gms->ms_wait_queue);
	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n",
		gms, range->start, range->end);
}
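/*
 * ms_range_active and ms_wait_queue implement a simple "invalidate in
 * progress" gate. A sketch of the consumer side (presumably the fault
 * dropin path elsewhere in the driver; the helper name is hypothetical):
 *
 *	static void gru_wait_for_range_inval(struct gru_mm_struct *gms)
 *	{
 *		wait_event(gms->ms_wait_queue,
 *			   atomic_read(&gms->ms_range_active) == 0);
 *	}
 */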

static struct mmu_notifier *gru_alloc_notifier(struct mm_struct *mm)
{
	struct gru_mm_struct *gms;

	gms = kzalloc(sizeof(*gms), GFP_KERNEL);
	if (!gms)
		return ERR_PTR(-ENOMEM);
	STAT(gms_alloc);
	spin_lock_init(&gms->ms_asid_lock);
	init_waitqueue_head(&gms->ms_wait_queue);

	return &gms->ms_notifier;
}

static void gru_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct gru_mm_struct, ms_notifier));
	STAT(gms_free);
}

static const struct mmu_notifier_ops gru_mmuops = {
	.invalidate_range_start	= gru_invalidate_range_start,
	.invalidate_range_end	= gru_invalidate_range_end,
	.alloc_notifier		= gru_alloc_notifier,
	.free_notifier		= gru_free_notifier,
};

struct gru_mm_struct *gru_register_mmu_notifier(void)
{
	struct mmu_notifier *mn;

	mn = mmu_notifier_get_locked(&gru_mmuops, current->mm);
	if (IS_ERR(mn))
		return ERR_CAST(mn);

	return container_of(mn, struct gru_mm_struct, ms_notifier);
}

void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
{
	mmu_notifier_put(&gms->ms_notifier);
}
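/*
 * Sketch of the expected pairing (the callers live elsewhere in the driver;
 * error handling elided). Note mmu_notifier_get_locked() requires mmap_lock
 * held for write, so gru_register_mmu_notifier() presumably runs under it:
 *
 *	gms = gru_register_mmu_notifier();	// takes a ref, one GMS per mm
 *	if (IS_ERR(gms))
 *		return PTR_ERR(gms);
 *	...attach GRU contexts; TLB flush callbacks arrive via gru_mmuops...
 *	gru_drop_mmu_notifier(gms);		// drops the ref; the GMS is
 *						// freed (gru_free_notifier)
 *						// when the last ref goes away
 */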

/*
 * Setup TGH parameters. There are:
 *	- 24 TGH handles per GRU chiplet
 *	- a portion (MAX_LOCAL_TGH) of the handles are reserved for
 *	  use by blade-local cpus
 *	- the rest are used by off-blade cpus. This usage is
 *	  less frequent than blade-local usage.
 *
 * For now, use 16 handles for local flushes, 8 for remote flushes. If the
 * blade has 16 or fewer cpus, each cpu has a unique handle that it can use.
 */
#define MAX_LOCAL_TGH	16

void gru_tgh_flush_init(struct gru_state *gru)
{
	int cpus, shift = 0, n;

	cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);

	/* n = cpus rounded up to next power of 2 */
	if (cpus) {
		n = 1 << fls(cpus - 1);

		/*
		 * shift count for converting local cpu# to TGH index
		 *		0 if cpus <= MAX_LOCAL_TGH,
		 *		1 if cpus <= 2*MAX_LOCAL_TGH,
		 *		etc
		 */
		shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1));
	}
	gru->gs_tgh_local_shift = shift;

	/* first starting TGH index to use for remote purges */
	gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;

}
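/*
 * Worked examples of the mapping above (assuming 24 TGHs per chiplet, per
 * the comment preceding MAX_LOCAL_TGH):
 *
 *	cpus ==  8: shift = 0, first_remote =  8
 *		-> TGHs 0..7 are private, one per cpu; TGHs 8..23 serve
 *		   off-blade purges
 *	cpus == 28: n = 32, shift = 1, first_remote = 14
 *		-> each local TGH is shared by 2 cpus (cpu# >> 1 selects it);
 *		   TGHs 14..23 serve off-blade purges
 */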