// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/prefetch.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 * ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
#ifdef CONFIG_IA64
	return uv_blade_processor_id() % GRU_NUM_TFM;
#else
	int cpu = smp_processor_id();
	int id, core;

	core = uv_cpu_core_number(cpu);
	id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
	return id;
#endif
}
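
/*
 * Worked example (illustrative, not from the original source; assumes
 * UV_MAX_INT_CORES is 8): on x86 UV, a cpu that is core 3 of socket 1
 * gets fault map id 3 + 8 * 1 == 11. Hyperthreads that resolve to the
 * same core/socket pair share the same map.
 */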

/*--------- ASID Management -------------------------------------------
 *
 * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 * Once MAX is reached, flush the TLB & start over. However,
 * some asids may still be in use. There won't be many (percentage wise) still
 * in use. Search active contexts & determine the value of the first
 * asid in use ("x"s below). Set "limit" to this value.
 * This defines a block of assignable asids.
 *
 * When "limit" is reached, search forward from limit+1 and determine the
 * next block of assignable asids.
 *
 * Repeat until MAX_ASID is reached, then start over again.
 *
 * Each time MAX_ASID is reached, increment the asid generation. Since
 * the search for in-use asids only checks contexts with GRUs currently
 * assigned, asids in some contexts will be missed. Prior to loading
 * a context, the asid generation of the GTS asid is rechecked. If it
 * doesn't match the current generation, a new asid will be assigned.
 *
 *   0---------------x------------x---------------------x----|
 *     ^-next        ^-limit                                 ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
					asid, limit);
	return asid;
}
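
/*
 * Worked example (illustrative, not from the original source; assumes
 * ASID_INC == 8): suppose two active contexts still hold asids 0x100
 * and 0x300, and the scan starts at asid 0x8. Neither in-use asid
 * equals 0x8, so 0x8 is returned and gs_asid_limit becomes 0x100 (the
 * lowest in-use asid above it). Asids 0x8..0xf8 can then be handed out
 * without rescanning; the next scan starts past 0x100.
 */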

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}
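
/*
 * Example (illustrative, not from the original source): with
 * *p == 0b1101 and n == 2, the two lowest set bits (0 and 2) move from
 * the free map to the returned mask: *p becomes 0b1000, the return
 * value is 0b0101, and idx (if supplied) receives {0, 2}. The caller
 * is expected to hold the gru lock so the find/clear pairs stay atomic
 * with respect to other allocators.
 */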

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}
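
/*
 * Usage sketch (illustrative): callers such as gru_assign_gru_context()
 * below first call this lock-free to pick a candidate chiplet, then
 * retest under gru->gs_lock before actually reserving, e.g.
 *
 *	if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) {
 *		spin_lock(&gru->gs_lock);
 *		if (!check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) {
 *			spin_unlock(&gru->gs_lock);
 *			... retry ...
 *		}
 *		reserve_gru_resources(gru, gts);
 *		spin_unlock(&gru->gs_lock);
 *	}
 */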

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		if (gts->ts_gms)
			gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
							    *vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count,
		unsigned char tlb_preload_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	struct gru_mm_struct *gms;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return ERR_PTR(-ENOMEM);

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_tlb_preload_count = tlb_preload_count;
	gts->ts_user_options = options;
	gts->ts_user_blade_id = -1;
	gts->ts_user_chiplet_id = -1;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gms = gru_register_mmu_notifier();
		if (IS_ERR(gms))
			goto err;
		gts->ts_gms = gms;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return ERR_CAST(gms);
}
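
/*
 * Usage sketch (illustrative; the resource counts are hypothetical):
 * a caller with a vma wanting one CBR allocation unit and one DSR
 * allocation unit, no TLB preload and default options, would do
 * roughly
 *
 *	gts = gru_alloc_gts(vma, 1, 1, 0, 0, tsid);
 *	if (IS_ERR(gts))
 *		return PTR_ERR(gts);
 *
 * The CB/DS save areas are allocated inline after the header (see the
 * "bytes" computation above), which is why only the header is zeroed.
 */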

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	STAT(vdata_alloc);
	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that races may allow
 * another thread to create a gts for the same tsid first.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
			    vdata->vd_dsr_au_count,
			    vdata->vd_tlb_preload_count,
			    vdata->vd_user_options, tsid);
	if (IS_ERR(gts))
		return gts;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				 unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
			       GRU_CACHE_LINE_BYTES);
		}
		/* Flush CBE to hide race in context restart */
		mb();
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;

	/* CBEs may not be coherent. Flush them from cache */
	for_each_cbr_in_allocation_map(i, &cbrmap, scr)
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
	mb();		/* Let the CL flush complete */

	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}
void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
		gts, gts->ts_cbr_map, gts->ts_dsr_map);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
		cch->tlb_int_select = 0;	/* For now, ints go to cpu 0 */
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
		gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
		(gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		for (i = 0; i < 8; i++)
			cch->sizeavail[i] = gts->ts_sizeavail;
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gru_cpu_fault_map_id();
		cch->tfm_fault_bit_enable =
		    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all of the following are true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts);
}

/*
 * Check if a GRU context is allowed to use a specific chiplet. By default
 * a context is assigned to any blade-local chiplet. However, users can
 * override this.
 *	Returns 1 if assignment allowed, 0 otherwise
 */
static int gru_check_chiplet_assignment(struct gru_state *gru,
					struct gru_thread_state *gts)
{
	int blade_id;
	int chiplet_id;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();

	chiplet_id = gts->ts_user_chiplet_id;
	return gru->gs_blade_id == blade_id &&
		(chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	/*
	 * If the current task is the context owner, verify that the
	 * context is correctly placed. This test is skipped for non-owner
	 * references. Pthread apps use non-owner references to the CBRs.
	 */
	gru = gts->ts_gru;
	if (!gru || gts->ts_tgid_owner != current->tgid)
		return;

	if (!gru_check_chiplet_assignment(gru, gts)) {
		STAT(check_context_unload);
		gru_unload_context(gts, 1);
	} else if (gru_retarget_intr(gts)) {
		STAT(check_context_retarget_intr);
	}
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context from
 * a process. This is a hack until a _real_ resource scheduler is written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])
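
/*
 * Example (illustrative, assuming GRU_NUM_CCH == 16 and
 * GRU_CHIPLETS_PER_BLADE == 2): next_ctxnum() advances the blade's LRU
 * cursor 0, 1, ..., 14 and then wraps to 0. Each time the cursor wraps,
 * gru_steal_context() below also hops to the other chiplet with
 * next_gru(), so successive steal attempts round-robin across both
 * chiplets of the blade instead of always victimizing the same one.
 */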

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
	int blade_id;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (gru_check_chiplet_assignment(gru, gts)) {
			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
				break;
			spin_lock(&gru->gs_lock);
			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
				if (flag && gru == gru0 && ctxnum == ctxnum0)
					break;
				ngts = gru->gs_gts[ctxnum];
				/*
				 * We are grabbing locks out of order, so trylock is
				 * needed. GTSs are usually not locked, so the odds of
				 * success are high. If trylock fails, try to steal a
				 * different GSEG.
				 */
				if (ngts && is_gts_stealable(ngts, blade))
					break;
				ngts = NULL;
			}
			spin_unlock(&gru->gs_lock);
			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
				break;
		}
		if (flag && gru == gru0)
			break;
		flag = 1;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);
	return ctxnum;
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;
	int blade_id = gts->ts_user_blade_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade_id, i) {
		if (!gru_check_chiplet_assignment(grux, gts))
			continue;
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum = gru_assign_context_number(gru);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	return gru;
}
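
/*
 * Design note (illustrative): the scan above runs lock-free and keeps
 * the candidate chiplet with the fewest active contexts (a simple
 * least-loaded heuristic), then rechecks resources under gs_lock before
 * reserving. If another thread consumed the resources in that window,
 * the "goto again" restarts the scan rather than serializing all
 * chiplets behind a single lock.
 */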

/*
 * gru_nopage
 *
 * Map the user's GRU segment
 *
 *	Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
vm_fault_t gru_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;
	unsigned long expires;

	vaddr = vmf->address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();

	gru_check_context_placement(gts);

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			expires = gts->ts_steal_jiffies + GRU_STEAL_DELAY;
			if (time_before(expires, jiffies))
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}