// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/* Return codes for vtop functions */
#define VTOP_SUCCESS               0
#define VTOP_INVALID               -1
#define VTOP_RETRY                 -2


/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_lock.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 * 	- *gts with the mmap_lock locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */

static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	mmap_read_lock(mm);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		mmap_read_unlock(mm);
	return gts;
}

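/*
 * Same as gru_find_lock_gts() but allocates the GTS if it does not already
 * exist. The mmap_lock is taken for write while the GTS is allocated, then
 * downgraded to read before returning. Returns an ERR_PTR on failure.
 */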
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	mmap_write_lock(mm);
	vma = gru_find_vma(vaddr);
	if (!vma)
		goto err;

	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (IS_ERR(gts))
		goto err;
	mutex_lock(&gts->ts_ctxlock);
	mmap_write_downgrade(mm);
	return gts;

err:
	mmap_write_unlock(mm);
	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	mmap_read_unlock(current->mm);
}

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 * 	returns:
 * 		  0 - successful
 * 		< 0 - error code
 * 		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_lock is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	p4dp = p4d_offset(pgdp, vaddr);
	if (unlikely(p4d_none(*p4dp)))
		goto err;

	pudp = pud_offset(p4dp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
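	/*
	 * A large (2MB) page is mapped directly by the pmd; in that case the
	 * pmd entry itself is used as the pte.
	 */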
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}

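/*
 * Convert a user virtual address to a GRU global physical address (gpa).
 * Uses the fast atomic page table walk when possible and falls back to
 * get_user_pages() when called in non-atomic context. Returns VTOP_SUCCESS,
 * VTOP_INVALID, or VTOP_RETRY (atomic lookup failed; retry non-atomically).
 */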
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must/check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}


/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;         /* make CL dirty */
		gru_flush_cache(cbe);
	}
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
			struct gru_thread_state *gts, int atomic,
			unsigned long fault_vaddr, int asid, int write,
			unsigned char tlb_preload_count,
			struct gru_tlb_fault_handle *tfh,
			struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

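	/*
	 * Drop in entries starting at the last page of the transfer and work
	 * backward toward (but not including) the faulting page itself.
	 */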
	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *	Input:
 *		cb    Address of user CBR. Null if not running in user context
 * 	Return:
 * 		  0 = dropin, exception, or switch to UPM successful
 * 		  1 = range invalidate active
 * 		< 0 = error code
 *
 */
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/*
	 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM call.
	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
	 * is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;

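	/*
	 * A pagesize not previously used by this context was seen. Try to
	 * update the CCH (only possible in non-atomic context); if that is
	 * not possible, force a CCH reload and fall back to UPM.
	 */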
	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}

	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
		indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle  - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * linux interrupt subsystem.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	struct completion *cmp;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		STAT(intr_cbr);
		cmp = gru->gs_blade->bs_async_wq;
		if (cmp)
			complete(cmp);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		STAT(intr_tfh);
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/* Spurious interrupts can cause this. Ignore. */
		if (!gts) {
			STAT(intr_spurious);
			continue;
		}

		/*
		 * This is running in interrupt context. Trylock the mmap_lock.
		 * If it fails, retry the fault in user context.
		 */
		gts->ustats.fmm_tlbmiss++;
		if (!gts->ts_force_cch_reload &&
					mmap_read_trylock(gts->ts_mm)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			mmap_read_unlock(gts->ts_mm);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}

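/*
 * Per-chiplet interrupt handlers for the two GRUs on the local blade.
 */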
irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

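	/*
	 * Scan only blades without cpus; blades with cpus take their GRU
	 * interrupts locally through gru0_intr()/gru1_intr().
	 */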
	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}


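/*
 * Retry a dropin on behalf of a user CB. Wait for any active range
 * invalidates to complete, then attempt the dropin; repeat until the
 * dropin either succeeds or fails with an error.
 */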
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbmiss++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. This normally means that a TLB fault has occurred.
 * 	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	gru_check_context_placement(gts);

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have current data */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
		"exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
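				/*
				 * Drop gs_lock around the unload, then
				 * reacquire it to continue scanning this
				 * GRU's contexts.
				 */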
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
		/* Select blade/chiplet for GRU context */
		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1])) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			gru_check_context_placement(gts);
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;
		break;
	default:
		ret = -EINVAL;
	}
	gru_unlock_gts(gts);

	return ret;
}