// SPDX-License-Identifier: GPL-2.0+
/*
 * VAS Fault handling.
 * Copyright 2019, IBM Corporation
 */

#define pr_fmt(fmt) "vas: " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/mmu_context.h>
#include <asm/icswx.h>

#include "vas.h"

/*
 * The maximum FIFO size for a fault window is 8MB
 * (VAS_RX_FIFO_SIZE_MAX). Use a 4MB FIFO since each VAS
 * instance gets its own fault window. An 8MB FIFO can be used
 * if more faults are expected per VAS instance.
 */
#define VAS_FAULT_WIN_FIFO_SIZE	(4 << 20)

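/*
 * Worked example of the fault-window credit budget (see the credit
 * mechanism described above vas_fault_thread_fn() below). Each CRB
 * slot in the FIFO corresponds to one credit, so:
 *
 *	4MB FIFO: (4 << 20) / 128 (CRB_SIZE) = 32768 credits
 *	8MB FIFO: (8 << 20) / 128 = 65536, which is capped to 0xffff
 *	since the receive credits field is 16 bits wide (see
 *	vas_setup_fault_window() below).
 */
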
static void dump_crb(struct coprocessor_request_block *crb)
{
	struct data_descriptor_entry *dde;
	struct nx_fault_stamp *nx;

	dde = &crb->source;
	pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
		be64_to_cpu(dde->address), be32_to_cpu(dde->length),
		dde->count, dde->index, dde->flags);

	dde = &crb->target;
	pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
		be64_to_cpu(dde->address), be32_to_cpu(dde->length),
		dde->count, dde->index, dde->flags);

	nx = &crb->stamp.nx;
	pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
		be32_to_cpu(nx->pswid),
		be64_to_cpu(crb->stamp.nx.fault_storage_addr),
		nx->flags, nx->fault_status);
}

/*
 * Update the CSB to indicate a translation error.
 *
 * User space polls on the CSB after the request is issued (a polling
 * sketch follows this function). If NX can handle the request without
 * any issues, it updates the CSB. Whereas if NX encounters a page
 * fault, the kernel handles the fault and updates the CSB with a
 * translation error.
 *
 * If we are unable to update the CSB (i.e. copy_to_user failed due to
 * an invalid csb_addr), send a signal to the process.
 */
static void update_csb(struct vas_window *window,
			struct coprocessor_request_block *crb)
{
	struct coprocessor_status_block csb;
	struct kernel_siginfo info;
	struct task_struct *tsk;
	void __user *csb_addr;
	struct pid *pid;
	int rc;

	/*
	 * NX user space windows cannot be opened when task->mm is NULL,
	 * and faults are not generated for kernel requests.
	 */
	if (WARN_ON_ONCE(!window->mm || !window->user_win))
		return;

	csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);

	memset(&csb, 0, sizeof(csb));
	csb.cc = CSB_CC_FAULT_ADDRESS;
	csb.ce = CSB_CE_TERMINATION;
	csb.cs = 0;
	csb.count = 0;

	/*
	 * NX operates on and returns the CRB in BE format, as the CRB
	 * struct is defined. So save fault_storage_addr in BE, exactly
	 * as NX pasted it into the FIFO, and expect user space to
	 * convert it to CPU format.
	 */
	csb.address = crb->stamp.nx.fault_storage_addr;
	csb.flags = 0;

	pid = window->pid;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	/*
	 * A process closes its send window after all pending NX
	 * requests are completed. In multi-threaded applications, a
	 * child thread can open a window and exit without closing it:
	 * some requests may still be pending, or other threads may use
	 * the window later. We should handle faults if NX encounters
	 * page faults on these requests: update the CSB with a
	 * translation error and the fault address. If the csb_addr
	 * passed by user space is invalid, send a SEGV signal to the
	 * pid saved in the window. If the child thread is not running,
	 * send the signal to the tgid. The parent thread (tgid) will
	 * close this window upon its exit.
	 *
	 * pid and mm references are taken when the window is opened by
	 * the process (pid). So the tgid is used only when a child
	 * thread opens a window and exits without closing it.
	 */
	if (!tsk) {
		pid = window->tgid;
		tsk = get_pid_task(pid, PIDTYPE_PID);
		/*
		 * The parent thread (tgid) closes the window when it
		 * exits, so we should not get here.
		 */
		if (WARN_ON_ONCE(!tsk))
			return;
	}

	/* Return if the task is exiting. */
	if (tsk->flags & PF_EXITING) {
		put_task_struct(tsk);
		return;
	}

	kthread_use_mm(window->mm);
	rc = copy_to_user(csb_addr, &csb, sizeof(csb));
	/*
	 * User space polls on csb.flags (the first byte). So write the
	 * rest of the CSB first, add a barrier, then copy the first
	 * byte with the csb.flags update.
	 */
	if (!rc) {
		csb.flags = CSB_V;
		/* Make sure update to csb.flags is visible now */
		smp_mb();
		rc = copy_to_user(csb_addr, &csb, sizeof(u8));
	}
	kthread_unuse_mm(window->mm);
	put_task_struct(tsk);

	/* Success */
	if (!rc)
		return;

	pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
			csb_addr, pid_vnr(pid));

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = EFAULT;
	info.si_code = SEGV_MAPERR;
	info.si_addr = csb_addr;

	/*
	 * The process will be polling on csb.flags after the request
	 * is sent to NX. So in general the CSB update should not fail
	 * except when an application passes an invalid csb_addr.
	 * Print an error message and leave it to user space whether
	 * to ignore or handle this signal.
	 */
	rcu_read_lock();
	rc = kill_pid_info(SIGSEGV, &info, pid);
	rcu_read_unlock();

	pr_devel("%s(): pid %d kill_pid_info() rc %d\n", __func__,
			pid_vnr(pid), rc);
}
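
/*
 * A minimal user-space polling sketch (illustrative only, not part of
 * this driver; handle_fault() and the busy-wait style are assumptions,
 * while csb.flags/CSB_V and csb.cc/CSB_CC_FAULT_ADDRESS follow the CSB
 * protocol described above):
 *
 *	struct coprocessor_status_block csb = { 0 };
 *
 *	// paste a CRB with crb->csb_addr pointing at &csb, then:
 *	while (!(csb.flags & CSB_V))
 *		;	// spin until NX or the kernel sets CSB_V
 *	if (csb.cc == CSB_CC_FAULT_ADDRESS)
 *		// translation error: fault address is in csb.address,
 *		// stored in BE as noted in update_csb() above
 *		handle_fault(be64_to_cpu(csb.address));
 */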

static void dump_fifo(struct vas_instance *vinst, void *entry)
{
	unsigned long *end = vinst->fault_fifo + vinst->fault_fifo_size;
	unsigned long *fifo = entry;
	int i;

	pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size,
			vinst->fault_fifo_size / CRB_SIZE);

	/* Dump 10 CRB entries or until end of FIFO */
	pr_err("Fault FIFO Dump:\n");
	for (i = 0; i < 10*(CRB_SIZE/8) && fifo < end; i += 4, fifo += 4) {
		pr_err("[%.3d, %p]: 0x%.16lx 0x%.16lx 0x%.16lx 0x%.16lx\n",
			i, fifo, *fifo, *(fifo+1), *(fifo+2), *(fifo+3));
	}
}

/*
 * Process valid CRBs in the fault FIFO.
 * NX processes user space requests, returns credit and updates the
 * status in the CRB. If it encounters a translation error when
 * accessing the CRB or request buffers, it raises an interrupt on the
 * CPU to handle the fault. It takes a credit on the fault window,
 * updates nx_fault_stamp in the CRB with the following information
 * and pastes the CRB into the fault FIFO.
 *
 * pswid - window ID of the window on which the request is sent.
 * fault_storage_addr - fault address
 *
 * It can raise a single interrupt for multiple faults. The OS is
 * expected to process all valid faults and return credit for each
 * fault on the user space and fault windows. This fault FIFO control
 * is done with the credit mechanism: NX can continuously paste CRBs
 * until credits run out on the fault window; after that, pastes
 * return with RMA_reject.
 *
 * Total credits available on fault window: FIFO_SIZE(4MB)/CRB_SIZE(128)
 */
irqreturn_t vas_fault_thread_fn(int irq, void *data)
{
	struct vas_instance *vinst = data;
	struct coprocessor_request_block *crb, *entry;
	struct coprocessor_request_block buf;
	struct vas_window *window;
	unsigned long flags;
	void *fifo;

	crb = &buf;

	/*
	 * VAS can interrupt with multiple page faults. So process all
	 * valid CRBs within the fault FIFO until an invalid CRB is
	 * reached. We use CCW[0] and pswid to validate CRBs:
	 *
	 * CCW[0]	Reserved bit. When NX pastes a CRB, CCW[0]=0.
	 *		OS sets this bit to 1 after reading the CRB.
	 * pswid	NX assigns the window ID. Set pswid to -1 after
	 *		reading the CRB from the fault FIFO.
	 *
	 * We exit this function if no valid CRBs are available to
	 * process. So acquire fault_lock and reset fifo_in_progress
	 * to 0 before exit.
	 * In case the kernel receives another interrupt with a
	 * different page fault, the interrupt handler returns
	 * IRQ_HANDLED if fifo_in_progress is set, meaning these new
	 * faults will be handled by the current thread. Otherwise it
	 * sets fifo_in_progress and returns IRQ_WAKE_THREAD to wake
	 * up the thread.
	 */
	while (true) {
		spin_lock_irqsave(&vinst->fault_lock, flags);
		/*
		 * Advance the fault fifo pointer to next CRB.
		 * Use CRB_SIZE rather than sizeof(*crb) since the latter is
		 * aligned to CRB_ALIGN (256) but the CRB written to by VAS is
		 * only CRB_SIZE in length.
		 */
		fifo = vinst->fault_fifo + (vinst->fault_crbs * CRB_SIZE);
		entry = fifo;

		if ((entry->stamp.nx.pswid == cpu_to_be32(FIFO_INVALID_ENTRY))
			|| (entry->ccw & cpu_to_be32(CCW0_INVALID))) {
			vinst->fifo_in_progress = 0;
			spin_unlock_irqrestore(&vinst->fault_lock, flags);
			return IRQ_HANDLED;
		}

		spin_unlock_irqrestore(&vinst->fault_lock, flags);
		vinst->fault_crbs++;
		if (vinst->fault_crbs == (vinst->fault_fifo_size / CRB_SIZE))
			vinst->fault_crbs = 0;

		memcpy(crb, fifo, CRB_SIZE);
		entry->stamp.nx.pswid = cpu_to_be32(FIFO_INVALID_ENTRY);
		entry->ccw |= cpu_to_be32(CCW0_INVALID);
		/*
		 * Return credit for the fault window.
		 */
		vas_return_credit(vinst->fault_win, false);

		pr_devel("VAS[%d] fault_fifo %p, fifo %p, fault_crbs %d\n",
				vinst->vas_id, vinst->fault_fifo, fifo,
				vinst->fault_crbs);

		dump_crb(crb);
		window = vas_pswid_to_window(vinst,
				be32_to_cpu(crb->stamp.nx.pswid));

		if (IS_ERR(window)) {
			/*
			 * We got an interrupt about a specific send
			 * window but we can't find that window and we
			 * can't even clean it up (return credit on the
			 * user space window).
			 * But we should not get here.
			 * TODO: Disable IRQ.
			 */
			dump_fifo(vinst, (void *)entry);
			pr_err("VAS[%d] fault_fifo %p, fifo %p, pswid 0x%x, fault_crbs %d bad CRB?\n",
				vinst->vas_id, vinst->fault_fifo, fifo,
				be32_to_cpu(crb->stamp.nx.pswid),
				vinst->fault_crbs);

			WARN_ON_ONCE(1);
		} else {
			update_csb(window, crb);
			/*
			 * Return credit for send window after processing
			 * fault CRB.
			 */
			vas_return_credit(window, true);
		}
	}
}

irqreturn_t vas_fault_handler(int irq, void *dev_id)
{
	struct vas_instance *vinst = dev_id;
	irqreturn_t ret = IRQ_WAKE_THREAD;
	unsigned long flags;

	/*
	 * NX can generate an interrupt for multiple faults. So the
	 * fault handler thread processes all CRBs until it finds an
	 * invalid entry. If NX sees continuous faults, it is possible
	 * that the thread function entered on the first interrupt
	 * will execute and process all valid CRBs.
	 * So wake up the thread only if the fault thread is not
	 * already in progress.
	 */
	spin_lock_irqsave(&vinst->fault_lock, flags);

	if (vinst->fifo_in_progress)
		ret = IRQ_HANDLED;
	else
		vinst->fifo_in_progress = 1;

	spin_unlock_irqrestore(&vinst->fault_lock, flags);

	return ret;
}
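
/*
 * A sketch of how this handler pair would typically be wired up
 * (illustrative; the actual registration lives elsewhere in this
 * driver, and "vinst->virq" is an assumed field name):
 *
 *	rc = request_threaded_irq(vinst->virq, vas_fault_handler,
 *				  vas_fault_thread_fn, 0, "vas-fault",
 *				  vinst);
 *
 * vas_fault_handler() runs in hard-IRQ context and only decides
 * whether to wake the thread; vas_fault_thread_fn() then drains the
 * fault FIFO in sleepable context, where copy_to_user() is allowed.
 */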

/*
 * Fault window is opened per VAS instance. NX pastes fault CRB in fault
 * FIFO upon page faults.
 */
int vas_setup_fault_window(struct vas_instance *vinst)
{
	struct vas_rx_win_attr attr;

	vinst->fault_fifo_size = VAS_FAULT_WIN_FIFO_SIZE;
	vinst->fault_fifo = kzalloc(vinst->fault_fifo_size, GFP_KERNEL);
	if (!vinst->fault_fifo) {
		pr_err("Unable to alloc %d bytes for fault_fifo\n",
				vinst->fault_fifo_size);
		return -ENOMEM;
	}

	/*
	 * Invalidate all CRB entries. NX pastes valid entry for each fault.
	 */
	memset(vinst->fault_fifo, FIFO_INVALID_ENTRY, vinst->fault_fifo_size);
	vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT);

	attr.rx_fifo_size = vinst->fault_fifo_size;
	attr.rx_fifo = __pa(vinst->fault_fifo);

	/*
	 * Max creds is based on the number of CRBs that can fit in
	 * the FIFO (fault_fifo_size / CRB_SIZE). If an 8MB FIFO is
	 * used, max creds will be 0xffff since the receive creds
	 * field is 16 bits wide.
	 */
	attr.wcreds_max = vinst->fault_fifo_size / CRB_SIZE;
	attr.lnotify_lpid = 0;
	attr.lnotify_pid = mfspr(SPRN_PID);
	attr.lnotify_tid = mfspr(SPRN_PID);

	vinst->fault_win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT,
					&attr);

	if (IS_ERR(vinst->fault_win)) {
		pr_err("VAS: Error %ld opening FaultWin\n",
			PTR_ERR(vinst->fault_win));
		kfree(vinst->fault_fifo);
		return PTR_ERR(vinst->fault_win);
	}

	pr_devel("VAS: Created FaultWin %d, LPID/PID/TID [%d/%d/%d]\n",
			vinst->fault_win->winid, attr.lnotify_lpid,
			attr.lnotify_pid, attr.lnotify_tid);

	return 0;
}