// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"

#define CXL_ERROR_DETECTED_EVENT	1
#define CXL_SLOT_RESET_EVENT		2
#define CXL_RESUME_EVENT		3

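/*
 * Walk the devices on this AFU's virtual PHB and forward a bus error
 * event (error detected, slot reset or resume) to the PCI error handler
 * of each bound driver.
 */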
static void pci_error_handlers(struct cxl_afu *afu,
				int bus_error_event,
				pci_channel_state_t state)
{
	struct pci_dev *afu_dev;

	if (afu->phb == NULL)
		return;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		switch (bus_error_event) {
		case CXL_ERROR_DETECTED_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->error_detected)
				afu_dev->driver->err_handler->error_detected(afu_dev, state);
			break;
		case CXL_SLOT_RESET_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_dev->driver->err_handler->slot_reset(afu_dev);
			break;
		case CXL_RESUME_EVENT:
			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
			break;
		}
	}
}

static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
						u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

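/*
 * Collect VPD for the adapter or for one AFU via hcall. The caller's
 * buffer is described to the hypervisor as a list of page-sized
 * scatter/gather entries (at most SG_MAX_ENTRIES); on success the number
 * of bytes copied into 'buf' is returned.
 */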
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}

static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq_psl8(irq, ctx, &irq_info);
	return rc;
}

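/*
 * Query the hypervisor for the AFU error state. On success, *state_out
 * holds one of the H_STATE_* values.
 */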
static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
	u64 state;
	int rc = 0;

	if (!afu)
		return -EIO;

	rc = cxl_h_read_error_state(afu->guest->handle, &state);
	if (!rc) {
		WARN_ON(state != H_STATE_NORMAL &&
			state != H_STATE_DISABLE &&
			state != H_STATE_TEMP_UNAVAILABLE &&
			state != H_STATE_PERM_UNAVAILABLE);
		*state_out = state & 0xffffffff;
	}
	return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr, afu_error, dsisr;

	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			rc);

	return IRQ_HANDLED;
}

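/*
 * Reserve 'len' contiguous hardware IRQs from the ranges advertised by
 * firmware. Called with the adapter's irq_alloc_lock held.
 */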
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				*irq, *irq + len - 1);

			return 0;
		}
	}
	return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
			(irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}

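/*
 * Full adapter reset: report a frozen channel to the AFU drivers, detach
 * all contexts, issue the reset hcall, then signal slot reset and resume
 * events if the hcall succeeded.
 */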
static int guest_reset(struct cxl *adapter)
{
	struct cxl_afu *afu = NULL;
	int i, rc;

	pr_devel("Adapter reset request\n");
	spin_lock(&adapter->afu_list_lock);
	for (i = 0; i < adapter->slices; i++) {
		if ((afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
					pci_channel_io_frozen);
			cxl_context_detach_all(afu);
		}
	}

	rc = cxl_h_reset_adapter(adapter->guest->handle);
	for (i = 0; i < adapter->slices; i++) {
		if (!rc && (afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
	}
	spin_unlock(&adapter->afu_list_lock);
	return rc;
}

static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

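/*
 * Allocate 'num' hardware IRQs spread over at most CXL_IRQ_RANGES
 * contiguous ranges, halving the requested range size until an
 * allocation succeeds. Everything is released again if the full request
 * cannot be satisfied.
 */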
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				 guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

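/*
 * Acknowledge a translation fault. Only the upper 32 bits of the TFC
 * value are passed to the hcall, along with a flag derived from
 * psl_reset_mask.
 */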
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				tfc >> 32, (psl_reset_mask != 0));
}

static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}

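/*
 * Read 'sz' bytes from an AFU configuration record. The data is staged
 * in a zeroed page by the cxl_h_get_config() hcall wrapper and converted
 * from little endian before being returned in *val.
 */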
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
			u64 offset, u64 *val)
{
	unsigned long cr;
	char c;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		c = *((char *) cr);
		*val = c;
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

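/*
 * Build a process element for AFU directed mode and attach it through
 * the hypervisor: fill in the PID, flags, segment table pointers and the
 * virtual ISN bitmap, then record the per-process MMIO area returned by
 * the attach hcall in the context.
 */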
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	__be64 flags = 0;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags = cpu_to_be64(flags);
	elem->common.tid = cpu_to_be32(0); /* Unused */
	elem->common.pid = cpu_to_be32(pid);
	elem->common.csrp = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have at least one interrupt allocated to take faults for
	 * kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		rc = afu_register_irqs(ctx, 0);
		if (rc)
			goto out_free;
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				&ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
		    ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

out_free:
	free_page((u64)elem);
	return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;
	return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);

	kfree(afu->guest);
	kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}

#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
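/*
 * Fetch the AFU error buffer into a scratch page via hcall and copy at
 * most ERR_BUFF_MAX_COPY_SIZE bytes back to the caller.
 */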
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);

	return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}

static bool guest_support_attributes(const char *attr_name,
				     enum cxl_attrs type)
{
	switch (type) {
	case CXL_ADAPTER_ATTRS:
		if ((strcmp(attr_name, "base_image") == 0) ||
			(strcmp(attr_name, "load_image_on_perst") == 0) ||
			(strcmp(attr_name, "perst_reloads_same_image") == 0) ||
			(strcmp(attr_name, "image_loaded") == 0))
			return false;
		break;
	case CXL_AFU_MASTER_ATTRS:
		if ((strcmp(attr_name, "pp_mmio_off") == 0))
			return false;
		break;
	case CXL_AFU_ATTRS:
		break;
	default:
		break;
	}

	return true;
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}

static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}

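/*
 * Periodic check of the AFU error state. A disabled AFU is detached and
 * reset, with PCI error events forwarded to client drivers; a permanently
 * unavailable AFU is reported as a permanent failure.
 */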
static int afu_update_state(struct cxl_afu *afu)
{
	int rc, cur_state;

	rc = afu_read_error_state(afu, &cur_state);
	if (rc)
		return rc;

	if (afu->guest->previous_state == cur_state)
		return 0;

	pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

	switch (cur_state) {
	case H_STATE_NORMAL:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_DISABLE:
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_frozen);

		cxl_context_detach_all(afu);
		if ((rc = cxl_ops->afu_reset(afu)))
			pr_devel("reset hcall failed %d\n", rc);

		rc = afu_read_error_state(afu, &cur_state);
		if (!rc && cur_state == H_STATE_NORMAL) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
		afu->guest->previous_state = 0;
		break;

	case H_STATE_TEMP_UNAVAILABLE:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_PERM_UNAVAILABLE:
		dev_err(&afu->dev, "AFU is in permanent error state\n");
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_perm_failure);
		afu->guest->previous_state = cur_state;
		break;

	default:
		pr_err("Unexpected AFU(%d) error state: %#x\n",
		       afu->slice, cur_state);
		return -EINVAL;
	}

	return rc;
}

static void afu_handle_errstate(struct work_struct *work)
{
	struct cxl_afu_guest *afu_guest =
		container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);

	if (!afu_update_state(afu_guest->parent) &&
	    afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
		return;

	if (afu_guest->handle_err)
		schedule_delayed_work(&afu_guest->work_err,
				      msecs_to_jiffies(3000));
}

static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
	int state;

	if (afu && (!afu_read_error_state(afu, &state))) {
		if (state == H_STATE_NORMAL)
			return true;
	}

	return false;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	return 0;
}

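/*
 * Initialize one AFU from its device tree node: read the handle and
 * properties, map the MMIO regions, register the slice error interrupt,
 * create the char and sysfs devices, and start the periodic error-state
 * worker.
 */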
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
					  adapter->adapter_num,
					  slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	afu->enabled = true;

	/*
	 * wake up the cpu periodically to check the state
	 * of the AFU using "afu" stored in the guest structure.
	 */
	afu->guest->parent = afu;
	afu->guest->handle_err = true;
	INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
	schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	if (!afu)
		return;

	/* flush and stop pending job */
	afu->guest->handle_err = false;
	flush_delayed_work(&afu->guest->work_err);

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}

static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest) {
		if (adapter->guest->irq_avail) {
			for (i = 0; i < adapter->guest->irq_nranges; i++) {
				cur = &adapter->guest->irq_avail[i];
				kfree(cur->bitmap);
			}
			kfree(adapter->guest->irq_avail);
		}
		kfree(adapter->guest->status);
		kfree(adapter->guest);
	}
	cxl_remove_adapter_nr(adapter);
	kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
	/* The absence of this property means that the operational
	 * status is unknown or okay
	 */
	if (strlen(adapter->guest->status) &&
	    strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING:Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}

ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	cxl_guest_remove_chardev(adapter);
	device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}

struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	/*
	 * Hypervisor controls PSL timebase initialization (p1 register).
	 * On FW840, PSL is initialized.
	 */
	adapter->psl_timebase_synced = true;

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	if ((rc = cxl_guest_add_chardev(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	/* release the context lock as the adapter is configured */
	cxl_adapter_context_unlock(adapter);

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_guest_remove_chardev(adapter);
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}

void cxl_guest_reload_module(struct cxl *adapter)
{
	struct platform_device *pdev;

	pdev = adapter->guest->pdev;
	cxl_guest_remove_adapter(adapter);

	cxl_of_probe(pdev);
}

const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.setup_irq = NULL,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.update_ivtes = NULL,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
	.afu_cr_write8 = guest_afu_cr_write8,
	.afu_cr_write16 = guest_afu_cr_write16,
	.afu_cr_write32 = guest_afu_cr_write32,
	.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};