// SPDX-License-Identifier: GPL-2.0+
// Copyright 2019 IBM Corp.
#include <linux/idr.h>
#include "ocxl_internal.h"

static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn)
{
	return (get_device(&fn->dev) == NULL) ? NULL : fn;
}

static void ocxl_fn_put(struct ocxl_fn *fn)
{
	put_device(&fn->dev);
}

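/*
 * Allocate and initialise an AFU. The AFU takes a reference on its
 * parent function, dropped in free_afu() when the last kref is put.
 */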
static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu;

	afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL);
	if (!afu)
		return NULL;

	kref_init(&afu->kref);
	mutex_init(&afu->contexts_lock);
	mutex_init(&afu->afu_control_lock);
	idr_init(&afu->contexts_idr);
	afu->fn = fn;
	ocxl_fn_get(fn);
	return afu;
}

static void free_afu(struct kref *kref)
{
	struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref);

	idr_destroy(&afu->contexts_idr);
	ocxl_fn_put(afu->fn);
	kfree(afu);
}

void ocxl_afu_get(struct ocxl_afu *afu)
{
	kref_get(&afu->kref);
}
EXPORT_SYMBOL_GPL(ocxl_afu_get);

void ocxl_afu_put(struct ocxl_afu *afu)
{
	kref_put(&afu->kref, free_afu);
}
EXPORT_SYMBOL_GPL(ocxl_afu_put);

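/*
 * Carve out this AFU's share of the function's actag range and write
 * it to the AFU control DVSEC in config space.
 */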
static int assign_afu_actag(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int actag_count, actag_offset;
	struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

	/*
	 * If there were not enough actags for the function, each AFU
	 * reduces its count proportionally.
	 */
	actag_count = afu->config.actag_supported *
		fn->actag_enabled / fn->actag_supported;
	actag_offset = ocxl_actag_afu_alloc(fn, actag_count);
	if (actag_offset < 0) {
		dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n",
			actag_count, actag_offset);
		return actag_offset;
	}
	afu->actag_base = fn->actag_base + actag_offset;
	afu->actag_enabled = actag_count;

	ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos,
				  afu->actag_base, afu->actag_enabled);
	dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n",
		afu->actag_base, afu->actag_enabled);
	return 0;
}

static void reclaim_afu_actag(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int start_offset, size;

	start_offset = afu->actag_base - fn->actag_base;
	size = afu->actag_enabled;
	ocxl_actag_afu_free(afu->fn, start_offset, size);
}

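/*
 * Reserve a contiguous PASID range for this AFU out of the function's
 * range and advertise it through the AFU control DVSEC.
 */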
static int assign_afu_pasid(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int pasid_count, pasid_offset;
	struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

	/*
	 * We only support the case where the function configuration
	 * requested enough PASIDs to cover all AFUs.
	 */
	pasid_count = 1 << afu->config.pasid_supported_log;
	pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count);
	if (pasid_offset < 0) {
		dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n",
			pasid_count, pasid_offset);
		return pasid_offset;
	}
	afu->pasid_base = fn->pasid_base + pasid_offset;
	afu->pasid_count = 0;
	afu->pasid_max = pasid_count;

	ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos,
				  afu->pasid_base,
				  afu->config.pasid_supported_log);
	dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n",
		afu->pasid_base, pasid_count);
	return 0;
}

static void reclaim_afu_pasid(struct ocxl_afu *afu)
{
	struct ocxl_fn *fn = afu->fn;
	int start_offset, size;

	start_offset = afu->pasid_base - fn->pasid_base;
	size = 1 << afu->config.pasid_supported_log;
	ocxl_pasid_afu_free(afu->fn, start_offset, size);
}

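/*
 * Only BARs 0, 2 and 4 are expected here. A BAR may be shared between
 * AFUs, so keep a per-function use count and only request/release the
 * PCI region for the first/last user.
 */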
static int reserve_fn_bar(struct ocxl_fn *fn, int bar)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int rc, idx;

	if (bar != 0 && bar != 2 && bar != 4)
		return -EINVAL;

	idx = bar >> 1;
	if (fn->bar_used[idx]++ == 0) {
		rc = pci_request_region(dev, bar, "ocxl");
		if (rc)
			return rc;
	}
	return 0;
}

static void release_fn_bar(struct ocxl_fn *fn, int bar)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int idx;

	if (bar != 0 && bar != 2 && bar != 4)
		return;

	idx = bar >> 1;
	if (--fn->bar_used[idx] == 0)
		pci_release_region(dev, bar);
	WARN_ON(fn->bar_used[idx] < 0);
}

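/*
 * Reserve the BARs backing the global and per-process MMIO areas.
 * Only the global area is mapped here; for the per-process area we
 * just record its start address.
 */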
static int map_mmio_areas(struct ocxl_afu *afu)
{
	int rc;
	struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent);

	rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar);
	if (rc)
		return rc;

	rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	if (rc) {
		release_fn_bar(afu->fn, afu->config.global_mmio_bar);
		return rc;
	}

	afu->global_mmio_start =
		pci_resource_start(pci_dev, afu->config.global_mmio_bar) +
		afu->config.global_mmio_offset;
	afu->pp_mmio_start =
		pci_resource_start(pci_dev, afu->config.pp_mmio_bar) +
		afu->config.pp_mmio_offset;

	afu->global_mmio_ptr = ioremap(afu->global_mmio_start,
				       afu->config.global_mmio_size);
	if (!afu->global_mmio_ptr) {
		release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
		release_fn_bar(afu->fn, afu->config.global_mmio_bar);
		dev_err(&pci_dev->dev, "Error mapping global mmio area\n");
		return -ENOMEM;
	}

	/*
	 * Leave an empty page between the per-process mmio area and
	 * the AFU interrupt mappings
	 */
	afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE;
	return 0;
}

static void unmap_mmio_areas(struct ocxl_afu *afu)
{
	if (afu->global_mmio_ptr) {
		iounmap(afu->global_mmio_ptr);
		afu->global_mmio_ptr = NULL;
	}
	afu->global_mmio_start = 0;
	afu->pp_mmio_start = 0;
	release_fn_bar(afu->fn, afu->config.pp_mmio_bar);
	release_fn_bar(afu->fn, afu->config.global_mmio_bar);
}

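/*
 * Read the AFU descriptor from config space, then assign its actag
 * and PASID ranges and map its MMIO areas. Undone by deconfigure_afu().
 */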
static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev)
{
	int rc;

	rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx);
	if (rc)
		return rc;

	rc = assign_afu_actag(afu);
	if (rc)
		return rc;

	rc = assign_afu_pasid(afu);
	if (rc)
		goto err_free_actag;

	rc = map_mmio_areas(afu);
	if (rc)
		goto err_free_pasid;

	return 0;

err_free_pasid:
	reclaim_afu_pasid(afu);
err_free_actag:
	reclaim_afu_actag(afu);
	return rc;
}

static void deconfigure_afu(struct ocxl_afu *afu)
{
	unmap_mmio_areas(afu);
	reclaim_afu_pasid(afu);
	reclaim_afu_actag(afu);
}

static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu)
{
	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1);

	return 0;
}

static void deactivate_afu(struct ocxl_afu *afu)
{
	struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent);

	ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0);
}

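/*
 * Allocate, configure and enable one AFU, then add it to the
 * function's AFU list.
 */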
static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx)
{
	int rc;
	struct ocxl_afu *afu;

	afu = alloc_afu(fn);
	if (!afu)
		return -ENOMEM;

	rc = configure_afu(afu, afu_idx, dev);
	if (rc) {
		ocxl_afu_put(afu);
		return rc;
	}

	rc = activate_afu(dev, afu);
	if (rc) {
		deconfigure_afu(afu);
		ocxl_afu_put(afu);
		return rc;
	}

	list_add_tail(&afu->list, &fn->afu_list);

	return 0;
}

static void remove_afu(struct ocxl_afu *afu)
{
	list_del(&afu->list);
	ocxl_context_detach_all(afu);
	deactivate_afu(afu);
	deconfigure_afu(afu);
	ocxl_afu_put(afu); // matches the implicit get in alloc_afu
}

static struct ocxl_fn *alloc_function(void)
{
	struct ocxl_fn *fn;

	fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL);
	if (!fn)
		return NULL;

	INIT_LIST_HEAD(&fn->afu_list);
	INIT_LIST_HEAD(&fn->pasid_list);
	INIT_LIST_HEAD(&fn->actag_list);

	return fn;
}

static void free_function(struct ocxl_fn *fn)
{
	WARN_ON(!list_empty(&fn->afu_list));
	WARN_ON(!list_empty(&fn->pasid_list));
	kfree(fn);
}

static void free_function_dev(struct device *dev)
{
	struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev);

	free_function(fn);
}

static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
{
	fn->dev.parent = &dev->dev;
	fn->dev.release = free_function_dev;
	return dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
}

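/*
 * Read the actag range granted to the function and write it back to
 * the function DVSEC.
 */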
static int assign_function_actag(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	u16 base, enabled, supported;
	int rc;

	rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported);
	if (rc)
		return rc;

	fn->actag_base = base;
	fn->actag_enabled = enabled;
	fn->actag_supported = supported;

	ocxl_config_set_actag(dev, fn->config.dvsec_function_pos,
			      fn->actag_base, fn->actag_enabled);
	dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n",
		fn->actag_base, fn->actag_enabled);
	return 0;
}

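/*
 * Check that the platform can provide as many PASIDs as the function
 * configuration asks for. The function's PASID range always starts at 0.
 */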
static int set_function_pasid(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);
	int rc, desired_count, max_count;

	/* A function may not require any PASID */
	if (fn->config.max_pasid_log < 0)
		return 0;

	rc = ocxl_config_get_pasid_info(dev, &max_count);
	if (rc)
		return rc;

	desired_count = 1 << fn->config.max_pasid_log;

	if (desired_count > max_count) {
		dev_err(&fn->dev,
			"Function requires more PASIDs than are available (%d vs. %d)\n",
			desired_count, max_count);
		return -ENOSPC;
	}

	fn->pasid_base = 0;
	return 0;
}

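/*
 * Enable the PCI function, read its OpenCAPI configuration, set up the
 * link and configure the transaction layer (TL). Undone by
 * deconfigure_function().
 */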
static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev)
{
	int rc;

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc);
		return rc;
	}

	/*
	 * Once it has been confirmed to work on our hardware, we
	 * should reset the function, to force the adapter to restart
	 * from scratch.
	 * A function reset would also reset all its AFUs.
	 *
	 * Some hints for implementation:
	 *
	 * - there's no status bit to know when the reset is done. We
	 *   should try reading the config space to know when it's
	 *   done.
	 * - probably something like:
	 *   Reset
	 *   wait 100ms
	 *   issue config read
	 *   allow device up to 1 sec to return success on config
	 *   read before declaring it broken
	 *
	 * Some shared logic on the card (CFG, TLX) won't be reset, so
	 * there's no guarantee that it will be enough.
	 */
	rc = ocxl_config_read_function(dev, &fn->config);
	if (rc)
		return rc;

	rc = set_function_device(fn, dev);
	if (rc)
		return rc;

	rc = assign_function_actag(fn);
	if (rc)
		return rc;

	rc = set_function_pasid(fn);
	if (rc)
		return rc;

	rc = ocxl_link_setup(dev, 0, &fn->link);
	if (rc)
		return rc;

	rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos);
	if (rc) {
		ocxl_link_release(dev, fn->link);
		return rc;
	}
	return 0;
}

static void deconfigure_function(struct ocxl_fn *fn)
{
	struct pci_dev *dev = to_pci_dev(fn->dev.parent);

	ocxl_link_release(dev, fn->link);
	pci_disable_device(dev);
}

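/*
 * Allocate, configure and register the function device. On error,
 * everything done so far is unwound before returning an ERR_PTR.
 */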
static struct ocxl_fn *init_function(struct pci_dev *dev)
{
	struct ocxl_fn *fn;
	int rc;

	fn = alloc_function();
	if (!fn)
		return ERR_PTR(-ENOMEM);

	rc = configure_function(fn, dev);
	if (rc) {
		free_function(fn);
		return ERR_PTR(rc);
	}

	rc = device_register(&fn->dev);
	if (rc) {
		deconfigure_function(fn);
		put_device(&fn->dev);
		return ERR_PTR(rc);
	}
	return fn;
}

// Device detection & initialisation

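/*
 * Probe an OpenCAPI function: initialise the function itself, then
 * walk the AFU indexes and bring up every AFU found. An AFU that
 * fails to initialise is skipped rather than failing the whole
 * function.
 */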
struct ocxl_fn *ocxl_function_open(struct pci_dev *dev)
{
	int rc, afu_count = 0;
	u8 afu;
	struct ocxl_fn *fn;

	if (!radix_enabled()) {
		dev_err(&dev->dev, "Unsupported memory model (hash)\n");
		return ERR_PTR(-ENODEV);
	}

	fn = init_function(dev);
	if (IS_ERR(fn)) {
		dev_err(&dev->dev, "function init failed: %li\n",
			PTR_ERR(fn));
		return fn;
	}

	for (afu = 0; afu <= fn->config.max_afu_index; afu++) {
		rc = ocxl_config_check_afu_index(dev, &fn->config, afu);
		if (rc > 0) {
			rc = init_afu(dev, fn, afu);
			if (rc) {
				dev_err(&dev->dev,
					"Can't initialize AFU index %d\n", afu);
				continue;
			}
			afu_count++;
		}
	}
	dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count);
	return fn;
}
EXPORT_SYMBOL_GPL(ocxl_function_open);

struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn)
{
	return &fn->afu_list;
}
EXPORT_SYMBOL_GPL(ocxl_function_afu_list);

struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx)
{
	struct ocxl_afu *afu;

	list_for_each_entry(afu, &fn->afu_list, list) {
		if (afu->config.idx == afu_idx)
			return afu;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu);

const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn)
{
	return &fn->config;
}
EXPORT_SYMBOL_GPL(ocxl_function_config);

void ocxl_function_close(struct ocxl_fn *fn)
{
	struct ocxl_afu *afu, *tmp;

	list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) {
		remove_afu(afu);
	}

	deconfigure_function(fn);
	device_unregister(&fn->dev);
}
EXPORT_SYMBOL_GPL(ocxl_function_close);

// AFU Metadata

struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu)
{
	return &afu->config;
}
EXPORT_SYMBOL_GPL(ocxl_afu_config);

void ocxl_afu_set_private(struct ocxl_afu *afu, void *private)
{
	afu->private = private;
}
EXPORT_SYMBOL_GPL(ocxl_afu_set_private);

void *ocxl_afu_get_private(struct ocxl_afu *afu)
{
	if (afu)
		return afu->private;

	return NULL;
}
EXPORT_SYMBOL_GPL(ocxl_afu_get_private);