// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <asm/pnv-ocxl.h>
#include <asm/opal.h>
#include <misc/ocxl-config.h>
#include "pci.h"

#define PNV_OCXL_TL_P9_RECV_CAP		0x000000000000000Full
#define PNV_OCXL_ACTAG_MAX		64
/* PASIDs are 20 bits, but on P9, the NPU can only handle 15 bits */
#define PNV_OCXL_PASID_BITS		15
#define PNV_OCXL_PASID_MAX		((1 << PNV_OCXL_PASID_BITS) - 1)

#define AFU_PRESENT (1 << 31)
#define AFU_INDEX_MASK 0x3F000000
#define AFU_INDEX_SHIFT 24
#define ACTAG_MASK 0xFFF


struct actag_range {
	u16 start;
	u16 count;
};

struct npu_link {
	struct list_head list;
	int domain;
	int bus;
	int dev;
	u16 fn_desired_actags[8];
	struct actag_range fn_actags[8];
	bool assignment_done;
};
static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);

/*
 * opencapi actags handling:
 *
 * When sending commands, the opencapi device references the memory
 * context it's targeting with an 'actag', which is really an alias
 * for a (BDF, pasid) combination. When it receives a command, the NPU
 * must do a lookup of the actag to identify the memory context. The
 * hardware supports a finite number of actags per link (64 for
 * POWER9).
 *
 * The device can carry multiple functions, and each function can have
 * multiple AFUs. Each AFU advertises in its config space the number
 * of desired actags. The host must configure in the config space of
 * the AFU how many actags the AFU is really allowed to use (which can
 * be less than what the AFU desires).
 *
 * When a PCI function is probed by the driver, it has no visibility
 * about the other PCI functions and how many actags they'd like,
 * which makes it impossible to distribute actags fairly among AFUs.
 *
 * Unfortunately, the only way to know how many actags a function
 * desires is by looking at the data for each AFU in the config space
 * and adding them up. Similarly, the only way to know how many actags
 * all the functions of the physical device desire is by adding the
 * previously computed function counts. Then we can match that against
 * what the hardware supports.
 *
 * To get a comprehensive view, we use a 'pci fixup': at the end of
 * PCI enumeration, each function counts how many actags its AFUs
 * desire and we save it in an 'npu_link' structure, shared between all
 * the PCI functions of the same device. Therefore, when the first
 * function is probed by the driver, we can get an idea of the total
 * count of desired actags for the device, and assign the actags to
 * the AFUs, by pro-rating if needed.
 */

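/*
 * Scan the extended capability list for IBM DVSECs, starting at
 * offset 'pos', and return the position of the first one whose DVSEC
 * ID matches 'dvsec_id', or 0 if none is found.
 */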
static int find_dvsec_from_pos(struct pci_dev *dev, int dvsec_id, int pos)
{
	int vsec = pos;
	u16 vendor, id;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    OCXL_EXT_CAP_ID_DVSEC))) {
		pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET,
				     &vendor);
		pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id);
		if (vendor == PCI_VENDOR_ID_IBM && id == dvsec_id)
			return vsec;
	}
	return 0;
}

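/*
 * Find the AFU control DVSEC for the AFU with index 'afu_idx'.
 * Returns its config space position, or 0 if it doesn't exist.
 */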
static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
{
	int vsec = 0;
	u8 idx;

	while ((vsec = find_dvsec_from_pos(dev, OCXL_DVSEC_AFU_CTRL_ID,
					   vsec))) {
		pci_read_config_byte(dev, vsec + OCXL_DVSEC_AFU_CTRL_AFU_IDX,
				     &idx);
		if (idx == afu_idx)
			return vsec;
	}
	return 0;
}

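/*
 * Read the function DVSEC to find the highest AFU index implemented
 * by the function. *afu_idx is set to -1 if the function carries no
 * AFU at all.
 */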
static int get_max_afu_index(struct pci_dev *dev, int *afu_idx)
{
	int pos;
	u32 val;

	pos = find_dvsec_from_pos(dev, OCXL_DVSEC_FUNC_ID, 0);
	if (!pos)
		return -ESRCH;

	pci_read_config_dword(dev, pos + OCXL_DVSEC_FUNC_OFF_INDEX, &val);
	if (val & AFU_PRESENT)
		*afu_idx = (val & AFU_INDEX_MASK) >> AFU_INDEX_SHIFT;
	else
		*afu_idx = -1;
	return 0;
}

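/*
 * Read how many actags the AFU with index 'afu_idx' advertises as
 * supported in its AFU control DVSEC.
 */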
static int get_actag_count(struct pci_dev *dev, int afu_idx, int *actag)
{
	int pos;
	u16 actag_sup;

	pos = find_dvsec_afu_ctrl(dev, afu_idx);
	if (!pos)
		return -ESRCH;

	pci_read_config_word(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_SUP,
			     &actag_sup);
	*actag = actag_sup & ACTAG_MASK;
	return 0;
}

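/*
 * Return the npu_link shared by all the functions of the device,
 * allocating it on first use. Must be called with links_list_lock
 * held.
 */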
static struct npu_link *find_link(struct pci_dev *dev)
{
	struct npu_link *link;

	list_for_each_entry(link, &links_list, list) {
		/* The functions of a device all share the same link */
		if (link->domain == pci_domain_nr(dev->bus) &&
		    link->bus == dev->bus->number &&
		    link->dev == PCI_SLOT(dev->devfn)) {
			return link;
		}
	}

	/* link doesn't exist yet. Allocate one */
	link = kzalloc(sizeof(struct npu_link), GFP_KERNEL);
	if (!link)
		return NULL;
	link->domain = pci_domain_nr(dev->bus);
	link->bus = dev->bus->number;
	link->dev = PCI_SLOT(dev->devfn);
	list_add(&link->list, &links_list);
	return link;
}

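/*
 * PCI fixup, run for each function at the end of enumeration: count
 * how many actags the AFUs of the function desire and record it in
 * the npu_link shared by all the functions of the device.
 */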
static void pnv_ocxl_fixup_actag(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct npu_link *link;
	int rc, afu_idx = -1, i, actag;

	if (!machine_is(powernv))
		return;

	if (phb->type != PNV_PHB_NPU_OCAPI)
		return;

	mutex_lock(&links_list_lock);

	link = find_link(dev);
	if (!link) {
		dev_warn(&dev->dev, "couldn't update actag information\n");
		mutex_unlock(&links_list_lock);
		return;
	}

	/*
	 * Check how many actags are desired for the AFUs under that
	 * function and add it to the count for the link
	 */
	rc = get_max_afu_index(dev, &afu_idx);
	if (rc) {
		/* Most likely an invalid config space */
		dev_dbg(&dev->dev, "couldn't find AFU information\n");
		afu_idx = -1;
	}

	link->fn_desired_actags[PCI_FUNC(dev->devfn)] = 0;
	for (i = 0; i <= afu_idx; i++) {
		/*
		 * AFU index 'holes' are allowed. So don't fail if we
		 * can't read the actag info for an index
		 */
		rc = get_actag_count(dev, i, &actag);
		if (rc)
			continue;
		link->fn_desired_actags[PCI_FUNC(dev->devfn)] += actag;
	}
	dev_dbg(&dev->dev, "total actags for function: %d\n",
		link->fn_desired_actags[PCI_FUNC(dev->devfn)]);

	mutex_unlock(&links_list_lock);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_ocxl_fixup_actag);

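/*
 * Compute how many actags to grant a function: if the total desired
 * across the device fits within the per-link maximum, grant the full
 * request, otherwise pro-rate it.
 */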
static u16 assign_fn_actags(u16 desired, u16 total)
{
	u16 count;

	if (total <= PNV_OCXL_ACTAG_MAX)
		count = desired;
	else
		count = PNV_OCXL_ACTAG_MAX * desired / total;

	return count;
}

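/*
 * Carve the per-link actag space into one contiguous range per
 * function, pro-rating each function's share. Called with
 * links_list_lock held, the first time a function of the device asks
 * for its actags.
 */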
static void assign_actags(struct npu_link *link)
{
	u16 actag_count, range_start = 0, total_desired = 0;
	int i;

	for (i = 0; i < 8; i++)
		total_desired += link->fn_desired_actags[i];

	for (i = 0; i < 8; i++) {
		if (link->fn_desired_actags[i]) {
			actag_count = assign_fn_actags(
				link->fn_desired_actags[i],
				total_desired);
			link->fn_actags[i].start = range_start;
			link->fn_actags[i].count = actag_count;
			range_start += actag_count;
			WARN_ON(range_start >= PNV_OCXL_ACTAG_MAX);
		}
		pr_debug("link %x:%x:%x fct %d actags: start=%d count=%d (desired=%d)\n",
			 link->domain, link->bus, link->dev, i,
			 link->fn_actags[i].start, link->fn_actags[i].count,
			 link->fn_desired_actags[i]);
	}
	link->assignment_done = true;
}

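/*
 * Return the actag range assigned to the function ('base' and number
 * of 'enabled' actags) as well as the total number of actags its
 * AFUs advertise as desired ('supported').
 */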
int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled,
		       u16 *supported)
{
	struct npu_link *link;

	mutex_lock(&links_list_lock);

	link = find_link(dev);
	if (!link) {
		dev_err(&dev->dev, "actag information not found\n");
		mutex_unlock(&links_list_lock);
		return -ENODEV;
	}
	/*
	 * On p9, we only have 64 actags per link, so they must be
	 * shared by all the functions of the same adapter. We counted
	 * the desired actag counts during PCI enumeration, so that we
	 * can allocate a pro-rated number of actags to each function.
	 */
	if (!link->assignment_done)
		assign_actags(link);

	*base = link->fn_actags[PCI_FUNC(dev->devfn)].start;
	*enabled = link->fn_actags[PCI_FUNC(dev->devfn)].count;
	*supported = link->fn_desired_actags[PCI_FUNC(dev->devfn)];

	mutex_unlock(&links_list_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_get_actag);

int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count)
{
	struct npu_link *link;
	int i, rc = -EINVAL;

	/*
	 * The number of PASIDs (process address space ID) which can
	 * be used by a function depends on how many functions exist
	 * on the device. The NPU needs to be configured to know how
	 * many bits are available to PASIDs and how many are to be
	 * used by the function BDF identifier.
	 *
	 * We only support one AFU-carrying function for now.
	 */
	mutex_lock(&links_list_lock);

	link = find_link(dev);
	if (!link) {
		dev_err(&dev->dev, "actag information not found\n");
		mutex_unlock(&links_list_lock);
		return -ENODEV;
	}

	for (i = 0; i < 8; i++)
		if (link->fn_desired_actags[i] && (i == PCI_FUNC(dev->devfn))) {
			*count = PNV_OCXL_PASID_MAX;
			rc = 0;
			break;
		}

	mutex_unlock(&links_list_lock);
	dev_dbg(&dev->dev, "%d PASIDs available for function\n",
		rc ? 0 : *count);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_get_pasid_count);

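/*
 * Set the 4-bit receive rate of TL template 'templ' in the rate
 * buffer. Rates are packed two per byte, starting from the
 * highest-numbered template: the rate for the highest template goes
 * in the upper nibble of the first byte.
 */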
static void set_templ_rate(unsigned int templ, unsigned int rate, char *buf)
{
	int shift, idx;

	WARN_ON(templ > PNV_OCXL_TL_MAX_TEMPLATE);
	idx = (PNV_OCXL_TL_MAX_TEMPLATE - templ) / 2;
	shift = 4 * (1 - ((PNV_OCXL_TL_MAX_TEMPLATE - templ) % 2));
	buf[idx] |= rate << shift;
}

int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap,
			char *rate_buf, int rate_buf_size)
{
	if (rate_buf_size != PNV_OCXL_TL_RATE_BUF_SIZE)
		return -EINVAL;
	/*
	 * The TL capabilities are a characteristic of the NPU, so
	 * we go with hard-coded values.
	 *
	 * The receiving rate of each template is encoded on 4 bits.
	 *
	 * On P9:
	 * - templates 0 -> 3 are supported
	 * - templates 0, 1 and 3 have a 0 receiving rate
	 * - template 2 has a receiving rate of 1 (extra cycle)
	 */
	memset(rate_buf, 0, rate_buf_size);
	set_templ_rate(2, 1, rate_buf);
	*cap = PNV_OCXL_TL_P9_RECV_CAP;
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_get_tl_cap);

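/*
 * Configure the host-side transport layer through OPAL: pass the TL
 * capabilities and the physical address of the per-template receive
 * rate buffer to apply on the host side of the link.
 */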
int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap,
			 uint64_t rate_buf_phys, int rate_buf_size)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int rc;

	if (rate_buf_size != PNV_OCXL_TL_RATE_BUF_SIZE)
		return -EINVAL;

	rc = opal_npu_tl_set(phb->opal_id, dev->devfn, cap,
			     rate_buf_phys, rate_buf_size);
	if (rc) {
		dev_err(&dev->dev, "Can't configure host TL: %d\n", rc);
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_set_tl_conf);

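/*
 * Retrieve the hardware interrupt used to report translation faults
 * (XSL interrupt), as exposed by OPAL in the device tree.
 */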
int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq)
{
	int rc;

	rc = of_property_read_u32(dev->dev.of_node, "ibm,opal-xsl-irq", hwirq);
	if (rc) {
		dev_err(&dev->dev,
			"Can't get translation interrupt for device\n");
		return rc;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_get_xsl_irq);

void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar,
			     void __iomem *tfc, void __iomem *pe_handle)
{
	iounmap(dsisr);
	iounmap(dar);
	iounmap(tfc);
	iounmap(pe_handle);
}
EXPORT_SYMBOL_GPL(pnv_ocxl_unmap_xsl_regs);

int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr,
			  void __iomem **dar, void __iomem **tfc,
			  void __iomem **pe_handle)
{
	u64 reg;
	int i, j, rc = 0;
	void __iomem *regs[4];

	/*
	 * opal stores the mmio addresses of the DSISR, DAR, TFC and
	 * PE_HANDLE registers in a device tree property, in that
	 * order
	 */
	for (i = 0; i < 4; i++) {
		rc = of_property_read_u64_index(dev->dev.of_node,
						"ibm,opal-xsl-mmio", i, &reg);
		if (rc)
			break;
		regs[i] = ioremap(reg, 8);
		if (!regs[i]) {
			rc = -EINVAL;
			break;
		}
	}
	if (rc) {
		dev_err(&dev->dev, "Can't map translation mmio registers\n");
		for (j = i - 1; j >= 0; j--)
			iounmap(regs[j]);
	} else {
		*dsisr = regs[0];
		*dar = regs[1];
		*tfc = regs[2];
		*pe_handle = regs[3];
	}
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_map_xsl_regs);

struct spa_data {
	u64 phb_opal_id;
	u32 bdfn;
};

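/*
 * Register the memory backing the Shared Process Area (SPA) for the
 * device with OPAL. The opaque platform data returned here is passed
 * back to the release and cache-invalidation helpers below.
 */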
int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask,
		       void **platform_data)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct spa_data *data;
	u32 bdfn;
	int rc;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	bdfn = (dev->bus->number << 8) | dev->devfn;
	rc = opal_npu_spa_setup(phb->opal_id, bdfn, virt_to_phys(spa_mem),
				PE_mask);
	if (rc) {
		dev_err(&dev->dev, "Can't setup Shared Process Area: %d\n", rc);
		kfree(data);
		return rc;
	}
	data->phb_opal_id = phb->opal_id;
	data->bdfn = bdfn;
	*platform_data = (void *) data;
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_spa_setup);

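/*
 * Undo pnv_ocxl_spa_setup(): detach the SPA from the device and free
 * the platform data.
 */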
void pnv_ocxl_spa_release(void *platform_data)
{
	struct spa_data *data = (struct spa_data *) platform_data;
	int rc;

	rc = opal_npu_spa_setup(data->phb_opal_id, data->bdfn, 0, 0);
	WARN_ON(rc);
	kfree(data);
}
EXPORT_SYMBOL_GPL(pnv_ocxl_spa_release);

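/*
 * Ask OPAL to remove a process element from the NPU's cache, so that
 * an updated entry in the SPA is picked up on the next access.
 */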
int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle)
{
	struct spa_data *data = (struct spa_data *) platform_data;
	int rc;

	rc = opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe_from_cache);