1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright 2014 IBM Corp.
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #include <linux/pci.h>
7*4882a593Smuzhiyun #include <misc/cxl.h>
8*4882a593Smuzhiyun #include "cxl.h"
9*4882a593Smuzhiyun
/*
 * pci_controller_ops::probe_mode hook: always probe the vPHB's bus
 * normally (no firmware-assisted discovery path for a virtual PHB).
 */
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}
14*4882a593Smuzhiyun
/*
 * pci_controller_ops::setup_msi_irqs hook: MSI is not supported on the
 * cxl vPHB, so always report -ENODEV.
 */
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}
19*4882a593Smuzhiyun
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}
27*4882a593Smuzhiyun
cxl_pci_enable_device_hook(struct pci_dev * dev)28*4882a593Smuzhiyun static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
29*4882a593Smuzhiyun {
30*4882a593Smuzhiyun struct pci_controller *phb;
31*4882a593Smuzhiyun struct cxl_afu *afu;
32*4882a593Smuzhiyun struct cxl_context *ctx;
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun phb = pci_bus_to_host(dev->bus);
35*4882a593Smuzhiyun afu = (struct cxl_afu *)phb->private_data;
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun if (!cxl_ops->link_ok(afu->adapter, afu)) {
38*4882a593Smuzhiyun dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
39*4882a593Smuzhiyun return false;
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun dev->dev.archdata.dma_offset = PAGE_OFFSET;
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun /*
45*4882a593Smuzhiyun * Allocate a context to do cxl things too. If we eventually do real
46*4882a593Smuzhiyun * DMA ops, we'll need a default context to attach them to
47*4882a593Smuzhiyun */
48*4882a593Smuzhiyun ctx = cxl_dev_context_init(dev);
49*4882a593Smuzhiyun if (IS_ERR(ctx))
50*4882a593Smuzhiyun return false;
51*4882a593Smuzhiyun dev->dev.archdata.cxl_ctx = ctx;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun return (cxl_ops->afu_check_and_enable(afu) == 0);
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun
cxl_pci_disable_device(struct pci_dev * dev)56*4882a593Smuzhiyun static void cxl_pci_disable_device(struct pci_dev *dev)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun struct cxl_context *ctx = cxl_get_context(dev);
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun if (ctx) {
61*4882a593Smuzhiyun if (ctx->status == STARTED) {
62*4882a593Smuzhiyun dev_err(&dev->dev, "Default context started\n");
63*4882a593Smuzhiyun return;
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun dev->dev.archdata.cxl_ctx = NULL;
66*4882a593Smuzhiyun cxl_release_context(ctx);
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun
/*
 * pci_controller_ops::window_alignment hook: report an alignment of 1,
 * i.e. impose no additional constraint on window placement.
 */
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}
75*4882a593Smuzhiyun
/* Intentionally a no-op; the open question below is from the original author. */
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
80*4882a593Smuzhiyun
/*
 * Map a (bus, devfn) pair to an AFU configuration record number:
 * the bus number selects the high byte, devfn the low byte.
 */
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
	int record = bus;

	record <<= 8;
	return record + devfn;
}
85*4882a593Smuzhiyun
pci_bus_to_afu(struct pci_bus * bus)86*4882a593Smuzhiyun static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun return phb ? phb->private_data : NULL;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun
/*
 * Drop a reader reference on afu->configured_state; a no-op once the
 * counter has gone negative (AFU deconfigured).
 */
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}
97*4882a593Smuzhiyun
/*
 * Take a reader reference on afu->configured_state.  Returns false if
 * the AFU has been marked deconfigured (counter negative), in which
 * case no reference is taken and config accesses must be refused.
 */
static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
	return atomic_inc_unless_negative(&afu->configured_state);
}
102*4882a593Smuzhiyun
cxl_pcie_config_info(struct pci_bus * bus,unsigned int devfn,struct cxl_afu * afu,int * _record)103*4882a593Smuzhiyun static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
104*4882a593Smuzhiyun struct cxl_afu *afu, int *_record)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun int record;
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun record = cxl_pcie_cfg_record(bus->number, devfn);
109*4882a593Smuzhiyun if (record > afu->crs_num)
110*4882a593Smuzhiyun return PCIBIOS_DEVICE_NOT_FOUND;
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun *_record = record;
113*4882a593Smuzhiyun return 0;
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun
/*
 * pci_ops::read for the cxl vPHB: service a config-space read by
 * reading the corresponding AFU configuration record via cxl_ops.
 *
 * Returns 0 on success with *val filled in, or PCIBIOS_DEVICE_NOT_FOUND
 * if the AFU is absent/deconfigured, the record is out of range, or the
 * back-end read fails.
 */
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	int rc, record;
	struct cxl_afu *afu;
	u8 val8;
	u16 val16;
	u32 val32;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	/* Dispatch on access width; widen the result into *val. */
	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
		*val = val8;
		break;
	case 2:
		rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
		*val = val16;
		break;
	case 4:
		rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
		*val = val32;
		break;
	default:
		WARN_ON(1);
	}

out:
	/* Release the reader reference taken by cxl_afu_configured_get(). */
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}
155*4882a593Smuzhiyun
/*
 * pci_ops::write for the cxl vPHB: service a config-space write by
 * writing the corresponding AFU configuration record via cxl_ops.
 *
 * Returns 0 on success, PCIBIOS_DEVICE_NOT_FOUND if the AFU is
 * absent/deconfigured, or PCIBIOS_SET_FAILED on any other failure.
 */
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	struct cxl_afu *afu;
	int rec, ret;

	afu = pci_bus_to_afu(bus);
	/* Take a reader reference on the AFU's configured state. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = cxl_pcie_config_info(bus, devfn, afu, &rec);
	if (ret)
		goto out;

	/* Dispatch on access width, masking val down to the written size. */
	switch (len) {
	case 1:
		ret = cxl_ops->afu_cr_write8(afu, rec, offset, val & 0xff);
		break;
	case 2:
		ret = cxl_ops->afu_cr_write16(afu, rec, offset, val & 0xffff);
		break;
	case 4:
		ret = cxl_ops->afu_cr_write32(afu, rec, offset, val);
		break;
	default:
		WARN_ON(1);
	}

out:
	/* Release the reader reference taken above. */
	cxl_afu_configured_put(afu);
	return ret ? PCIBIOS_SET_FAILED : 0;
}
189*4882a593Smuzhiyun
/* Config-space accessors installed on every cxl virtual PHB. */
static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun
/*
 * PHB callbacks for the cxl vPHB.  Note that release_device reuses
 * cxl_pci_disable_device: both paths drop the device's default context.
 */
static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
};
208*4882a593Smuzhiyun
/*
 * Create, scan and populate a virtual PHB exposing the AFU's
 * configuration records as PCI devices.  Returns 0 on success
 * (including the no-config-records case, where no vPHB is created),
 * or a negative errno on failure.
 */
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
	struct pci_controller *phb;
	struct device_node *vphb_dn;
	struct device *parent;

	/*
	 * If there are no AFU configuration records we won't have anything to
	 * expose under the vPHB, so skip creating one, returning success since
	 * this is still a valid case. This will also opt us out of EEH
	 * handling since we won't have anything special to do if there are no
	 * kernel drivers attached to the vPHB, and EEH handling is not yet
	 * supported in the peer model.
	 */
	if (!afu->crs_num)
		return 0;

	/* The parent device is the adapter. Reuse the device node of
	 * the adapter.
	 * We don't seem to care what device node is used for the vPHB,
	 * but tools such as lsvpd walk up the device parents looking
	 * for a valid location code, so we might as well show devices
	 * attached to the adapter as being located on that adapter.
	 */
	parent = afu->adapter->dev.parent;
	vphb_dn = parent->of_node;

	/* Alloc and setup PHB data structure */
	phb = pcibios_alloc_controller(vphb_dn);
	if (!phb)
		return -ENODEV;

	/* Setup parent in sysfs */
	phb->parent = parent;

	/* Setup the PHB using arch provided callback */
	phb->ops = &cxl_pcie_pci_ops;
	phb->cfg_addr = NULL;
	phb->cfg_data = NULL;
	phb->private_data = afu;
	phb->controller_ops = cxl_pci_controller_ops;

	/* Scan the bus */
	pcibios_scan_phb(phb);
	/*
	 * NOTE(review): the phb allocated above does not appear to be
	 * released on this error path -- confirm whether a call to
	 * pcibios_free_controller() is needed here.
	 */
	if (phb->bus == NULL)
		return -ENXIO;

	/* Set release hook on root bus */
	pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
				    pcibios_free_controller_deferred,
				    (void *) phb);

	/* Claim resources. This might need some rework as well depending
	 * whether we are doing probe-only or not, like assigning unassigned
	 * resources etc...
	 */
	pcibios_claim_one_bus(phb->bus);

	/* Add probed PCI devices to the device model */
	pci_bus_add_devices(phb->bus);

	afu->phb = phb;

	return 0;
}
274*4882a593Smuzhiyun
cxl_pci_vphb_remove(struct cxl_afu * afu)275*4882a593Smuzhiyun void cxl_pci_vphb_remove(struct cxl_afu *afu)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun struct pci_controller *phb;
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun /* If there is no configuration record we won't have one of these */
280*4882a593Smuzhiyun if (!afu || !afu->phb)
281*4882a593Smuzhiyun return;
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun phb = afu->phb;
284*4882a593Smuzhiyun afu->phb = NULL;
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun pci_remove_root_bus(phb->bus);
287*4882a593Smuzhiyun /*
288*4882a593Smuzhiyun * We don't free phb here - that's handled by
289*4882a593Smuzhiyun * pcibios_free_controller_deferred()
290*4882a593Smuzhiyun */
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun
cxl_pci_is_vphb_device(struct pci_dev * dev)293*4882a593Smuzhiyun bool cxl_pci_is_vphb_device(struct pci_dev *dev)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun struct pci_controller *phb;
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun phb = pci_bus_to_host(dev->bus);
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun return (phb->ops == &cxl_pcie_pci_ops);
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
cxl_pci_to_afu(struct pci_dev * dev)302*4882a593Smuzhiyun struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
303*4882a593Smuzhiyun {
304*4882a593Smuzhiyun struct pci_controller *phb;
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun phb = pci_bus_to_host(dev->bus);
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun return (struct cxl_afu *)phb->private_data;
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
311*4882a593Smuzhiyun
/*
 * Return the AFU configuration record number backing this vPHB device,
 * derived from its bus number and devfn.
 */
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
316*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);
317