1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * PCI Endpoint *Controller* (EPC) library
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2017 Texas Instruments
6*4882a593Smuzhiyun * Author: Kishon Vijay Abraham I <kishon@ti.com>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/device.h>
10*4882a593Smuzhiyun #include <linux/slab.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/of_device.h>
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <linux/pci-epc.h>
15*4882a593Smuzhiyun #include <linux/pci-epf.h>
16*4882a593Smuzhiyun #include <linux/pci-ep-cfs.h>
17*4882a593Smuzhiyun
/* Device class under which every EPC device is registered ("pci_epc") */
static struct class *pci_epc_class;
19*4882a593Smuzhiyun
/* devres release callback: destroy the EPC stashed in the devres data */
static void devm_pci_epc_release(struct device *dev, void *res)
{
	pci_epc_destroy(*(struct pci_epc **)res);
}
26*4882a593Smuzhiyun
/* devres match callback: true when the devres data holds @match_data */
static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **this = res;

	return *this == match_data;
}
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun /**
35*4882a593Smuzhiyun * pci_epc_put() - release the PCI endpoint controller
36*4882a593Smuzhiyun * @epc: epc returned by pci_epc_get()
37*4882a593Smuzhiyun *
38*4882a593Smuzhiyun * release the refcount the caller obtained by invoking pci_epc_get()
39*4882a593Smuzhiyun */
pci_epc_put(struct pci_epc * epc)40*4882a593Smuzhiyun void pci_epc_put(struct pci_epc *epc)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun if (!epc || IS_ERR(epc))
43*4882a593Smuzhiyun return;
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun module_put(epc->ops->owner);
46*4882a593Smuzhiyun put_device(&epc->dev);
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_put);
49*4882a593Smuzhiyun
/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller.
 *
 * Return: the matching EPC with its driver module pinned and a device
 * reference held (released via pci_epc_put()), or ERR_PTR(-EINVAL) when
 * no controller by that name exists or its module is going away.
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	/* Walk every device registered in the pci_epc class. */
	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		/*
		 * Pin the controller driver's module before handing the
		 * EPC out; the matching module_put() is in pci_epc_put().
		 */
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		/* Take a device reference for the caller. */
		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
85*4882a593Smuzhiyun
/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function. For any incorrect value in reserved_bar return '0'.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	/* Convenience wrapper: search for a free BAR starting at BAR_0. */
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
99*4882a593Smuzhiyun
/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 * @bar: the starting BAR number from where unreserved BAR should be searched
 *
 * Invoke to get the next unreserved BAR starting from @bar that can be used
 * for endpoint function. For any incorrect value in reserved_bar return '0'.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	unsigned long free_bar;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
		bar++;

	/* Find if the reserved BAR is also a 64-bit BAR */
	free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;

	/*
	 * Set the adjacent bit if the reserved BAR is also a 64-bit BAR:
	 * a 64-bit BAR consumes the following BAR slot as well, so that
	 * slot must not be handed out as "free".
	 */
	free_bar <<= 1;
	free_bar |= epc_features->reserved_bar;

	/* Only six BARs (BAR_0..BAR_5) exist; past that report NO_BAR. */
	free_bar = find_next_zero_bit(&free_bar, 6, bar);
	if (free_bar > 5)
		return NO_BAR;

	return free_bar;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun /**
136*4882a593Smuzhiyun * pci_epc_get_features() - get the features supported by EPC
137*4882a593Smuzhiyun * @epc: the features supported by *this* EPC device will be returned
138*4882a593Smuzhiyun * @func_no: the features supported by the EPC device specific to the
139*4882a593Smuzhiyun * endpoint function with func_no will be returned
140*4882a593Smuzhiyun *
141*4882a593Smuzhiyun * Invoke to get the features provided by the EPC which may be
142*4882a593Smuzhiyun * specific to an endpoint function. Returns pci_epc_features on success
143*4882a593Smuzhiyun * and NULL for any failures.
144*4882a593Smuzhiyun */
pci_epc_get_features(struct pci_epc * epc,u8 func_no)145*4882a593Smuzhiyun const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
146*4882a593Smuzhiyun u8 func_no)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun const struct pci_epc_features *epc_features;
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
151*4882a593Smuzhiyun return NULL;
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun if (!epc->ops->get_features)
154*4882a593Smuzhiyun return NULL;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun mutex_lock(&epc->lock);
157*4882a593Smuzhiyun epc_features = epc->ops->get_features(epc, func_no);
158*4882a593Smuzhiyun mutex_unlock(&epc->lock);
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun return epc_features;
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_get_features);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun /**
165*4882a593Smuzhiyun * pci_epc_stop() - stop the PCI link
166*4882a593Smuzhiyun * @epc: the link of the EPC device that has to be stopped
167*4882a593Smuzhiyun *
168*4882a593Smuzhiyun * Invoke to stop the PCI link
169*4882a593Smuzhiyun */
pci_epc_stop(struct pci_epc * epc)170*4882a593Smuzhiyun void pci_epc_stop(struct pci_epc *epc)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun if (IS_ERR(epc) || !epc->ops->stop)
173*4882a593Smuzhiyun return;
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun mutex_lock(&epc->lock);
176*4882a593Smuzhiyun epc->ops->stop(epc);
177*4882a593Smuzhiyun mutex_unlock(&epc->lock);
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_stop);
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun /**
182*4882a593Smuzhiyun * pci_epc_start() - start the PCI link
183*4882a593Smuzhiyun * @epc: the link of *this* EPC device has to be started
184*4882a593Smuzhiyun *
185*4882a593Smuzhiyun * Invoke to start the PCI link
186*4882a593Smuzhiyun */
pci_epc_start(struct pci_epc * epc)187*4882a593Smuzhiyun int pci_epc_start(struct pci_epc *epc)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun int ret;
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun if (IS_ERR(epc))
192*4882a593Smuzhiyun return -EINVAL;
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun if (!epc->ops->start)
195*4882a593Smuzhiyun return 0;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun mutex_lock(&epc->lock);
198*4882a593Smuzhiyun ret = epc->ops->start(epc);
199*4882a593Smuzhiyun mutex_unlock(&epc->lock);
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun return ret;
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_start);
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun /**
206*4882a593Smuzhiyun * pci_epc_raise_irq() - interrupt the host system
207*4882a593Smuzhiyun * @epc: the EPC device which has to interrupt the host
208*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
209*4882a593Smuzhiyun * @type: specify the type of interrupt; legacy, MSI or MSI-X
210*4882a593Smuzhiyun * @interrupt_num: the MSI or MSI-X interrupt number
211*4882a593Smuzhiyun *
212*4882a593Smuzhiyun * Invoke to raise an legacy, MSI or MSI-X interrupt
213*4882a593Smuzhiyun */
pci_epc_raise_irq(struct pci_epc * epc,u8 func_no,enum pci_epc_irq_type type,u16 interrupt_num)214*4882a593Smuzhiyun int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
215*4882a593Smuzhiyun enum pci_epc_irq_type type, u16 interrupt_num)
216*4882a593Smuzhiyun {
217*4882a593Smuzhiyun int ret;
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
220*4882a593Smuzhiyun return -EINVAL;
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun if (!epc->ops->raise_irq)
223*4882a593Smuzhiyun return 0;
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun mutex_lock(&epc->lock);
226*4882a593Smuzhiyun ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
227*4882a593Smuzhiyun mutex_unlock(&epc->lock);
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun return ret;
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun /**
234*4882a593Smuzhiyun * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
235*4882a593Smuzhiyun * @epc: the EPC device to which MSI interrupts was requested
236*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
237*4882a593Smuzhiyun *
238*4882a593Smuzhiyun * Invoke to get the number of MSI interrupts allocated by the RC
239*4882a593Smuzhiyun */
pci_epc_get_msi(struct pci_epc * epc,u8 func_no)240*4882a593Smuzhiyun int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun int interrupt;
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
245*4882a593Smuzhiyun return 0;
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun if (!epc->ops->get_msi)
248*4882a593Smuzhiyun return 0;
249*4882a593Smuzhiyun
250*4882a593Smuzhiyun mutex_lock(&epc->lock);
251*4882a593Smuzhiyun interrupt = epc->ops->get_msi(epc, func_no);
252*4882a593Smuzhiyun mutex_unlock(&epc->lock);
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun if (interrupt < 0)
255*4882a593Smuzhiyun return 0;
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun interrupt = 1 << interrupt;
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun return interrupt;
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_get_msi);
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun /**
264*4882a593Smuzhiyun * pci_epc_set_msi() - set the number of MSI interrupt numbers required
265*4882a593Smuzhiyun * @epc: the EPC device on which MSI has to be configured
266*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
267*4882a593Smuzhiyun * @interrupts: number of MSI interrupts required by the EPF
268*4882a593Smuzhiyun *
269*4882a593Smuzhiyun * Invoke to set the required number of MSI interrupts.
270*4882a593Smuzhiyun */
pci_epc_set_msi(struct pci_epc * epc,u8 func_no,u8 interrupts)271*4882a593Smuzhiyun int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
272*4882a593Smuzhiyun {
273*4882a593Smuzhiyun int ret;
274*4882a593Smuzhiyun u8 encode_int;
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
277*4882a593Smuzhiyun interrupts > 32)
278*4882a593Smuzhiyun return -EINVAL;
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun if (!epc->ops->set_msi)
281*4882a593Smuzhiyun return 0;
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun encode_int = order_base_2(interrupts);
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun mutex_lock(&epc->lock);
286*4882a593Smuzhiyun ret = epc->ops->set_msi(epc, func_no, encode_int);
287*4882a593Smuzhiyun mutex_unlock(&epc->lock);
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun return ret;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_set_msi);
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun /**
294*4882a593Smuzhiyun * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
295*4882a593Smuzhiyun * @epc: the EPC device to which MSI-X interrupts was requested
296*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
297*4882a593Smuzhiyun *
298*4882a593Smuzhiyun * Invoke to get the number of MSI-X interrupts allocated by the RC
299*4882a593Smuzhiyun */
pci_epc_get_msix(struct pci_epc * epc,u8 func_no)300*4882a593Smuzhiyun int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun int interrupt;
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
305*4882a593Smuzhiyun return 0;
306*4882a593Smuzhiyun
307*4882a593Smuzhiyun if (!epc->ops->get_msix)
308*4882a593Smuzhiyun return 0;
309*4882a593Smuzhiyun
310*4882a593Smuzhiyun mutex_lock(&epc->lock);
311*4882a593Smuzhiyun interrupt = epc->ops->get_msix(epc, func_no);
312*4882a593Smuzhiyun mutex_unlock(&epc->lock);
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun if (interrupt < 0)
315*4882a593Smuzhiyun return 0;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun return interrupt + 1;
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_get_msix);
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun /**
322*4882a593Smuzhiyun * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
323*4882a593Smuzhiyun * @epc: the EPC device on which MSI-X has to be configured
324*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
325*4882a593Smuzhiyun * @interrupts: number of MSI-X interrupts required by the EPF
326*4882a593Smuzhiyun * @bir: BAR where the MSI-X table resides
327*4882a593Smuzhiyun * @offset: Offset pointing to the start of MSI-X table
328*4882a593Smuzhiyun *
329*4882a593Smuzhiyun * Invoke to set the required number of MSI-X interrupts.
330*4882a593Smuzhiyun */
pci_epc_set_msix(struct pci_epc * epc,u8 func_no,u16 interrupts,enum pci_barno bir,u32 offset)331*4882a593Smuzhiyun int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
332*4882a593Smuzhiyun enum pci_barno bir, u32 offset)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun int ret;
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
337*4882a593Smuzhiyun interrupts < 1 || interrupts > 2048)
338*4882a593Smuzhiyun return -EINVAL;
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun if (!epc->ops->set_msix)
341*4882a593Smuzhiyun return 0;
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun mutex_lock(&epc->lock);
344*4882a593Smuzhiyun ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
345*4882a593Smuzhiyun mutex_unlock(&epc->lock);
346*4882a593Smuzhiyun
347*4882a593Smuzhiyun return ret;
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_set_msix);
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun /**
352*4882a593Smuzhiyun * pci_epc_unmap_addr() - unmap CPU address from PCI address
353*4882a593Smuzhiyun * @epc: the EPC device on which address is allocated
354*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
355*4882a593Smuzhiyun * @phys_addr: physical address of the local system
356*4882a593Smuzhiyun *
357*4882a593Smuzhiyun * Invoke to unmap the CPU address from PCI address.
358*4882a593Smuzhiyun */
pci_epc_unmap_addr(struct pci_epc * epc,u8 func_no,phys_addr_t phys_addr)359*4882a593Smuzhiyun void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
360*4882a593Smuzhiyun phys_addr_t phys_addr)
361*4882a593Smuzhiyun {
362*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
363*4882a593Smuzhiyun return;
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun if (!epc->ops->unmap_addr)
366*4882a593Smuzhiyun return;
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun mutex_lock(&epc->lock);
369*4882a593Smuzhiyun epc->ops->unmap_addr(epc, func_no, phys_addr);
370*4882a593Smuzhiyun mutex_unlock(&epc->lock);
371*4882a593Smuzhiyun }
372*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun /**
375*4882a593Smuzhiyun * pci_epc_map_addr() - map CPU address to PCI address
376*4882a593Smuzhiyun * @epc: the EPC device on which address is allocated
377*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
378*4882a593Smuzhiyun * @phys_addr: physical address of the local system
379*4882a593Smuzhiyun * @pci_addr: PCI address to which the physical address should be mapped
380*4882a593Smuzhiyun * @size: the size of the allocation
381*4882a593Smuzhiyun *
382*4882a593Smuzhiyun * Invoke to map CPU address with PCI address.
383*4882a593Smuzhiyun */
pci_epc_map_addr(struct pci_epc * epc,u8 func_no,phys_addr_t phys_addr,u64 pci_addr,size_t size)384*4882a593Smuzhiyun int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
385*4882a593Smuzhiyun phys_addr_t phys_addr, u64 pci_addr, size_t size)
386*4882a593Smuzhiyun {
387*4882a593Smuzhiyun int ret;
388*4882a593Smuzhiyun
389*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
390*4882a593Smuzhiyun return -EINVAL;
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun if (!epc->ops->map_addr)
393*4882a593Smuzhiyun return 0;
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun mutex_lock(&epc->lock);
396*4882a593Smuzhiyun ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
397*4882a593Smuzhiyun mutex_unlock(&epc->lock);
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun return ret;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_map_addr);
402*4882a593Smuzhiyun
403*4882a593Smuzhiyun /**
404*4882a593Smuzhiyun * pci_epc_clear_bar() - reset the BAR
405*4882a593Smuzhiyun * @epc: the EPC device for which the BAR has to be cleared
406*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
407*4882a593Smuzhiyun * @epf_bar: the struct epf_bar that contains the BAR information
408*4882a593Smuzhiyun *
409*4882a593Smuzhiyun * Invoke to reset the BAR of the endpoint device.
410*4882a593Smuzhiyun */
pci_epc_clear_bar(struct pci_epc * epc,u8 func_no,struct pci_epf_bar * epf_bar)411*4882a593Smuzhiyun void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
412*4882a593Smuzhiyun struct pci_epf_bar *epf_bar)
413*4882a593Smuzhiyun {
414*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
415*4882a593Smuzhiyun (epf_bar->barno == BAR_5 &&
416*4882a593Smuzhiyun epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
417*4882a593Smuzhiyun return;
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun if (!epc->ops->clear_bar)
420*4882a593Smuzhiyun return;
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun mutex_lock(&epc->lock);
423*4882a593Smuzhiyun epc->ops->clear_bar(epc, func_no, epf_bar);
424*4882a593Smuzhiyun mutex_unlock(&epc->lock);
425*4882a593Smuzhiyun }
426*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
427*4882a593Smuzhiyun
428*4882a593Smuzhiyun /**
429*4882a593Smuzhiyun * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
430*4882a593Smuzhiyun * @epc: the EPC device on which BAR has to be configured
431*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
432*4882a593Smuzhiyun * @epf_bar: the struct epf_bar that contains the BAR information
433*4882a593Smuzhiyun *
434*4882a593Smuzhiyun * Invoke to configure the BAR of the endpoint device.
435*4882a593Smuzhiyun */
pci_epc_set_bar(struct pci_epc * epc,u8 func_no,struct pci_epf_bar * epf_bar)436*4882a593Smuzhiyun int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
437*4882a593Smuzhiyun struct pci_epf_bar *epf_bar)
438*4882a593Smuzhiyun {
439*4882a593Smuzhiyun int ret;
440*4882a593Smuzhiyun int flags = epf_bar->flags;
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
443*4882a593Smuzhiyun (epf_bar->barno == BAR_5 &&
444*4882a593Smuzhiyun flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
445*4882a593Smuzhiyun (flags & PCI_BASE_ADDRESS_SPACE_IO &&
446*4882a593Smuzhiyun flags & PCI_BASE_ADDRESS_IO_MASK) ||
447*4882a593Smuzhiyun (upper_32_bits(epf_bar->size) &&
448*4882a593Smuzhiyun !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
449*4882a593Smuzhiyun return -EINVAL;
450*4882a593Smuzhiyun
451*4882a593Smuzhiyun if (!epc->ops->set_bar)
452*4882a593Smuzhiyun return 0;
453*4882a593Smuzhiyun
454*4882a593Smuzhiyun mutex_lock(&epc->lock);
455*4882a593Smuzhiyun ret = epc->ops->set_bar(epc, func_no, epf_bar);
456*4882a593Smuzhiyun mutex_unlock(&epc->lock);
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun return ret;
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_set_bar);
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun /**
463*4882a593Smuzhiyun * pci_epc_write_header() - write standard configuration header
464*4882a593Smuzhiyun * @epc: the EPC device to which the configuration header should be written
465*4882a593Smuzhiyun * @func_no: the endpoint function number in the EPC device
466*4882a593Smuzhiyun * @header: standard configuration header fields
467*4882a593Smuzhiyun *
468*4882a593Smuzhiyun * Invoke to write the configuration header to the endpoint controller. Every
469*4882a593Smuzhiyun * endpoint controller will have a dedicated location to which the standard
470*4882a593Smuzhiyun * configuration header would be written. The callback function should write
471*4882a593Smuzhiyun * the header fields to this dedicated location.
472*4882a593Smuzhiyun */
pci_epc_write_header(struct pci_epc * epc,u8 func_no,struct pci_epf_header * header)473*4882a593Smuzhiyun int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
474*4882a593Smuzhiyun struct pci_epf_header *header)
475*4882a593Smuzhiyun {
476*4882a593Smuzhiyun int ret;
477*4882a593Smuzhiyun
478*4882a593Smuzhiyun if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
479*4882a593Smuzhiyun return -EINVAL;
480*4882a593Smuzhiyun
481*4882a593Smuzhiyun if (!epc->ops->write_header)
482*4882a593Smuzhiyun return 0;
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun mutex_lock(&epc->lock);
485*4882a593Smuzhiyun ret = epc->ops->write_header(epc, func_no, header);
486*4882a593Smuzhiyun mutex_unlock(&epc->lock);
487*4882a593Smuzhiyun
488*4882a593Smuzhiyun return ret;
489*4882a593Smuzhiyun }
490*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_write_header);
491*4882a593Smuzhiyun
492*4882a593Smuzhiyun /**
493*4882a593Smuzhiyun * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
494*4882a593Smuzhiyun * @epc: the EPC device to which the endpoint function should be added
495*4882a593Smuzhiyun * @epf: the endpoint function to be added
496*4882a593Smuzhiyun *
497*4882a593Smuzhiyun * A PCI endpoint device can have one or more functions. In the case of PCIe,
498*4882a593Smuzhiyun * the specification allows up to 8 PCIe endpoint functions. Invoke
499*4882a593Smuzhiyun * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
500*4882a593Smuzhiyun */
pci_epc_add_epf(struct pci_epc * epc,struct pci_epf * epf)501*4882a593Smuzhiyun int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
502*4882a593Smuzhiyun {
503*4882a593Smuzhiyun u32 func_no;
504*4882a593Smuzhiyun int ret = 0;
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun if (epf->epc)
507*4882a593Smuzhiyun return -EBUSY;
508*4882a593Smuzhiyun
509*4882a593Smuzhiyun if (IS_ERR(epc))
510*4882a593Smuzhiyun return -EINVAL;
511*4882a593Smuzhiyun
512*4882a593Smuzhiyun mutex_lock(&epc->lock);
513*4882a593Smuzhiyun func_no = find_first_zero_bit(&epc->function_num_map,
514*4882a593Smuzhiyun BITS_PER_LONG);
515*4882a593Smuzhiyun if (func_no >= BITS_PER_LONG) {
516*4882a593Smuzhiyun ret = -EINVAL;
517*4882a593Smuzhiyun goto ret;
518*4882a593Smuzhiyun }
519*4882a593Smuzhiyun
520*4882a593Smuzhiyun if (func_no > epc->max_functions - 1) {
521*4882a593Smuzhiyun dev_err(&epc->dev, "Exceeding max supported Function Number\n");
522*4882a593Smuzhiyun ret = -EINVAL;
523*4882a593Smuzhiyun goto ret;
524*4882a593Smuzhiyun }
525*4882a593Smuzhiyun
526*4882a593Smuzhiyun set_bit(func_no, &epc->function_num_map);
527*4882a593Smuzhiyun epf->func_no = func_no;
528*4882a593Smuzhiyun epf->epc = epc;
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun list_add_tail(&epf->list, &epc->pci_epf);
531*4882a593Smuzhiyun
532*4882a593Smuzhiyun ret:
533*4882a593Smuzhiyun mutex_unlock(&epc->lock);
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun return ret;
536*4882a593Smuzhiyun }
537*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_add_epf);
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun /**
540*4882a593Smuzhiyun * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
541*4882a593Smuzhiyun * @epc: the EPC device from which the endpoint function should be removed
542*4882a593Smuzhiyun * @epf: the endpoint function to be removed
543*4882a593Smuzhiyun *
544*4882a593Smuzhiyun * Invoke to remove PCI endpoint function from the endpoint controller.
545*4882a593Smuzhiyun */
pci_epc_remove_epf(struct pci_epc * epc,struct pci_epf * epf)546*4882a593Smuzhiyun void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
547*4882a593Smuzhiyun {
548*4882a593Smuzhiyun if (!epc || IS_ERR(epc) || !epf)
549*4882a593Smuzhiyun return;
550*4882a593Smuzhiyun
551*4882a593Smuzhiyun mutex_lock(&epc->lock);
552*4882a593Smuzhiyun clear_bit(epf->func_no, &epc->function_num_map);
553*4882a593Smuzhiyun list_del(&epf->list);
554*4882a593Smuzhiyun epf->epc = NULL;
555*4882a593Smuzhiyun mutex_unlock(&epc->lock);
556*4882a593Smuzhiyun }
557*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun /**
560*4882a593Smuzhiyun * pci_epc_linkup() - Notify the EPF device that EPC device has established a
561*4882a593Smuzhiyun * connection with the Root Complex.
562*4882a593Smuzhiyun * @epc: the EPC device which has established link with the host
563*4882a593Smuzhiyun *
564*4882a593Smuzhiyun * Invoke to Notify the EPF device that the EPC device has established a
565*4882a593Smuzhiyun * connection with the Root Complex.
566*4882a593Smuzhiyun */
pci_epc_linkup(struct pci_epc * epc)567*4882a593Smuzhiyun void pci_epc_linkup(struct pci_epc *epc)
568*4882a593Smuzhiyun {
569*4882a593Smuzhiyun if (!epc || IS_ERR(epc))
570*4882a593Smuzhiyun return;
571*4882a593Smuzhiyun
572*4882a593Smuzhiyun atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
573*4882a593Smuzhiyun }
574*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_linkup);
575*4882a593Smuzhiyun
576*4882a593Smuzhiyun /**
577*4882a593Smuzhiyun * pci_epc_init_notify() - Notify the EPF device that EPC device's core
578*4882a593Smuzhiyun * initialization is completed.
579*4882a593Smuzhiyun * @epc: the EPC device whose core initialization is completeds
580*4882a593Smuzhiyun *
581*4882a593Smuzhiyun * Invoke to Notify the EPF device that the EPC device's initialization
582*4882a593Smuzhiyun * is completed.
583*4882a593Smuzhiyun */
pci_epc_init_notify(struct pci_epc * epc)584*4882a593Smuzhiyun void pci_epc_init_notify(struct pci_epc *epc)
585*4882a593Smuzhiyun {
586*4882a593Smuzhiyun if (!epc || IS_ERR(epc))
587*4882a593Smuzhiyun return;
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
590*4882a593Smuzhiyun }
591*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pci_epc_init_notify);
592*4882a593Smuzhiyun
/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device: removes the configfs group,
 * unregisters the class device and frees the pci_epc allocation.
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	/* Remove the configfs entry before tearing down the device. */
	pci_ep_cfs_remove_epc_group(epc->group);
	device_unregister(&epc->dev);
	kfree(epc);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
606*4882a593Smuzhiyun
607*4882a593Smuzhiyun /**
608*4882a593Smuzhiyun * devm_pci_epc_destroy() - destroy the EPC device
609*4882a593Smuzhiyun * @dev: device that wants to destroy the EPC
610*4882a593Smuzhiyun * @epc: the EPC device that has to be destroyed
611*4882a593Smuzhiyun *
612*4882a593Smuzhiyun * Invoke to destroy the devres associated with this
613*4882a593Smuzhiyun * pci_epc and destroy the EPC device.
614*4882a593Smuzhiyun */
devm_pci_epc_destroy(struct device * dev,struct pci_epc * epc)615*4882a593Smuzhiyun void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
616*4882a593Smuzhiyun {
617*4882a593Smuzhiyun int r;
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
620*4882a593Smuzhiyun epc);
621*4882a593Smuzhiyun dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
622*4882a593Smuzhiyun }
623*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
624*4882a593Smuzhiyun
/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 *
 * Return: the new EPC on success, ERR_PTR(-EINVAL) for a NULL @dev,
 * ERR_PTR(-ENOMEM) on allocation failure, or the error from device setup.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	INIT_LIST_HEAD(&epc->pci_epf);
	ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);

	/* Register the EPC as a device of the pci_epc class under @dev. */
	device_initialize(&epc->dev);
	epc->dev.class = pci_epc_class;
	epc->dev.parent = dev;
	epc->ops = ops;

	/* The EPC device inherits the parent device's name. */
	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	/* NOTE(review): group is not checked for failure here — confirm
	 * whether a NULL/error group is tolerated by the cfs helpers. */
	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	put_device(&epc->dev);
	kfree(epc);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun /**
682*4882a593Smuzhiyun * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
683*4882a593Smuzhiyun * @dev: device that is creating the new EPC
684*4882a593Smuzhiyun * @ops: function pointers for performing EPC operations
685*4882a593Smuzhiyun * @owner: the owner of the module that creates the EPC device
686*4882a593Smuzhiyun *
687*4882a593Smuzhiyun * Invoke to create a new EPC device and add it to pci_epc class.
688*4882a593Smuzhiyun * While at that, it also associates the device with the pci_epc using devres.
689*4882a593Smuzhiyun * On driver detach, release function is invoked on the devres data,
690*4882a593Smuzhiyun * then, devres data is freed.
691*4882a593Smuzhiyun */
692*4882a593Smuzhiyun struct pci_epc *
__devm_pci_epc_create(struct device * dev,const struct pci_epc_ops * ops,struct module * owner)693*4882a593Smuzhiyun __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
694*4882a593Smuzhiyun struct module *owner)
695*4882a593Smuzhiyun {
696*4882a593Smuzhiyun struct pci_epc **ptr, *epc;
697*4882a593Smuzhiyun
698*4882a593Smuzhiyun ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
699*4882a593Smuzhiyun if (!ptr)
700*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
701*4882a593Smuzhiyun
702*4882a593Smuzhiyun epc = __pci_epc_create(dev, ops, owner);
703*4882a593Smuzhiyun if (!IS_ERR(epc)) {
704*4882a593Smuzhiyun *ptr = epc;
705*4882a593Smuzhiyun devres_add(dev, ptr);
706*4882a593Smuzhiyun } else {
707*4882a593Smuzhiyun devres_free(ptr);
708*4882a593Smuzhiyun }
709*4882a593Smuzhiyun
710*4882a593Smuzhiyun return epc;
711*4882a593Smuzhiyun }
712*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
713*4882a593Smuzhiyun
pci_epc_init(void)714*4882a593Smuzhiyun static int __init pci_epc_init(void)
715*4882a593Smuzhiyun {
716*4882a593Smuzhiyun pci_epc_class = class_create(THIS_MODULE, "pci_epc");
717*4882a593Smuzhiyun if (IS_ERR(pci_epc_class)) {
718*4882a593Smuzhiyun pr_err("failed to create pci epc class --> %ld\n",
719*4882a593Smuzhiyun PTR_ERR(pci_epc_class));
720*4882a593Smuzhiyun return PTR_ERR(pci_epc_class);
721*4882a593Smuzhiyun }
722*4882a593Smuzhiyun
723*4882a593Smuzhiyun return 0;
724*4882a593Smuzhiyun }
725*4882a593Smuzhiyun module_init(pci_epc_init);
726*4882a593Smuzhiyun
pci_epc_exit(void)727*4882a593Smuzhiyun static void __exit pci_epc_exit(void)
728*4882a593Smuzhiyun {
729*4882a593Smuzhiyun class_destroy(pci_epc_class);
730*4882a593Smuzhiyun }
731*4882a593Smuzhiyun module_exit(pci_epc_exit);
732*4882a593Smuzhiyun
/* Module metadata */
MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
736