// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * Author: Weidong Han <weidong.han@intel.com>
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>

static int xen_mcfg_late(void);
#endif

static bool __read_mostly pci_seg_supported = true;

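/*
 * Report a newly discovered PCI device to the hypervisor.  The
 * segment-aware PHYSDEVOP_pci_device_add op is tried first, optionally
 * carrying SR-IOV virtual-function and NUMA proximity (_PXM) details;
 * if the hypervisor does not implement it, fall back to the legacy
 * PHYSDEVOP_manage_pci_* ops, which only cover PCI segment 0.
 */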
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation, since this may be
	 * called from inside acpi_init() immediately after the MCFG table
	 * has been parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif
	if (pci_seg_supported) {
		struct {
			struct physdev_pci_device_add add;
			uint32_t pxm;
		} add_ext = {
			.add.seg = pci_domain_nr(pci_dev->bus),
			.add.bus = pci_dev->bus->number,
			.add.devfn = pci_dev->devfn
		};
		struct physdev_pci_device_add *add = &add_ext.add;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI namespace
			 * at all. Try to get the ACPI handle of a parent
			 * PCI bus instead.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_virtfn	= 1,
			.physfn.bus	= physfn->bus->number,
			.physfn.devfn	= physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus		= pci_dev->bus->number,
			.devfn		= pci_dev->devfn,
			.is_extfn	= 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
			&manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus	= pci_dev->bus->number,
			.devfn	= pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
			&manage_pci);
	}

	return r;
}

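/*
 * Tell the hypervisor that a PCI device is being removed, using the
 * segment-aware op when available and the legacy segment-0 op otherwise.
 */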
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}

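/* Bus notifier: forward PCI device add/remove events to the hypervisor. */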
static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};

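/*
 * Only the initial domain (dom0) owns the physical PCI devices, so the
 * notifier is registered only there.
 */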
static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);

#ifdef CONFIG_PCI_MMCONFIG
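/*
 * Report the MMCONFIG (MCFG) regions parsed from ACPI to the hypervisor
 * so it can verify and reserve them before extended config space is used.
 */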
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}
	return 0;
}
#endif