xref: /OK3568_Linux_fs/kernel/include/linux/pci.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *	pci.h
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *	PCI defines and function prototypes
6*4882a593Smuzhiyun  *	Copyright 1994, Drew Eckhardt
7*4882a593Smuzhiyun  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  *	PCI Express ASPM defines and function prototypes
10*4882a593Smuzhiyun  *	Copyright (c) 2007 Intel Corp.
11*4882a593Smuzhiyun  *		Zhang Yanmin (yanmin.zhang@intel.com)
12*4882a593Smuzhiyun  *		Shaohua Li (shaohua.li@intel.com)
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  *	For more information, please consult the following manuals (look at
15*4882a593Smuzhiyun  *	http://www.pcisig.com/ for how to get them):
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  *	PCI BIOS Specification
18*4882a593Smuzhiyun  *	PCI Local Bus Specification
19*4882a593Smuzhiyun  *	PCI to PCI Bridge Specification
20*4882a593Smuzhiyun  *	PCI Express Specification
21*4882a593Smuzhiyun  *	PCI System Design Guide
22*4882a593Smuzhiyun  */
23*4882a593Smuzhiyun #ifndef LINUX_PCI_H
24*4882a593Smuzhiyun #define LINUX_PCI_H
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #include <linux/mod_devicetable.h>
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #include <linux/types.h>
30*4882a593Smuzhiyun #include <linux/init.h>
31*4882a593Smuzhiyun #include <linux/ioport.h>
32*4882a593Smuzhiyun #include <linux/list.h>
33*4882a593Smuzhiyun #include <linux/compiler.h>
34*4882a593Smuzhiyun #include <linux/errno.h>
35*4882a593Smuzhiyun #include <linux/kobject.h>
36*4882a593Smuzhiyun #include <linux/atomic.h>
37*4882a593Smuzhiyun #include <linux/device.h>
38*4882a593Smuzhiyun #include <linux/interrupt.h>
39*4882a593Smuzhiyun #include <linux/io.h>
40*4882a593Smuzhiyun #include <linux/resource_ext.h>
41*4882a593Smuzhiyun #include <uapi/linux/pci.h>
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #include <linux/pci_ids.h>
44*4882a593Smuzhiyun #include <linux/android_kabi.h>
45*4882a593Smuzhiyun 
/* Mask of all error-reporting bits in the PCI Status register (PCI_STATUS) */
#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
			       PCI_STATUS_SIG_SYSTEM_ERROR | \
			       PCI_STATUS_REC_MASTER_ABORT | \
			       PCI_STATUS_REC_TARGET_ABORT | \
			       PCI_STATUS_SIG_TARGET_ABORT | \
			       PCI_STATUS_PARITY)
52*4882a593Smuzhiyun 
/*
 * The PCI interface treats multi-function devices as independent
 * devices.  The slot/function address of each device is encoded
 * in a single byte as follows:
 *
 *	7:3 = slot
 *	2:0 = function
 *
 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
 * In the interest of not exposing interfaces to user-space unnecessarily,
 * the following kernel-only defines are being added here.
 */
/* Combine bus number and devfn into a 16-bit device ID: bus in 15:8, devfn in 7:0 */
#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
68*4882a593Smuzhiyun 
/* pci_slot represents a physical slot */
struct pci_slot {
	struct pci_bus		*bus;		/* Bus this slot is on */
	struct list_head	list;		/* Node in list of slots */
	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
	struct kobject		kobj;		/* Backs the sysfs entry; its name is pci_slot_name() */
};
77*4882a593Smuzhiyun 
pci_slot_name(const struct pci_slot * slot)78*4882a593Smuzhiyun static inline const char *pci_slot_name(const struct pci_slot *slot)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	return kobject_name(&slot->kobj);
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun 
/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
	pci_mmap_io,	/* Mapping targets I/O port space */
	pci_mmap_mem	/* Mapping targets memory space */
};
88*4882a593Smuzhiyun 
/* For PCI devices, the region numbers are assigned this way: */
enum {
	/* #0-5: standard PCI resources */
	PCI_STD_RESOURCES,
	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,

	/* #6: expansion ROM resource */
	PCI_ROM_RESOURCE,

	/* Device-specific resources */
#ifdef CONFIG_PCI_IOV
	PCI_IOV_RESOURCES,
	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

/*
 * The window macros below reference PCI_BRIDGE_RESOURCES, which is an
 * enumerator defined further down in this enum; that is fine because
 * macros expand at their point of use, not at definition.
 */
/* PCI-to-PCI (P2P) bridge windows */
#define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
#define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
#define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)

/* CardBus bridge windows */
#define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
#define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
#define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
#define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)

/* Total number of bridge resources for P2P and CardBus */
#define PCI_BRIDGE_RESOURCE_NUM 4

	/* Resources assigned to buses behind the bridge */
	PCI_BRIDGE_RESOURCES,
	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
				  PCI_BRIDGE_RESOURCE_NUM - 1,

	/* Total resources associated with a PCI device */
	PCI_NUM_RESOURCES,

	/* Preserve this for compatibility */
	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
129*4882a593Smuzhiyun 
/**
 * enum pci_interrupt_pin - PCI INTx interrupt values
 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
 * @PCI_INTERRUPT_INTA: PCI INTA pin
 * @PCI_INTERRUPT_INTB: PCI INTB pin
 * @PCI_INTERRUPT_INTC: PCI INTC pin
 * @PCI_INTERRUPT_INTD: PCI INTD pin
 *
 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
 * PCI_INTERRUPT_PIN register.
 */
enum pci_interrupt_pin {
	PCI_INTERRUPT_UNKNOWN,
	PCI_INTERRUPT_INTA,
	PCI_INTERRUPT_INTB,
	PCI_INTERRUPT_INTC,
	PCI_INTERRUPT_INTD,
};

/* The number of legacy PCI INTx interrupts (INTA..INTD above) */
#define PCI_NUM_INTX	4
151*4882a593Smuzhiyun 
/*
 * pci_power_t values must match the bits in the Capabilities PME_Support
 * and Control/Status PowerState fields in the Power Management capability.
 */
typedef int __bitwise pci_power_t;

#define PCI_D0		((pci_power_t __force) 0)	/* Device fully functional */
#define PCI_D1		((pci_power_t __force) 1)
#define PCI_D2		((pci_power_t __force) 2)
#define PCI_D3hot	((pci_power_t __force) 3)
#define PCI_D3cold	((pci_power_t __force) 4)
#define PCI_UNKNOWN	((pci_power_t __force) 5)
#define PCI_POWER_ERROR	((pci_power_t __force) -1)	/* -1 maps to pci_power_names[0] */

/* Remember to update this when the list above changes! */
extern const char *pci_power_names[];
168*4882a593Smuzhiyun 
/*
 * Human-readable name for a pci_power_t state.  The index is offset by 1
 * so that PCI_POWER_ERROR (-1) maps to pci_power_names[0].
 */
static inline const char *pci_power_name(pci_power_t state)
{
	int idx = (__force int) state;

	return pci_power_names[idx + 1];
}
173*4882a593Smuzhiyun 
/**
 * typedef pci_channel_state_t
 *
 * The pci_channel state describes connectivity between the CPU and
 * the PCI device.  If some PCI bus between here and the PCI device
 * has crashed or locked up, this info is reflected here.
 */
typedef unsigned int __bitwise pci_channel_state_t;

enum {
	/* I/O channel is in normal state */
	pci_channel_io_normal = (__force pci_channel_state_t) 1,

	/* I/O to channel is blocked */
	pci_channel_io_frozen = (__force pci_channel_state_t) 2,

	/* PCI card is dead */
	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};
193*4882a593Smuzhiyun 
/* Reset type requested of a PCIe device (see enum pcie_reset_state) */
typedef unsigned int __bitwise pcie_reset_state_t;

enum pcie_reset_state {
	/* Reset is NOT asserted (Use to deassert reset) */
	pcie_deassert_reset = (__force pcie_reset_state_t) 1,

	/* Use #PERST to reset PCIe device */
	pcie_warm_reset = (__force pcie_reset_state_t) 2,

	/* Use PCIe Hot Reset to reset device */
	pcie_hot_reset = (__force pcie_reset_state_t) 3
};
206*4882a593Smuzhiyun 
/* Per-device quirk flags, stored in pci_dev.dev_flags (bitmask) */
typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
	/* Device configuration is irrevocably lost if disabled into D3 */
	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
	/* Provide indication device is assigned by a Virtual Machine Manager */
	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
	/* Flag for quirk use to store if quirk-specific ACS is enabled */
	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
	/* NOTE: bit 4 is intentionally left unassigned here */
	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
	/* Do not use bus resets for device */
	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
	/* Do not use PM reset even if device advertises NoSoftRst- */
	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
	/* Get VPD from function 0 VPD */
	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
	/* A non-root bridge where translation occurs, stop alias search here */
	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
	/* Do not use FLR even if device advertises PCI_AF_CAP */
	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
	/* Don't use Relaxed Ordering for TLPs directed at this device */
	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
	/* Device does honor MSI masking despite saying otherwise */
	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};
234*4882a593Smuzhiyun 
/* IRQ rerouting quirk variants; fits pci_dev.irq_reroute_variant (2 bits) */
enum pci_irq_reroute_variant {
	INTEL_IRQ_REROUTE_VARIANT = 1,
	MAX_IRQ_REROUTE_VARIANTS = 3
};
239*4882a593Smuzhiyun 
/* Per-bus restriction flags (bitmask; values are powers of two) */
typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
};
247*4882a593Smuzhiyun 
/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV	= 0x00,
	PCIE_LNK_X1		= 0x01,
	PCIE_LNK_X2		= 0x02,
	PCIE_LNK_X4		= 0x04,
	PCIE_LNK_X8		= 0x08,
	PCIE_LNK_X12		= 0x0c,
	PCIE_LNK_X16		= 0x10,
	PCIE_LNK_X32		= 0x20,
	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
};
260*4882a593Smuzhiyun 
/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
	PCI_SPEED_33MHz			= 0x00,
	PCI_SPEED_66MHz			= 0x01,
	PCI_SPEED_66MHz_PCIX		= 0x02,
	PCI_SPEED_100MHz_PCIX		= 0x03,
	PCI_SPEED_133MHz_PCIX		= 0x04,
	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
	/* NOTE: 0x08 is intentionally unused here */
	PCI_SPEED_66MHz_PCIX_266	= 0x09,
	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
	AGP_UNKNOWN			= 0x0c,
	AGP_1X				= 0x0d,
	AGP_2X				= 0x0e,
	AGP_4X				= 0x0f,
	AGP_8X				= 0x10,
	PCI_SPEED_66MHz_PCIX_533	= 0x11,
	PCI_SPEED_100MHz_PCIX_533	= 0x12,
	PCI_SPEED_133MHz_PCIX_533	= 0x13,
	PCIE_SPEED_2_5GT		= 0x14,
	PCIE_SPEED_5_0GT		= 0x15,
	PCIE_SPEED_8_0GT		= 0x16,
	PCIE_SPEED_16_0GT		= 0x17,
	PCIE_SPEED_32_0GT		= 0x18,
	PCI_SPEED_UNKNOWN		= 0xff,
};
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
291*4882a593Smuzhiyun enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
292*4882a593Smuzhiyun 
/* Saved register contents of one capability (used for suspend/resume) */
struct pci_cap_saved_data {
	u16		cap_nr;		/* Capability number/ID */
	bool		cap_extended;	/* True if an extended (PCIe) capability */
	unsigned int	size;		/* Size of data[] in bytes */
	u32		data[];		/* Saved registers (flexible array, must be last) */
};
299*4882a593Smuzhiyun 
/* Hlist node wrapping pci_cap_saved_data (see pci_dev.saved_cap_space) */
struct pci_cap_saved_state {
	struct hlist_node		next;	/* Node in saved-capability list */
	struct pci_cap_saved_data	cap;	/* Must be last: ends in flexible array */
};
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun struct irq_affinity;
306*4882a593Smuzhiyun struct pcie_link_state;
307*4882a593Smuzhiyun struct pci_vpd;
308*4882a593Smuzhiyun struct pci_sriov;
309*4882a593Smuzhiyun struct pci_p2pdma;
310*4882a593Smuzhiyun 
/* The pci_dev structure describes PCI devices */
struct pci_dev {
	struct list_head bus_list;	/* Node in per-bus list */
	struct pci_bus	*bus;		/* Bus this device is on */
	struct pci_bus	*subordinate;	/* Bus this device bridges to */

	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
	struct pci_slot	*slot;		/* Physical slot this device is in */

	unsigned int	devfn;		/* Encoded device & function index */
	unsigned short	vendor;
	unsigned short	device;
	unsigned short	subsystem_vendor;
	unsigned short	subsystem_device;
	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
	u8		revision;	/* PCI revision, low byte of class word */
	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
	u16		aer_cap;	/* AER capability offset */
	struct aer_stats *aer_stats;	/* AER stats for this device */
#endif
	u8		pcie_cap;	/* PCIe capability offset */
	u8		msi_cap;	/* MSI capability offset */
	u8		msix_cap;	/* MSI-X capability offset */
	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
	u8		rom_base_reg;	/* Config register controlling ROM */
	u8		pin;		/* Interrupt pin this device uses */
	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */

	struct pci_driver *driver;	/* Driver bound to this device */
	u64		dma_mask;	/* Mask of the bits of bus address this
					   device implements.  Normally this is
					   0xffffffff.  You only need to change
					   this if your device has broken DMA
					   or supports 64-bit transfers.  */

	struct device_dma_parameters dma_parms;

	pci_power_t	current_state;	/* Current operating state. In ACPI,
					   this is D0-D3, D0 being fully
					   functional, and D3 being off. */
	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
	u8		pm_cap;		/* PM capability offset */
	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
					   can be generated */
	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
	unsigned int	d1_support:1;	/* Low power state D1 is supported */
	unsigned int	d2_support:1;	/* Low power state D2 is supported */
	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
						   decoding during BAR sizing */
	unsigned int	wakeup_prepared:1;	/* Internal PM wakeup bookkeeping */
	unsigned int	runtime_d3cold:1;	/* Whether go through runtime
						   D3cold, not set for devices
						   powered on/off by the
						   corresponding bridge */
	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
						      controlled exclusively by
						      user sysfs */
	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
						   bit manually */
	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */

#ifdef CONFIG_PCIEASPM
	struct pcie_link_state	*link_state;	/* ASPM link state */
	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
					   supported from root to here */
	int		l1ss;		/* L1SS Capability pointer */
#endif
	unsigned int	eetlp_prefix_path:1;	/* End-to-End TLP Prefix */

	pci_channel_state_t error_state;	/* Current connectivity state */
	struct device	dev;			/* Generic device interface */

	int		cfg_size;		/* Size of config space */

	/*
	 * Instead of touching interrupt line and base address registers
	 * directly, use the values stored here. They might be different!
	 */
	unsigned int	irq;
	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */

	bool		match_driver;		/* Skip attaching driver */

	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	io_window:1;		/* Bridge has I/O window */
	unsigned int	pref_window:1;		/* Bridge has pref mem window */
	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
	unsigned int	block_cfg_access:1;	/* Config space access blocked */
	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
	unsigned int	msi_enabled:1;
	unsigned int	msix_enabled:1;
	unsigned int	ari_enabled:1;		/* ARI forwarding */
	unsigned int	ats_enabled:1;		/* Address Translation Svc */
	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
	unsigned int	pri_enabled:1;		/* Page Request Interface */
	unsigned int	is_managed:1;
	unsigned int	needs_freset:1;		/* Requires fundamental reset */
	unsigned int	state_saved:1;		/* Config state saved (see saved_config_space) */
	unsigned int	is_physfn:1;		/* SR-IOV physical function (PF) */
	unsigned int	is_virtfn:1;		/* SR-IOV virtual function (VF) */
	unsigned int	reset_fn:1;
	unsigned int	is_hotplug_bridge:1;
	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
	/*
	 * Devices marked being untrusted are the ones that can potentially
	 * execute DMA attacks and similar. They are typically connected
	 * through external ports such as Thunderbolt but not limited to
	 * that. When an IOMMU is enabled they should be getting full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	untrusted:1;
	/*
	 * Info from the platform, e.g., ACPI or device tree, may mark a
	 * device as "external-facing".  An external-facing device is
	 * itself internal but devices downstream from it are external.
	 */
	unsigned int	external_facing:1;
	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
	unsigned int	irq_managed:1;
	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
	unsigned int	is_probed:1;		/* Device probing in progress */
	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
	pci_dev_flags_t dev_flags;		/* Quirk flags (enum pci_dev_flags) */
	atomic_t	enable_cnt;	/* pci_enable_device has been called */

#ifdef CONFIG_NO_GKI
	atomic_t	sysfs_init_cnt;		/* pci_create_sysfs_dev_files has been called */
#endif
	u32		saved_config_space[16]; /* Config space saved at suspend time */
	struct hlist_head saved_cap_space;	/* List of pci_cap_saved_state */
	struct bin_attribute *rom_attr;		/* Attribute descriptor for sysfs ROM entry */
	int		rom_attr_enabled;	/* Display of ROM attribute enabled? */
	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
	unsigned int	ptm_root:1;
	unsigned int	ptm_enabled:1;
	u8		ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
	const struct attribute_group **msi_irq_groups;
#endif
	struct pci_vpd *vpd;
#ifdef CONFIG_PCIE_DPC
	u16		dpc_cap;
	unsigned int	dpc_rp_extensions:1;
	u8		dpc_rp_log_size;
#endif
#ifdef CONFIG_PCI_ATS
	union {
		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
		struct pci_dev		*physfn;	/* VF: related PF */
	};
	u16		ats_cap;	/* ATS Capability offset */
	u8		ats_stu;	/* ATS Smallest Translation Unit */
#endif
#ifdef CONFIG_PCI_PRI
	u16		pri_cap;	/* PRI Capability offset */
	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
	unsigned int	pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
	u16		pasid_cap;	/* PASID Capability offset */
	u16		pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
	struct pci_p2pdma *p2pdma;
#endif
	u16		acs_cap;	/* ACS Capability offset */
	phys_addr_t	rom;		/* Physical address if not from BAR */
	size_t		romlen;		/* Length if not from BAR */
	char		*driver_override; /* Driver name to force a match */

	unsigned long	priv_flags;	/* Private flags for the PCI driver */

	/* Padding reserved for Android KABI stability — do not remove/reorder */
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
515*4882a593Smuzhiyun 
/*
 * Resolve a device to its physical function: for an SR-IOV virtual
 * function this returns the owning PF; otherwise the device itself.
 */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pf = dev;

	if (pf->is_virtfn)
		pf = pf->physfn;
	return pf;
#else
	return dev;
#endif
}
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
526*4882a593Smuzhiyun 
/* Convert an embedded struct device pointer back to its struct pci_dev */
#define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
/* Iterate over all PCI devices via pci_get_device(); d should start NULL — see pci_get_device() */
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
529*4882a593Smuzhiyun 
pci_channel_offline(struct pci_dev * pdev)530*4882a593Smuzhiyun static inline int pci_channel_offline(struct pci_dev *pdev)
531*4882a593Smuzhiyun {
532*4882a593Smuzhiyun 	return (pdev->error_state != pci_channel_io_normal);
533*4882a593Smuzhiyun }
534*4882a593Smuzhiyun 
/* Host bridge: the root of a PCI bus hierarchy, embedding a generic device */
struct pci_host_bridge {
	struct device	dev;
	struct pci_bus	*bus;		/* Root bus */
	struct pci_ops	*ops;		/* Config accessors for the root bus */
	struct pci_ops	*child_ops;	/* Config accessors for child buses */
	void		*sysdata;
	int		busnr;		/* Root bus number */
	struct list_head windows;	/* resource_entry */
	struct list_head dma_ranges;	/* dma ranges resource list */
	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
	int (*map_irq)(const struct pci_dev *, u8, u8);
	void (*release_fn)(struct pci_host_bridge *);	/* See pci_set_host_bridge_release() */
	void		*release_data;
	struct msi_controller *msi;
	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
	unsigned int	no_ext_tags:1;		/* No Extended Tags */
	unsigned int	native_aer:1;		/* OS may use PCIe AER */
	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
	unsigned int	native_pme:1;		/* OS may use PCIe PME */
	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
	unsigned int	size_windows:1;		/* Enable root bus sizing */

	/* Resource alignment requirements */
	resource_size_t (*align_resource)(struct pci_dev *dev,
			const struct resource *res,
			resource_size_t start,
			resource_size_t size,
			resource_size_t align);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/* Driver-private area; flexible array, must stay last (see pci_host_bridge_priv()) */
	unsigned long	private[] ____cacheline_aligned;
};
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
574*4882a593Smuzhiyun 
pci_host_bridge_priv(struct pci_host_bridge * bridge)575*4882a593Smuzhiyun static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
576*4882a593Smuzhiyun {
577*4882a593Smuzhiyun 	return (void *)bridge->private;
578*4882a593Smuzhiyun }
579*4882a593Smuzhiyun 
pci_host_bridge_from_priv(void * priv)580*4882a593Smuzhiyun static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
581*4882a593Smuzhiyun {
582*4882a593Smuzhiyun 	return container_of(priv, struct pci_host_bridge, private);
583*4882a593Smuzhiyun }
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
586*4882a593Smuzhiyun struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
587*4882a593Smuzhiyun 						   size_t priv);
588*4882a593Smuzhiyun void pci_free_host_bridge(struct pci_host_bridge *bridge);
589*4882a593Smuzhiyun struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
592*4882a593Smuzhiyun 				 void (*release_fn)(struct pci_host_bridge *),
593*4882a593Smuzhiyun 				 void *release_data);
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun /*
598*4882a593Smuzhiyun  * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
599*4882a593Smuzhiyun  * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
600*4882a593Smuzhiyun  * buses below host bridges or subtractive decode bridges) go in the list.
601*4882a593Smuzhiyun  * Use pci_bus_for_each_resource() to iterate through all the resources.
602*4882a593Smuzhiyun  */
603*4882a593Smuzhiyun 
604*4882a593Smuzhiyun /*
605*4882a593Smuzhiyun  * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
606*4882a593Smuzhiyun  * and there's no way to program the bridge with the details of the window.
607*4882a593Smuzhiyun  * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
608*4882a593Smuzhiyun  * decode bit set, because they are explicit and can be programmed with _SRS.
609*4882a593Smuzhiyun  */
610*4882a593Smuzhiyun #define PCI_SUBTRACTIVE_DECODE	0x1
611*4882a593Smuzhiyun 
/*
 * List entry tying an additional address window (beyond the fixed
 * per-bridge resource table in struct pci_bus) to a bus; @flags may
 * carry PCI_SUBTRACTIVE_DECODE (defined above).
 */
struct pci_bus_resource {
	struct list_head	list;	/* Node in pci_bus::resources */
	struct resource		*res;	/* The window itself */
	unsigned int		flags;
};
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
619*4882a593Smuzhiyun 
/*
 * One PCI bus segment: its position in the bus tree, the devices and
 * slots attached to it, the address space and bus numbers routed to
 * it, and the ops used to generate config accesses on it.
 */
struct pci_bus {
	struct list_head node;		/* Node in list of buses */
	struct pci_bus	*parent;	/* Parent bus this bridge is on */
	struct list_head children;	/* List of child buses */
	struct list_head devices;	/* List of devices on this bus */
	struct pci_dev	*self;		/* Bridge device as seen by parent */
	struct list_head slots;		/* List of slots on this bus;
					   protected by pci_slot_mutex */
	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];	/* Fixed bridge windows */
	struct list_head resources;	/* Address space routed to this bus */
	struct resource busn_res;	/* Bus numbers routed to this bus */

	struct pci_ops	*ops;		/* Configuration access functions */
	struct msi_controller *msi;	/* MSI controller */
	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */

	unsigned char	number;		/* Bus number */
	unsigned char	primary;	/* Number of primary bridge */
	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	int		domain_nr;	/* PCI domain (segment) number */
#endif

	char		name[48];

	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
	struct device		*bridge;
	struct device		dev;
	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
	struct bin_attribute	*legacy_mem;	/* Legacy mem */
	unsigned int		is_added:1;	/* NOTE(review): appears to track
						   device-model registration — confirm */

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
662*4882a593Smuzhiyun 
pci_dev_id(struct pci_dev * dev)663*4882a593Smuzhiyun static inline u16 pci_dev_id(struct pci_dev *dev)
664*4882a593Smuzhiyun {
665*4882a593Smuzhiyun 	return PCI_DEVID(dev->bus->number, dev->devfn);
666*4882a593Smuzhiyun }
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun /*
669*4882a593Smuzhiyun  * Returns true if the PCI bus is root (behind host-PCI bridge),
670*4882a593Smuzhiyun  * false otherwise
671*4882a593Smuzhiyun  *
672*4882a593Smuzhiyun  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
673*4882a593Smuzhiyun  * This is incorrect because "virtual" buses added for SR-IOV (via
674*4882a593Smuzhiyun  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
675*4882a593Smuzhiyun  */
pci_is_root_bus(struct pci_bus * pbus)676*4882a593Smuzhiyun static inline bool pci_is_root_bus(struct pci_bus *pbus)
677*4882a593Smuzhiyun {
678*4882a593Smuzhiyun 	return !(pbus->parent);
679*4882a593Smuzhiyun }
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun /**
682*4882a593Smuzhiyun  * pci_is_bridge - check if the PCI device is a bridge
683*4882a593Smuzhiyun  * @dev: PCI device
684*4882a593Smuzhiyun  *
685*4882a593Smuzhiyun  * Return true if the PCI device is bridge whether it has subordinate
686*4882a593Smuzhiyun  * or not.
687*4882a593Smuzhiyun  */
pci_is_bridge(struct pci_dev * dev)688*4882a593Smuzhiyun static inline bool pci_is_bridge(struct pci_dev *dev)
689*4882a593Smuzhiyun {
690*4882a593Smuzhiyun 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
691*4882a593Smuzhiyun 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
692*4882a593Smuzhiyun }
693*4882a593Smuzhiyun 
/*
 * Iterate over only the bridge devices on @bus.  The inverted
 * "if (!...) {} else" form keeps the macro safe against a dangling
 * else in the caller while skipping non-bridge devices.
 */
#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else
697*4882a593Smuzhiyun 
pci_upstream_bridge(struct pci_dev * dev)698*4882a593Smuzhiyun static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
699*4882a593Smuzhiyun {
700*4882a593Smuzhiyun 	dev = pci_physfn(dev);
701*4882a593Smuzhiyun 	if (pci_is_root_bus(dev->bus))
702*4882a593Smuzhiyun 		return NULL;
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 	return dev->bus->self;
705*4882a593Smuzhiyun }
706*4882a593Smuzhiyun 
#ifdef CONFIG_PCI_MSI
/* True if MSI or MSI-X interrupts are currently enabled on @pci_dev. */
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
/* MSI support compiled out: never enabled. */
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif
715*4882a593Smuzhiyun 
/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/*
 * Translate the positive PCIBIOS_* codes above to generic negative
 * errno values for passing back through non-PCI code.  Values at or
 * below PCIBIOS_SUCCESSFUL (0) are assumed to already be errno (or
 * success) and pass through unchanged; unrecognized positive codes
 * map to -ERANGE.
 */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* Assume already errno */

	if (err == PCIBIOS_FUNC_NOT_SUPPORTED)
		return -ENOENT;
	if (err == PCIBIOS_BAD_VENDOR_ID)
		return -ENOTTY;
	if (err == PCIBIOS_DEVICE_NOT_FOUND)
		return -ENODEV;
	if (err == PCIBIOS_BAD_REGISTER_NUMBER)
		return -EFAULT;
	if (err == PCIBIOS_SET_FAILED)
		return -EIO;
	if (err == PCIBIOS_BUFFER_TOO_SMALL)
		return -ENOSPC;

	return -ERANGE;
}
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun /* Low-level architecture-dependent routines */
750*4882a593Smuzhiyun 
/*
 * Low-level config-space accessors for a bus.  read()/write() transfer
 * @size bytes at offset @where of @devfn's config space; map_bus()
 * (optional) maps (bus, devfn, where) to a config-space I/O address.
 * add_bus()/remove_bus() are invoked when a bus is added or removed.
 */
struct pci_ops {
	int (*add_bus)(struct pci_bus *bus);
	void (*remove_bus)(struct pci_bus *bus);
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);

	ANDROID_KABI_RESERVE(1);
};
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun /*
762*4882a593Smuzhiyun  * ACPI needs to be able to access PCI config space before we've done a
763*4882a593Smuzhiyun  * PCI bus scan and created pci_bus structures.
764*4882a593Smuzhiyun  */
765*4882a593Smuzhiyun int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
766*4882a593Smuzhiyun 		 int reg, int len, u32 *val);
767*4882a593Smuzhiyun int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
768*4882a593Smuzhiyun 		  int reg, int len, u32 val);
769*4882a593Smuzhiyun 
/* Width of a PCI bus address: 64-bit when DMA addresses are 64-bit */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif

/* An address range expressed in PCI bus-address space */
struct pci_bus_region {
	pci_bus_addr_t	start;
	pci_bus_addr_t	end;
};

/* Per-driver container for device IDs added at runtime (see pci_driver) */
struct pci_dynids {
	spinlock_t		lock;	/* Protects list, index */
	struct list_head	list;	/* For IDs added at runtime */
};
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun /*
788*4882a593Smuzhiyun  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
789*4882a593Smuzhiyun  * a set of callbacks in struct pci_error_handlers, that device driver
790*4882a593Smuzhiyun  * will be notified of PCI bus errors, and will be driven to recovery
791*4882a593Smuzhiyun  * when an error occurs.
792*4882a593Smuzhiyun  */
793*4882a593Smuzhiyun 
/* Result codes returned by drivers' pci_error_handlers callbacks */
typedef unsigned int __bitwise pci_ers_result_t;

enum pci_ers_result {
	/* No result/none/not supported in device driver */
	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

	/* Device driver can recover without slot reset */
	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

	/* Device driver wants slot to be reset */
	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

	/* Device has completely failed, is unrecoverable */
	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

	/* Device driver is fully recovered and operational */
	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

	/* No AER capabilities registered for the driver */
	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};
815*4882a593Smuzhiyun 
/*
 * PCI bus error event callbacks.  The pci_ers_result_t-returning hooks
 * report back one of the enum pci_ers_result values defined above.
 */
struct pci_error_handlers {
	/* PCI bus error detected on this device */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
					   pci_channel_state_t error);

	/* MMIO has been re-enabled, but not DMA */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

	/* PCI slot has been reset */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

	/* PCI function reset prepare or completed */
	void (*reset_prepare)(struct pci_dev *dev);
	void (*reset_done)(struct pci_dev *dev);

	/* Device driver may resume normal operations */
	void (*resume)(struct pci_dev *dev);

	ANDROID_KABI_RESERVE(1);
};
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun struct module;
840*4882a593Smuzhiyun 
841*4882a593Smuzhiyun /**
842*4882a593Smuzhiyun  * struct pci_driver - PCI driver structure
843*4882a593Smuzhiyun  * @node:	List of driver structures.
844*4882a593Smuzhiyun  * @name:	Driver name.
845*4882a593Smuzhiyun  * @id_table:	Pointer to table of device IDs the driver is
846*4882a593Smuzhiyun  *		interested in.  Most drivers should export this
847*4882a593Smuzhiyun  *		table using MODULE_DEVICE_TABLE(pci,...).
848*4882a593Smuzhiyun  * @probe:	This probing function gets called (during execution
849*4882a593Smuzhiyun  *		of pci_register_driver() for already existing
850*4882a593Smuzhiyun  *		devices or later if a new device gets inserted) for
851*4882a593Smuzhiyun  *		all PCI devices which match the ID table and are not
852*4882a593Smuzhiyun  *		"owned" by the other drivers yet. This function gets
853*4882a593Smuzhiyun  *		passed a "struct pci_dev \*" for each device whose
854*4882a593Smuzhiyun  *		entry in the ID table matches the device. The probe
855*4882a593Smuzhiyun  *		function returns zero when the driver chooses to
856*4882a593Smuzhiyun  *		take "ownership" of the device or an error code
857*4882a593Smuzhiyun  *		(negative number) otherwise.
858*4882a593Smuzhiyun  *		The probe function always gets called from process
859*4882a593Smuzhiyun  *		context, so it can sleep.
860*4882a593Smuzhiyun  * @remove:	The remove() function gets called whenever a device
861*4882a593Smuzhiyun  *		being handled by this driver is removed (either during
862*4882a593Smuzhiyun  *		deregistration of the driver or when it's manually
863*4882a593Smuzhiyun  *		pulled out of a hot-pluggable slot).
864*4882a593Smuzhiyun  *		The remove function always gets called from process
865*4882a593Smuzhiyun  *		context, so it can sleep.
866*4882a593Smuzhiyun  * @suspend:	Put device into low power state.
867*4882a593Smuzhiyun  * @resume:	Wake device from low power state.
868*4882a593Smuzhiyun  *		(Please see Documentation/power/pci.rst for descriptions
869*4882a593Smuzhiyun  *		of PCI Power Management and the related functions.)
870*4882a593Smuzhiyun  * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
871*4882a593Smuzhiyun  *		Intended to stop any idling DMA operations.
872*4882a593Smuzhiyun  *		Useful for enabling wake-on-lan (NIC) or changing
873*4882a593Smuzhiyun  *		the power state of a device before reboot.
874*4882a593Smuzhiyun  *		e.g. drivers/net/e100.c.
875*4882a593Smuzhiyun  * @sriov_configure: Optional driver callback to allow configuration of
876*4882a593Smuzhiyun  *		number of VFs to enable via sysfs "sriov_numvfs" file.
877*4882a593Smuzhiyun  * @err_handler: See Documentation/PCI/pci-error-recovery.rst
878*4882a593Smuzhiyun  * @groups:	Sysfs attribute groups.
879*4882a593Smuzhiyun  * @driver:	Driver model structure.
880*4882a593Smuzhiyun  * @dynids:	List of dynamically added device IDs.
881*4882a593Smuzhiyun  */
struct pci_driver {
	struct list_head	node;		/* List of driver structures */
	const char		*name;		/* Driver name */
	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
	void (*shutdown)(struct pci_dev *dev);	/* Reboot hook; stop idling DMA (see kernel-doc above) */
	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
	const struct pci_error_handlers *err_handler;	/* See Documentation/PCI/pci-error-recovery.rst */
	const struct attribute_group **groups;	/* Sysfs attribute groups */
	struct device_driver	driver;		/* Driver model structure */
	struct pci_dynids	dynids;		/* Dynamically added device IDs */

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun #define	to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun /**
906*4882a593Smuzhiyun  * PCI_DEVICE - macro used to describe a specific PCI device
907*4882a593Smuzhiyun  * @vend: the 16 bit PCI Vendor ID
908*4882a593Smuzhiyun  * @dev: the 16 bit PCI Device ID
909*4882a593Smuzhiyun  *
910*4882a593Smuzhiyun  * This macro is used to create a struct pci_device_id that matches a
911*4882a593Smuzhiyun  * specific device.  The subvendor and subdevice fields will be set to
912*4882a593Smuzhiyun  * PCI_ANY_ID.
913*4882a593Smuzhiyun  */
914*4882a593Smuzhiyun #define PCI_DEVICE(vend,dev) \
915*4882a593Smuzhiyun 	.vendor = (vend), .device = (dev), \
916*4882a593Smuzhiyun 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun /**
919*4882a593Smuzhiyun  * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
920*4882a593Smuzhiyun  * @vend: the 16 bit PCI Vendor ID
921*4882a593Smuzhiyun  * @dev: the 16 bit PCI Device ID
922*4882a593Smuzhiyun  * @subvend: the 16 bit PCI Subvendor ID
923*4882a593Smuzhiyun  * @subdev: the 16 bit PCI Subdevice ID
924*4882a593Smuzhiyun  *
925*4882a593Smuzhiyun  * This macro is used to create a struct pci_device_id that matches a
926*4882a593Smuzhiyun  * specific device with subsystem information.
927*4882a593Smuzhiyun  */
928*4882a593Smuzhiyun #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
929*4882a593Smuzhiyun 	.vendor = (vend), .device = (dev), \
930*4882a593Smuzhiyun 	.subvendor = (subvend), .subdevice = (subdev)
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun /**
933*4882a593Smuzhiyun  * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
934*4882a593Smuzhiyun  * @dev_class: the class, subclass, prog-if triple for this device
935*4882a593Smuzhiyun  * @dev_class_mask: the class mask for this device
936*4882a593Smuzhiyun  *
937*4882a593Smuzhiyun  * This macro is used to create a struct pci_device_id that matches a
938*4882a593Smuzhiyun  * specific PCI class.  The vendor, device, subvendor, and subdevice
939*4882a593Smuzhiyun  * fields will be set to PCI_ANY_ID.
940*4882a593Smuzhiyun  */
941*4882a593Smuzhiyun #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
942*4882a593Smuzhiyun 	.class = (dev_class), .class_mask = (dev_class_mask), \
943*4882a593Smuzhiyun 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
944*4882a593Smuzhiyun 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun /**
947*4882a593Smuzhiyun  * PCI_VDEVICE - macro used to describe a specific PCI device in short form
948*4882a593Smuzhiyun  * @vend: the vendor name
949*4882a593Smuzhiyun  * @dev: the 16 bit PCI Device ID
950*4882a593Smuzhiyun  *
951*4882a593Smuzhiyun  * This macro is used to create a struct pci_device_id that matches a
952*4882a593Smuzhiyun  * specific PCI device.  The subvendor, and subdevice fields will be set
953*4882a593Smuzhiyun  * to PCI_ANY_ID. The macro allows the next field to follow as the device
954*4882a593Smuzhiyun  * private data.
955*4882a593Smuzhiyun  */
/*
 * The trailing "0, 0" positionally zero the next two members of
 * struct pci_device_id so that driver-private data may follow as the
 * next initializer (see the kernel-doc above).
 * NOTE(review): relies on member order in struct pci_device_id.
 */
#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
959*4882a593Smuzhiyun 
960*4882a593Smuzhiyun /**
961*4882a593Smuzhiyun  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
962*4882a593Smuzhiyun  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
963*4882a593Smuzhiyun  * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
964*4882a593Smuzhiyun  * @data: the driver data to be filled
965*4882a593Smuzhiyun  *
966*4882a593Smuzhiyun  * This macro is used to create a struct pci_device_id that matches a
967*4882a593Smuzhiyun  * specific PCI device.  The subvendor, and subdevice fields will be set
968*4882a593Smuzhiyun  * to PCI_ANY_ID.
969*4882a593Smuzhiyun  */
/*
 * As with PCI_VDEVICE, the "0, 0" positionally zero the two members of
 * struct pci_device_id that precede driver_data.
 * NOTE(review): relies on member order in struct pci_device_id.
 */
#define PCI_DEVICE_DATA(vend, dev, data) \
	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
	.driver_data = (kernel_ulong_t)(data)
974*4882a593Smuzhiyun 
/* Bits for the global pci_flags word (tested via pci_has_flag() below) */
enum {
	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
};
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun #define PCI_IRQ_LEGACY		(1 << 0) /* Allow legacy interrupts */
986*4882a593Smuzhiyun #define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
987*4882a593Smuzhiyun #define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
988*4882a593Smuzhiyun #define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun /* These external functions are only available when PCI support is enabled */
991*4882a593Smuzhiyun #ifdef CONFIG_PCI
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun extern unsigned int pci_flags;
994*4882a593Smuzhiyun 
/*
 * Accessors for the global pci_flags word (PCI_REASSIGN_ALL_RSRC and
 * friends): overwrite, OR-in, clear, and test flag bits respectively.
 */
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun void pcie_bus_configure_settings(struct pci_bus *bus);
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun enum pcie_bus_config_types {
1003*4882a593Smuzhiyun 	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
1004*4882a593Smuzhiyun 	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
1005*4882a593Smuzhiyun 	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
1006*4882a593Smuzhiyun 	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
1007*4882a593Smuzhiyun 	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
1008*4882a593Smuzhiyun };
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun extern enum pcie_bus_config_types pcie_bus_config;
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun extern struct bus_type pci_bus_type;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun /* Do NOT directly access these two variables, unless you are arch-specific PCI
1015*4882a593Smuzhiyun  * code, or PCI core code. */
1016*4882a593Smuzhiyun extern struct list_head pci_root_buses;	/* List of all known PCI buses */
1017*4882a593Smuzhiyun /* Some device drivers need know if PCI is initiated */
1018*4882a593Smuzhiyun int no_pci_devices(void);
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun void pcibios_resource_survey_bus(struct pci_bus *bus);
1021*4882a593Smuzhiyun void pcibios_bus_add_device(struct pci_dev *pdev);
1022*4882a593Smuzhiyun void pcibios_add_bus(struct pci_bus *bus);
1023*4882a593Smuzhiyun void pcibios_remove_bus(struct pci_bus *bus);
1024*4882a593Smuzhiyun void pcibios_fixup_bus(struct pci_bus *);
1025*4882a593Smuzhiyun int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1026*4882a593Smuzhiyun /* Architecture-specific versions may override this (weak) */
1027*4882a593Smuzhiyun char *pcibios_setup(char *str);
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun /* Used only when drivers/pci/setup.c is used */
1030*4882a593Smuzhiyun resource_size_t pcibios_align_resource(void *, const struct resource *,
1031*4882a593Smuzhiyun 				resource_size_t,
1032*4882a593Smuzhiyun 				resource_size_t);
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun /* Weak but can be overridden by arch */
1035*4882a593Smuzhiyun void pci_fixup_cardbus(struct pci_bus *);
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun /* Generic PCI functions used internally */
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1040*4882a593Smuzhiyun 			     struct resource *res);
1041*4882a593Smuzhiyun void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1042*4882a593Smuzhiyun 			     struct pci_bus_region *region);
1043*4882a593Smuzhiyun void pcibios_scan_specific_bus(int busn);
1044*4882a593Smuzhiyun struct pci_bus *pci_find_bus(int domain, int busnr);
1045*4882a593Smuzhiyun void pci_bus_add_devices(const struct pci_bus *bus);
1046*4882a593Smuzhiyun struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1047*4882a593Smuzhiyun struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1048*4882a593Smuzhiyun 				    struct pci_ops *ops, void *sysdata,
1049*4882a593Smuzhiyun 				    struct list_head *resources);
1050*4882a593Smuzhiyun int pci_host_probe(struct pci_host_bridge *bridge);
1051*4882a593Smuzhiyun int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1052*4882a593Smuzhiyun int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1053*4882a593Smuzhiyun void pci_bus_release_busn_res(struct pci_bus *b);
1054*4882a593Smuzhiyun struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1055*4882a593Smuzhiyun 				  struct pci_ops *ops, void *sysdata,
1056*4882a593Smuzhiyun 				  struct list_head *resources);
1057*4882a593Smuzhiyun int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1058*4882a593Smuzhiyun struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1059*4882a593Smuzhiyun 				int busnr);
1060*4882a593Smuzhiyun struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1061*4882a593Smuzhiyun 				 const char *name,
1062*4882a593Smuzhiyun 				 struct hotplug_slot *hotplug);
1063*4882a593Smuzhiyun void pci_destroy_slot(struct pci_slot *slot);
1064*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1065*4882a593Smuzhiyun void pci_dev_assign_slot(struct pci_dev *dev);
1066*4882a593Smuzhiyun #else
pci_dev_assign_slot(struct pci_dev * dev)1067*4882a593Smuzhiyun static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1068*4882a593Smuzhiyun #endif
1069*4882a593Smuzhiyun int pci_scan_slot(struct pci_bus *bus, int devfn);
1070*4882a593Smuzhiyun struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1071*4882a593Smuzhiyun void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1072*4882a593Smuzhiyun unsigned int pci_scan_child_bus(struct pci_bus *bus);
1073*4882a593Smuzhiyun void pci_bus_add_device(struct pci_dev *dev);
1074*4882a593Smuzhiyun void pci_read_bridge_bases(struct pci_bus *child);
1075*4882a593Smuzhiyun struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1076*4882a593Smuzhiyun 					  struct resource *res);
1077*4882a593Smuzhiyun u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1078*4882a593Smuzhiyun int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1079*4882a593Smuzhiyun u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1080*4882a593Smuzhiyun struct pci_dev *pci_dev_get(struct pci_dev *dev);
1081*4882a593Smuzhiyun void pci_dev_put(struct pci_dev *dev);
1082*4882a593Smuzhiyun void pci_remove_bus(struct pci_bus *b);
1083*4882a593Smuzhiyun void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1084*4882a593Smuzhiyun void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1085*4882a593Smuzhiyun void pci_stop_root_bus(struct pci_bus *bus);
1086*4882a593Smuzhiyun void pci_remove_root_bus(struct pci_bus *bus);
1087*4882a593Smuzhiyun void pci_setup_cardbus(struct pci_bus *bus);
1088*4882a593Smuzhiyun void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1089*4882a593Smuzhiyun void pci_sort_breadthfirst(void);
1090*4882a593Smuzhiyun #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1091*4882a593Smuzhiyun #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun /* Generic PCI functions exported to card drivers */
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun int pci_find_capability(struct pci_dev *dev, int cap);
1096*4882a593Smuzhiyun int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1097*4882a593Smuzhiyun int pci_find_ext_capability(struct pci_dev *dev, int cap);
1098*4882a593Smuzhiyun int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
1099*4882a593Smuzhiyun int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1100*4882a593Smuzhiyun int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
1101*4882a593Smuzhiyun struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun u64 pci_get_dsn(struct pci_dev *dev);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1106*4882a593Smuzhiyun 			       struct pci_dev *from);
1107*4882a593Smuzhiyun struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1108*4882a593Smuzhiyun 			       unsigned int ss_vendor, unsigned int ss_device,
1109*4882a593Smuzhiyun 			       struct pci_dev *from);
1110*4882a593Smuzhiyun struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1111*4882a593Smuzhiyun struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1112*4882a593Smuzhiyun 					    unsigned int devfn);
1113*4882a593Smuzhiyun struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1114*4882a593Smuzhiyun int pci_dev_present(const struct pci_device_id *ids);
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1117*4882a593Smuzhiyun 			     int where, u8 *val);
1118*4882a593Smuzhiyun int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1119*4882a593Smuzhiyun 			     int where, u16 *val);
1120*4882a593Smuzhiyun int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1121*4882a593Smuzhiyun 			      int where, u32 *val);
1122*4882a593Smuzhiyun int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1123*4882a593Smuzhiyun 			      int where, u8 val);
1124*4882a593Smuzhiyun int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1125*4882a593Smuzhiyun 			      int where, u16 val);
1126*4882a593Smuzhiyun int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1127*4882a593Smuzhiyun 			       int where, u32 val);
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1130*4882a593Smuzhiyun 			    int where, int size, u32 *val);
1131*4882a593Smuzhiyun int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1132*4882a593Smuzhiyun 			    int where, int size, u32 val);
1133*4882a593Smuzhiyun int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1134*4882a593Smuzhiyun 			      int where, int size, u32 *val);
1135*4882a593Smuzhiyun int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1136*4882a593Smuzhiyun 			       int where, int size, u32 val);
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1141*4882a593Smuzhiyun int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1142*4882a593Smuzhiyun int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1143*4882a593Smuzhiyun int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1144*4882a593Smuzhiyun int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1145*4882a593Smuzhiyun int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1148*4882a593Smuzhiyun int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1149*4882a593Smuzhiyun int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1150*4882a593Smuzhiyun int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1151*4882a593Smuzhiyun int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
1152*4882a593Smuzhiyun 				       u16 clear, u16 set);
1153*4882a593Smuzhiyun int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1154*4882a593Smuzhiyun 					u32 clear, u32 set);
1155*4882a593Smuzhiyun 
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	/* Setting bits means clearing none; delegate with an empty clear mask. */
	u16 clear = 0;

	return pcie_capability_clear_and_set_word(dev, pos, clear, set);
}
1161*4882a593Smuzhiyun 
static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	/* Setting bits means clearing none; delegate with an empty clear mask. */
	u32 clear = 0;

	return pcie_capability_clear_and_set_dword(dev, pos, clear, set);
}
1167*4882a593Smuzhiyun 
static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	/* Clearing bits means setting none; delegate with an empty set mask. */
	u16 set = 0;

	return pcie_capability_clear_and_set_word(dev, pos, clear, set);
}
1173*4882a593Smuzhiyun 
static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	/* Clearing bits means setting none; delegate with an empty set mask. */
	u32 set = 0;

	return pcie_capability_clear_and_set_dword(dev, pos, clear, set);
}
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun /* User-space driven config access */
1181*4882a593Smuzhiyun int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1182*4882a593Smuzhiyun int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1183*4882a593Smuzhiyun int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1184*4882a593Smuzhiyun int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1185*4882a593Smuzhiyun int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1186*4882a593Smuzhiyun int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun int __must_check pci_enable_device(struct pci_dev *dev);
1189*4882a593Smuzhiyun int __must_check pci_enable_device_io(struct pci_dev *dev);
1190*4882a593Smuzhiyun int __must_check pci_enable_device_mem(struct pci_dev *dev);
1191*4882a593Smuzhiyun int __must_check pci_reenable_device(struct pci_dev *);
1192*4882a593Smuzhiyun int __must_check pcim_enable_device(struct pci_dev *pdev);
1193*4882a593Smuzhiyun void pcim_pin_device(struct pci_dev *pdev);
1194*4882a593Smuzhiyun 
static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
{
	/*
	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
	 * writable and no quirk has marked the feature broken.
	 * broken_intx_masking is presumably set by device quirks for
	 * hardware whose INTX_DISABLE bit misbehaves -- confirm against
	 * drivers/pci/quirks.c.
	 */
	return !pdev->broken_intx_masking;
}
1203*4882a593Smuzhiyun 
pci_is_enabled(struct pci_dev * pdev)1204*4882a593Smuzhiyun static inline int pci_is_enabled(struct pci_dev *pdev)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun 	return (atomic_read(&pdev->enable_cnt) > 0);
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun 
static inline int pci_is_managed(struct pci_dev *pdev)
{
	/*
	 * Nonzero when the device uses managed (devres) resource handling --
	 * presumably set by pcim_enable_device(); verify in drivers/pci/pci.c.
	 */
	return pdev->is_managed;
}
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun void pci_disable_device(struct pci_dev *dev);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun extern unsigned int pcibios_max_latency;
1217*4882a593Smuzhiyun void pci_set_master(struct pci_dev *dev);
1218*4882a593Smuzhiyun void pci_clear_master(struct pci_dev *dev);
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1221*4882a593Smuzhiyun int pci_set_cacheline_size(struct pci_dev *dev);
1222*4882a593Smuzhiyun #define HAVE_PCI_SET_MWI
1223*4882a593Smuzhiyun int __must_check pci_set_mwi(struct pci_dev *dev);
1224*4882a593Smuzhiyun int __must_check pcim_set_mwi(struct pci_dev *dev);
1225*4882a593Smuzhiyun int pci_try_set_mwi(struct pci_dev *dev);
1226*4882a593Smuzhiyun void pci_clear_mwi(struct pci_dev *dev);
1227*4882a593Smuzhiyun void pci_intx(struct pci_dev *dev, int enable);
1228*4882a593Smuzhiyun bool pci_check_and_mask_intx(struct pci_dev *dev);
1229*4882a593Smuzhiyun bool pci_check_and_unmask_intx(struct pci_dev *dev);
1230*4882a593Smuzhiyun int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1231*4882a593Smuzhiyun int pci_wait_for_pending_transaction(struct pci_dev *dev);
1232*4882a593Smuzhiyun int pcix_get_max_mmrbc(struct pci_dev *dev);
1233*4882a593Smuzhiyun int pcix_get_mmrbc(struct pci_dev *dev);
1234*4882a593Smuzhiyun int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1235*4882a593Smuzhiyun int pcie_get_readrq(struct pci_dev *dev);
1236*4882a593Smuzhiyun int pcie_set_readrq(struct pci_dev *dev, int rq);
1237*4882a593Smuzhiyun int pcie_get_mps(struct pci_dev *dev);
1238*4882a593Smuzhiyun int pcie_set_mps(struct pci_dev *dev, int mps);
1239*4882a593Smuzhiyun u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1240*4882a593Smuzhiyun 			     enum pci_bus_speed *speed,
1241*4882a593Smuzhiyun 			     enum pcie_link_width *width);
1242*4882a593Smuzhiyun void pcie_print_link_status(struct pci_dev *dev);
1243*4882a593Smuzhiyun bool pcie_has_flr(struct pci_dev *dev);
1244*4882a593Smuzhiyun int pcie_flr(struct pci_dev *dev);
1245*4882a593Smuzhiyun int __pci_reset_function_locked(struct pci_dev *dev);
1246*4882a593Smuzhiyun int pci_reset_function(struct pci_dev *dev);
1247*4882a593Smuzhiyun int pci_reset_function_locked(struct pci_dev *dev);
1248*4882a593Smuzhiyun int pci_try_reset_function(struct pci_dev *dev);
1249*4882a593Smuzhiyun int pci_probe_reset_slot(struct pci_slot *slot);
1250*4882a593Smuzhiyun int pci_probe_reset_bus(struct pci_bus *bus);
1251*4882a593Smuzhiyun int pci_reset_bus(struct pci_dev *dev);
1252*4882a593Smuzhiyun void pci_reset_secondary_bus(struct pci_dev *dev);
1253*4882a593Smuzhiyun void pcibios_reset_secondary_bus(struct pci_dev *dev);
1254*4882a593Smuzhiyun void pci_update_resource(struct pci_dev *dev, int resno);
1255*4882a593Smuzhiyun int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1256*4882a593Smuzhiyun int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1257*4882a593Smuzhiyun void pci_release_resource(struct pci_dev *dev, int resno);
1258*4882a593Smuzhiyun int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1259*4882a593Smuzhiyun int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1260*4882a593Smuzhiyun bool pci_device_is_present(struct pci_dev *pdev);
1261*4882a593Smuzhiyun void pci_ignore_hotplug(struct pci_dev *dev);
1262*4882a593Smuzhiyun struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1263*4882a593Smuzhiyun int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1266*4882a593Smuzhiyun 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1267*4882a593Smuzhiyun 		const char *fmt, ...);
1268*4882a593Smuzhiyun void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun /* ROM control related routines */
1271*4882a593Smuzhiyun int pci_enable_rom(struct pci_dev *pdev);
1272*4882a593Smuzhiyun void pci_disable_rom(struct pci_dev *pdev);
1273*4882a593Smuzhiyun void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1274*4882a593Smuzhiyun void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun /* Power management related routines */
1277*4882a593Smuzhiyun int pci_save_state(struct pci_dev *dev);
1278*4882a593Smuzhiyun void pci_restore_state(struct pci_dev *dev);
1279*4882a593Smuzhiyun struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1280*4882a593Smuzhiyun int pci_load_saved_state(struct pci_dev *dev,
1281*4882a593Smuzhiyun 			 struct pci_saved_state *state);
1282*4882a593Smuzhiyun int pci_load_and_free_saved_state(struct pci_dev *dev,
1283*4882a593Smuzhiyun 				  struct pci_saved_state **state);
1284*4882a593Smuzhiyun struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
1285*4882a593Smuzhiyun struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
1286*4882a593Smuzhiyun 						   u16 cap);
1287*4882a593Smuzhiyun int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
1288*4882a593Smuzhiyun int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
1289*4882a593Smuzhiyun 				u16 cap, unsigned int size);
1290*4882a593Smuzhiyun int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1291*4882a593Smuzhiyun int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1292*4882a593Smuzhiyun pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1293*4882a593Smuzhiyun bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1294*4882a593Smuzhiyun void pci_pme_active(struct pci_dev *dev, bool enable);
1295*4882a593Smuzhiyun int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1296*4882a593Smuzhiyun int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1297*4882a593Smuzhiyun int pci_prepare_to_sleep(struct pci_dev *dev);
1298*4882a593Smuzhiyun int pci_back_from_sleep(struct pci_dev *dev);
1299*4882a593Smuzhiyun bool pci_dev_run_wake(struct pci_dev *dev);
1300*4882a593Smuzhiyun void pci_d3cold_enable(struct pci_dev *dev);
1301*4882a593Smuzhiyun void pci_d3cold_disable(struct pci_dev *dev);
1302*4882a593Smuzhiyun bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1303*4882a593Smuzhiyun void pci_wakeup_bus(struct pci_bus *bus);
1304*4882a593Smuzhiyun void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun /* For use by arch with custom probe code */
1307*4882a593Smuzhiyun void set_pcie_port_type(struct pci_dev *pdev);
1308*4882a593Smuzhiyun void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun /* Functions for PCI Hotplug drivers to use */
1311*4882a593Smuzhiyun int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1312*4882a593Smuzhiyun unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1313*4882a593Smuzhiyun unsigned int pci_rescan_bus(struct pci_bus *bus);
1314*4882a593Smuzhiyun void pci_lock_rescan_remove(void);
1315*4882a593Smuzhiyun void pci_unlock_rescan_remove(void);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun /* Vital Product Data routines */
1318*4882a593Smuzhiyun ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1319*4882a593Smuzhiyun ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1320*4882a593Smuzhiyun int pci_set_vpd_size(struct pci_dev *dev, size_t len);
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1323*4882a593Smuzhiyun resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1324*4882a593Smuzhiyun void pci_bus_assign_resources(const struct pci_bus *bus);
1325*4882a593Smuzhiyun void pci_bus_claim_resources(struct pci_bus *bus);
1326*4882a593Smuzhiyun void pci_bus_size_bridges(struct pci_bus *bus);
1327*4882a593Smuzhiyun int pci_claim_resource(struct pci_dev *, int);
1328*4882a593Smuzhiyun int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1329*4882a593Smuzhiyun void pci_assign_unassigned_resources(void);
1330*4882a593Smuzhiyun void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1331*4882a593Smuzhiyun void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1332*4882a593Smuzhiyun void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1333*4882a593Smuzhiyun int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
1334*4882a593Smuzhiyun void pdev_enable_device(struct pci_dev *);
1335*4882a593Smuzhiyun int pci_enable_resources(struct pci_dev *, int mask);
1336*4882a593Smuzhiyun void pci_assign_irq(struct pci_dev *dev);
1337*4882a593Smuzhiyun struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1338*4882a593Smuzhiyun #define HAVE_PCI_REQ_REGIONS	2
1339*4882a593Smuzhiyun int __must_check pci_request_regions(struct pci_dev *, const char *);
1340*4882a593Smuzhiyun int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1341*4882a593Smuzhiyun void pci_release_regions(struct pci_dev *);
1342*4882a593Smuzhiyun int __must_check pci_request_region(struct pci_dev *, int, const char *);
1343*4882a593Smuzhiyun void pci_release_region(struct pci_dev *, int);
1344*4882a593Smuzhiyun int pci_request_selected_regions(struct pci_dev *, int, const char *);
1345*4882a593Smuzhiyun int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1346*4882a593Smuzhiyun void pci_release_selected_regions(struct pci_dev *, int);
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun /* drivers/pci/bus.c */
1349*4882a593Smuzhiyun void pci_add_resource(struct list_head *resources, struct resource *res);
1350*4882a593Smuzhiyun void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1351*4882a593Smuzhiyun 			     resource_size_t offset);
1352*4882a593Smuzhiyun void pci_free_resource_list(struct list_head *resources);
1353*4882a593Smuzhiyun void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1354*4882a593Smuzhiyun 			  unsigned int flags);
1355*4882a593Smuzhiyun struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1356*4882a593Smuzhiyun void pci_bus_remove_resources(struct pci_bus *bus);
1357*4882a593Smuzhiyun int devm_request_pci_bus_resources(struct device *dev,
1358*4882a593Smuzhiyun 				   struct list_head *resources);
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun /* Temporary until new and working PCI SBR API in place */
1361*4882a593Smuzhiyun int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1362*4882a593Smuzhiyun 
/*
 * Iterate @res over the resources of @bus using index @i.  Iteration
 * continues while pci_bus_resource_n() returns non-NULL, and in any
 * case over the first PCI_BRIDGE_RESOURCE_NUM slots -- so within that
 * range the body may observe @res == NULL and must check for it.
 */
#define pci_bus_for_each_resource(bus, res, i)				\
	for (i = 0;							\
	    (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
	     i++)
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1369*4882a593Smuzhiyun 			struct resource *res, resource_size_t size,
1370*4882a593Smuzhiyun 			resource_size_t align, resource_size_t min,
1371*4882a593Smuzhiyun 			unsigned long type_mask,
1372*4882a593Smuzhiyun 			resource_size_t (*alignf)(void *,
1373*4882a593Smuzhiyun 						  const struct resource *,
1374*4882a593Smuzhiyun 						  resource_size_t,
1375*4882a593Smuzhiyun 						  resource_size_t),
1376*4882a593Smuzhiyun 			void *alignf_data);
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
1380*4882a593Smuzhiyun 			resource_size_t size);
1381*4882a593Smuzhiyun unsigned long pci_address_to_pio(phys_addr_t addr);
1382*4882a593Smuzhiyun phys_addr_t pci_pio_to_address(unsigned long pio);
1383*4882a593Smuzhiyun int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1384*4882a593Smuzhiyun int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1385*4882a593Smuzhiyun 			   phys_addr_t phys_addr);
1386*4882a593Smuzhiyun void pci_unmap_iospace(struct resource *res);
1387*4882a593Smuzhiyun void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1388*4882a593Smuzhiyun 				      resource_size_t offset,
1389*4882a593Smuzhiyun 				      resource_size_t size);
1390*4882a593Smuzhiyun void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1391*4882a593Smuzhiyun 					  struct resource *res);
1392*4882a593Smuzhiyun 
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
	/*
	 * Translate the CPU-side resource of BAR @bar into the bus address
	 * space of pdev's bus and return the start of that region.
	 * Assumes @bar is a valid index into pdev->resource[] -- callers
	 * must not pass an out-of-range BAR number.
	 */
	struct pci_bus_region region;

	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
	return region.start;
}
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun /* Proper probing supporting hot-pluggable devices */
1402*4882a593Smuzhiyun int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1403*4882a593Smuzhiyun 				       const char *mod_name);
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1406*4882a593Smuzhiyun #define pci_register_driver(driver)		\
1407*4882a593Smuzhiyun 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun void pci_unregister_driver(struct pci_driver *dev);
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun /**
1412*4882a593Smuzhiyun  * module_pci_driver() - Helper macro for registering a PCI driver
1413*4882a593Smuzhiyun  * @__pci_driver: pci_driver struct
1414*4882a593Smuzhiyun  *
1415*4882a593Smuzhiyun  * Helper macro for PCI drivers which do not do anything special in module
1416*4882a593Smuzhiyun  * init/exit. This eliminates a lot of boilerplate. Each module may only
1417*4882a593Smuzhiyun  * use this macro once, and calling it replaces module_init() and module_exit()
1418*4882a593Smuzhiyun  */
1419*4882a593Smuzhiyun #define module_pci_driver(__pci_driver) \
1420*4882a593Smuzhiyun 	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun /**
1423*4882a593Smuzhiyun  * builtin_pci_driver() - Helper macro for registering a PCI driver
1424*4882a593Smuzhiyun  * @__pci_driver: pci_driver struct
1425*4882a593Smuzhiyun  *
1426*4882a593Smuzhiyun  * Helper macro for PCI drivers which do not do anything special in their
1427*4882a593Smuzhiyun  * init code. This eliminates a lot of boilerplate. Each driver may only
1428*4882a593Smuzhiyun  * use this macro once, and calling it replaces device_initcall(...)
1429*4882a593Smuzhiyun  */
1430*4882a593Smuzhiyun #define builtin_pci_driver(__pci_driver) \
1431*4882a593Smuzhiyun 	builtin_driver(__pci_driver, pci_register_driver)
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1434*4882a593Smuzhiyun int pci_add_dynid(struct pci_driver *drv,
1435*4882a593Smuzhiyun 		  unsigned int vendor, unsigned int device,
1436*4882a593Smuzhiyun 		  unsigned int subvendor, unsigned int subdevice,
1437*4882a593Smuzhiyun 		  unsigned int class, unsigned int class_mask,
1438*4882a593Smuzhiyun 		  unsigned long driver_data);
1439*4882a593Smuzhiyun const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1440*4882a593Smuzhiyun 					 struct pci_dev *dev);
1441*4882a593Smuzhiyun int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1442*4882a593Smuzhiyun 		    int pass);
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1445*4882a593Smuzhiyun 		  void *userdata);
1446*4882a593Smuzhiyun int pci_cfg_space_size(struct pci_dev *dev);
1447*4882a593Smuzhiyun unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1448*4882a593Smuzhiyun void pci_setup_bridge(struct pci_bus *bus);
1449*4882a593Smuzhiyun resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1450*4882a593Smuzhiyun 					 unsigned long type);
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1453*4882a593Smuzhiyun #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1456*4882a593Smuzhiyun 		      unsigned int command_bits, u32 flags);
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun /*
1459*4882a593Smuzhiyun  * Virtual interrupts allow for more interrupts to be allocated
1460*4882a593Smuzhiyun  * than the device has interrupts for. These are not programmed
1461*4882a593Smuzhiyun  * into the device's MSI-X table and must be handled by some
1462*4882a593Smuzhiyun  * other driver means.
1463*4882a593Smuzhiyun  */
1464*4882a593Smuzhiyun #define PCI_IRQ_VIRTUAL		(1 << 4)
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun #define PCI_IRQ_ALL_TYPES \
1467*4882a593Smuzhiyun 	(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun /* kmem_cache style wrapper around pci_alloc_consistent() */
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun #include <linux/dmapool.h>
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun #define	pci_pool dma_pool
1474*4882a593Smuzhiyun #define pci_pool_create(name, pdev, size, align, allocation) \
1475*4882a593Smuzhiyun 		dma_pool_create(name, &pdev->dev, size, align, allocation)
1476*4882a593Smuzhiyun #define	pci_pool_destroy(pool) dma_pool_destroy(pool)
1477*4882a593Smuzhiyun #define	pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1478*4882a593Smuzhiyun #define	pci_pool_zalloc(pool, flags, handle) \
1479*4882a593Smuzhiyun 		dma_pool_zalloc(pool, flags, handle)
1480*4882a593Smuzhiyun #define	pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1481*4882a593Smuzhiyun 
/*
 * One MSI-X vector request: the driver fills in @entry (the MSI-X table
 * slot it wants) and the kernel writes the allocated Linux IRQ number
 * into @vector.
 */
struct msix_entry {
	u32	vector;	/* Kernel uses to write allocated vector */
	u16	entry;	/* Driver uses to specify entry, OS writes */
};
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun #ifdef CONFIG_PCI_MSI
1488*4882a593Smuzhiyun int pci_msi_vec_count(struct pci_dev *dev);
1489*4882a593Smuzhiyun void pci_disable_msi(struct pci_dev *dev);
1490*4882a593Smuzhiyun int pci_msix_vec_count(struct pci_dev *dev);
1491*4882a593Smuzhiyun void pci_disable_msix(struct pci_dev *dev);
1492*4882a593Smuzhiyun void pci_restore_msi_state(struct pci_dev *dev);
1493*4882a593Smuzhiyun int pci_msi_enabled(void);
1494*4882a593Smuzhiyun int pci_enable_msi(struct pci_dev *dev);
1495*4882a593Smuzhiyun int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1496*4882a593Smuzhiyun 			  int minvec, int maxvec);
static inline int pci_enable_msix_exact(struct pci_dev *dev,
					struct msix_entry *entries, int nvec)
{
	/*
	 * Request exactly @nvec vectors by passing @nvec as both ends of
	 * the range.  A non-negative return from pci_enable_msix_range()
	 * can then only be @nvec itself, so collapse success to 0.
	 */
	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);

	return rc < 0 ? rc : 0;
}
1505*4882a593Smuzhiyun int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1506*4882a593Smuzhiyun 				   unsigned int max_vecs, unsigned int flags,
1507*4882a593Smuzhiyun 				   struct irq_affinity *affd);
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun void pci_free_irq_vectors(struct pci_dev *dev);
1510*4882a593Smuzhiyun int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1511*4882a593Smuzhiyun const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun #else
/*
 * !CONFIG_PCI_MSI stubs: MSI/MSI-X enablement always fails with -ENOSYS,
 * queries report zero/disabled, and the teardown/restore helpers are no-ops.
 */
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
static inline int pci_msi_enabled(void) { return 0; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
			struct msix_entry *entries, int minvec, int maxvec)
{ return -ENOSYS; }
static inline int pci_enable_msix_exact(struct pci_dev *dev,
			struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)1530*4882a593Smuzhiyun pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1531*4882a593Smuzhiyun 			       unsigned int max_vecs, unsigned int flags,
1532*4882a593Smuzhiyun 			       struct irq_affinity *aff_desc)
1533*4882a593Smuzhiyun {
1534*4882a593Smuzhiyun 	if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
1535*4882a593Smuzhiyun 		return 1;
1536*4882a593Smuzhiyun 	return -ENOSPC;
1537*4882a593Smuzhiyun }
1538*4882a593Smuzhiyun 
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
	/* Nothing to free: the !CONFIG_PCI_MSI path never allocates vectors. */
}
1542*4882a593Smuzhiyun 
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	/* Only vector 0 (the legacy INTx line) exists without MSI support. */
	if (WARN_ON_ONCE(nr > 0))
		return -EINVAL;
	return dev->irq;
}
static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
		int vec)
{
	/* No per-vector affinity without MSI: report all possible CPUs. */
	return cpu_possible_mask;
}
1554*4882a593Smuzhiyun #endif
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun /**
1557*4882a593Smuzhiyun  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1558*4882a593Smuzhiyun  * @d: the INTx IRQ domain
1559*4882a593Smuzhiyun  * @node: the DT node for the device whose interrupt we're translating
1560*4882a593Smuzhiyun  * @intspec: the interrupt specifier data from the DT
1561*4882a593Smuzhiyun  * @intsize: the number of entries in @intspec
1562*4882a593Smuzhiyun  * @out_hwirq: pointer at which to write the hwirq number
1563*4882a593Smuzhiyun  * @out_type: pointer at which to write the interrupt type
1564*4882a593Smuzhiyun  *
1565*4882a593Smuzhiyun  * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1566*4882a593Smuzhiyun  * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1567*4882a593Smuzhiyun  * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1568*4882a593Smuzhiyun  * INTx value to obtain the hwirq number.
1569*4882a593Smuzhiyun  *
1570*4882a593Smuzhiyun  * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1571*4882a593Smuzhiyun  */
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1572*4882a593Smuzhiyun static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1573*4882a593Smuzhiyun 				      struct device_node *node,
1574*4882a593Smuzhiyun 				      const u32 *intspec,
1575*4882a593Smuzhiyun 				      unsigned int intsize,
1576*4882a593Smuzhiyun 				      unsigned long *out_hwirq,
1577*4882a593Smuzhiyun 				      unsigned int *out_type)
1578*4882a593Smuzhiyun {
1579*4882a593Smuzhiyun 	const u32 intx = intspec[0];
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1582*4882a593Smuzhiyun 		return -EINVAL;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	*out_hwirq = intx - PCI_INTERRUPT_INTA;
1585*4882a593Smuzhiyun 	return 0;
1586*4882a593Smuzhiyun }
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun #ifdef CONFIG_PCIEPORTBUS
1589*4882a593Smuzhiyun extern bool pcie_ports_disabled;
1590*4882a593Smuzhiyun extern bool pcie_ports_native;
1591*4882a593Smuzhiyun #else
1592*4882a593Smuzhiyun #define pcie_ports_disabled	true
1593*4882a593Smuzhiyun #define pcie_ports_native	false
1594*4882a593Smuzhiyun #endif
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun #define PCIE_LINK_STATE_L0S		BIT(0)
1597*4882a593Smuzhiyun #define PCIE_LINK_STATE_L1		BIT(1)
1598*4882a593Smuzhiyun #define PCIE_LINK_STATE_CLKPM		BIT(2)
1599*4882a593Smuzhiyun #define PCIE_LINK_STATE_L1_1		BIT(3)
1600*4882a593Smuzhiyun #define PCIE_LINK_STATE_L1_2		BIT(4)
1601*4882a593Smuzhiyun #define PCIE_LINK_STATE_L1_1_PCIPM	BIT(5)
1602*4882a593Smuzhiyun #define PCIE_LINK_STATE_L1_2_PCIPM	BIT(6)
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun #ifdef CONFIG_PCIEASPM
1605*4882a593Smuzhiyun int pci_disable_link_state(struct pci_dev *pdev, int state);
1606*4882a593Smuzhiyun int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1607*4882a593Smuzhiyun void pcie_no_aspm(void);
1608*4882a593Smuzhiyun bool pcie_aspm_support_enabled(void);
1609*4882a593Smuzhiyun bool pcie_aspm_enabled(struct pci_dev *pdev);
1610*4882a593Smuzhiyun #else
pci_disable_link_state(struct pci_dev * pdev,int state)1611*4882a593Smuzhiyun static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1612*4882a593Smuzhiyun { return 0; }
pci_disable_link_state_locked(struct pci_dev * pdev,int state)1613*4882a593Smuzhiyun static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1614*4882a593Smuzhiyun { return 0; }
pcie_no_aspm(void)1615*4882a593Smuzhiyun static inline void pcie_no_aspm(void) { }
pcie_aspm_support_enabled(void)1616*4882a593Smuzhiyun static inline bool pcie_aspm_support_enabled(void) { return false; }
pcie_aspm_enabled(struct pci_dev * pdev)1617*4882a593Smuzhiyun static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1618*4882a593Smuzhiyun #endif
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun #ifdef CONFIG_PCIEAER
1621*4882a593Smuzhiyun bool pci_aer_available(void);
1622*4882a593Smuzhiyun #else
pci_aer_available(void)1623*4882a593Smuzhiyun static inline bool pci_aer_available(void) { return false; }
1624*4882a593Smuzhiyun #endif
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun bool pci_ats_disabled(void);
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun #ifdef CONFIG_PCIE_PTM
1629*4882a593Smuzhiyun int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1630*4882a593Smuzhiyun #else
pci_enable_ptm(struct pci_dev * dev,u8 * granularity)1631*4882a593Smuzhiyun static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1632*4882a593Smuzhiyun { return -EINVAL; }
1633*4882a593Smuzhiyun #endif
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun void pci_cfg_access_lock(struct pci_dev *dev);
1636*4882a593Smuzhiyun bool pci_cfg_access_trylock(struct pci_dev *dev);
1637*4882a593Smuzhiyun void pci_cfg_access_unlock(struct pci_dev *dev);
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun /*
1640*4882a593Smuzhiyun  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
1641*4882a593Smuzhiyun  * a PCI domain is defined to be a set of PCI buses which share
1642*4882a593Smuzhiyun  * configuration space.
1643*4882a593Smuzhiyun  */
1644*4882a593Smuzhiyun #ifdef CONFIG_PCI_DOMAINS
1645*4882a593Smuzhiyun extern int pci_domains_supported;
1646*4882a593Smuzhiyun #else
1647*4882a593Smuzhiyun enum { pci_domains_supported = 0 };
pci_domain_nr(struct pci_bus * bus)1648*4882a593Smuzhiyun static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_proc_domain(struct pci_bus * bus)1649*4882a593Smuzhiyun static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1650*4882a593Smuzhiyun #endif /* CONFIG_PCI_DOMAINS */
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun /*
1653*4882a593Smuzhiyun  * Generic implementation for PCI domain support. If your
1654*4882a593Smuzhiyun  * architecture does not need custom management of PCI
1655*4882a593Smuzhiyun  * domains then this implementation will be used
1656*4882a593Smuzhiyun  */
1657*4882a593Smuzhiyun #ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_domain_nr(struct pci_bus * bus)1658*4882a593Smuzhiyun static inline int pci_domain_nr(struct pci_bus *bus)
1659*4882a593Smuzhiyun {
1660*4882a593Smuzhiyun 	return bus->domain_nr;
1661*4882a593Smuzhiyun }
1662*4882a593Smuzhiyun #ifdef CONFIG_ACPI
1663*4882a593Smuzhiyun int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1664*4882a593Smuzhiyun #else
acpi_pci_bus_find_domain_nr(struct pci_bus * bus)1665*4882a593Smuzhiyun static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1666*4882a593Smuzhiyun { return 0; }
1667*4882a593Smuzhiyun #endif
1668*4882a593Smuzhiyun int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1669*4882a593Smuzhiyun #endif
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun /* Some architectures require additional setup to direct VGA traffic */
1672*4882a593Smuzhiyun typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1673*4882a593Smuzhiyun 				    unsigned int command_bits, u32 flags);
1674*4882a593Smuzhiyun void pci_register_set_vga_state(arch_set_vga_state_t func);
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun static inline int
pci_request_io_regions(struct pci_dev * pdev,const char * name)1677*4882a593Smuzhiyun pci_request_io_regions(struct pci_dev *pdev, const char *name)
1678*4882a593Smuzhiyun {
1679*4882a593Smuzhiyun 	return pci_request_selected_regions(pdev,
1680*4882a593Smuzhiyun 			    pci_select_bars(pdev, IORESOURCE_IO), name);
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun static inline void
pci_release_io_regions(struct pci_dev * pdev)1684*4882a593Smuzhiyun pci_release_io_regions(struct pci_dev *pdev)
1685*4882a593Smuzhiyun {
1686*4882a593Smuzhiyun 	return pci_release_selected_regions(pdev,
1687*4882a593Smuzhiyun 			    pci_select_bars(pdev, IORESOURCE_IO));
1688*4882a593Smuzhiyun }
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun static inline int
pci_request_mem_regions(struct pci_dev * pdev,const char * name)1691*4882a593Smuzhiyun pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1692*4882a593Smuzhiyun {
1693*4882a593Smuzhiyun 	return pci_request_selected_regions(pdev,
1694*4882a593Smuzhiyun 			    pci_select_bars(pdev, IORESOURCE_MEM), name);
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun static inline void
pci_release_mem_regions(struct pci_dev * pdev)1698*4882a593Smuzhiyun pci_release_mem_regions(struct pci_dev *pdev)
1699*4882a593Smuzhiyun {
1700*4882a593Smuzhiyun 	return pci_release_selected_regions(pdev,
1701*4882a593Smuzhiyun 			    pci_select_bars(pdev, IORESOURCE_MEM));
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun #else /* CONFIG_PCI is not enabled */
1705*4882a593Smuzhiyun 
pci_set_flags(int flags)1706*4882a593Smuzhiyun static inline void pci_set_flags(int flags) { }
pci_add_flags(int flags)1707*4882a593Smuzhiyun static inline void pci_add_flags(int flags) { }
pci_clear_flags(int flags)1708*4882a593Smuzhiyun static inline void pci_clear_flags(int flags) { }
pci_has_flag(int flag)1709*4882a593Smuzhiyun static inline int pci_has_flag(int flag) { return 0; }
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun /*
1712*4882a593Smuzhiyun  * If the system does not have PCI, clearly these return errors.  Define
1713*4882a593Smuzhiyun  * these as simple inline functions to avoid hair in drivers.
1714*4882a593Smuzhiyun  */
1715*4882a593Smuzhiyun #define _PCI_NOP(o, s, t) \
1716*4882a593Smuzhiyun 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1717*4882a593Smuzhiyun 						int where, t val) \
1718*4882a593Smuzhiyun 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
1721*4882a593Smuzhiyun 				_PCI_NOP(o, word, u16 x) \
1722*4882a593Smuzhiyun 				_PCI_NOP(o, dword, u32 x)
1723*4882a593Smuzhiyun _PCI_NOP_ALL(read, *)
1724*4882a593Smuzhiyun _PCI_NOP_ALL(write,)
1725*4882a593Smuzhiyun 
pci_get_device(unsigned int vendor,unsigned int device,struct pci_dev * from)1726*4882a593Smuzhiyun static inline struct pci_dev *pci_get_device(unsigned int vendor,
1727*4882a593Smuzhiyun 					     unsigned int device,
1728*4882a593Smuzhiyun 					     struct pci_dev *from)
1729*4882a593Smuzhiyun { return NULL; }
1730*4882a593Smuzhiyun 
pci_get_subsys(unsigned int vendor,unsigned int device,unsigned int ss_vendor,unsigned int ss_device,struct pci_dev * from)1731*4882a593Smuzhiyun static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1732*4882a593Smuzhiyun 					     unsigned int device,
1733*4882a593Smuzhiyun 					     unsigned int ss_vendor,
1734*4882a593Smuzhiyun 					     unsigned int ss_device,
1735*4882a593Smuzhiyun 					     struct pci_dev *from)
1736*4882a593Smuzhiyun { return NULL; }
1737*4882a593Smuzhiyun 
pci_get_class(unsigned int class,struct pci_dev * from)1738*4882a593Smuzhiyun static inline struct pci_dev *pci_get_class(unsigned int class,
1739*4882a593Smuzhiyun 					    struct pci_dev *from)
1740*4882a593Smuzhiyun { return NULL; }
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun #define pci_dev_present(ids)	(0)
1743*4882a593Smuzhiyun #define no_pci_devices()	(1)
1744*4882a593Smuzhiyun #define pci_dev_put(dev)	do { } while (0)
1745*4882a593Smuzhiyun 
pci_set_master(struct pci_dev * dev)1746*4882a593Smuzhiyun static inline void pci_set_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)1747*4882a593Smuzhiyun static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)1748*4882a593Smuzhiyun static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)1749*4882a593Smuzhiyun static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)1750*4882a593Smuzhiyun static inline int pci_assign_resource(struct pci_dev *dev, int i)
1751*4882a593Smuzhiyun { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)1752*4882a593Smuzhiyun static inline int __must_check __pci_register_driver(struct pci_driver *drv,
1753*4882a593Smuzhiyun 						     struct module *owner,
1754*4882a593Smuzhiyun 						     const char *mod_name)
1755*4882a593Smuzhiyun { return 0; }
pci_register_driver(struct pci_driver * drv)1756*4882a593Smuzhiyun static inline int pci_register_driver(struct pci_driver *drv)
1757*4882a593Smuzhiyun { return 0; }
pci_unregister_driver(struct pci_driver * drv)1758*4882a593Smuzhiyun static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)1759*4882a593Smuzhiyun static inline int pci_find_capability(struct pci_dev *dev, int cap)
1760*4882a593Smuzhiyun { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)1761*4882a593Smuzhiyun static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
1762*4882a593Smuzhiyun 					   int cap)
1763*4882a593Smuzhiyun { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)1764*4882a593Smuzhiyun static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
1765*4882a593Smuzhiyun { return 0; }
1766*4882a593Smuzhiyun 
pci_get_dsn(struct pci_dev * dev)1767*4882a593Smuzhiyun static inline u64 pci_get_dsn(struct pci_dev *dev)
1768*4882a593Smuzhiyun { return 0; }
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun /* Power management related routines */
pci_save_state(struct pci_dev * dev)1771*4882a593Smuzhiyun static inline int pci_save_state(struct pci_dev *dev) { return 0; }
pci_restore_state(struct pci_dev * dev)1772*4882a593Smuzhiyun static inline void pci_restore_state(struct pci_dev *dev) { }
pci_set_power_state(struct pci_dev * dev,pci_power_t state)1773*4882a593Smuzhiyun static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1774*4882a593Smuzhiyun { return 0; }
pci_wake_from_d3(struct pci_dev * dev,bool enable)1775*4882a593Smuzhiyun static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1776*4882a593Smuzhiyun { return 0; }
pci_choose_state(struct pci_dev * dev,pm_message_t state)1777*4882a593Smuzhiyun static inline pci_power_t pci_choose_state(struct pci_dev *dev,
1778*4882a593Smuzhiyun 					   pm_message_t state)
1779*4882a593Smuzhiyun { return PCI_D0; }
pci_enable_wake(struct pci_dev * dev,pci_power_t state,int enable)1780*4882a593Smuzhiyun static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1781*4882a593Smuzhiyun 				  int enable)
1782*4882a593Smuzhiyun { return 0; }
1783*4882a593Smuzhiyun 
pci_find_resource(struct pci_dev * dev,struct resource * res)1784*4882a593Smuzhiyun static inline struct resource *pci_find_resource(struct pci_dev *dev,
1785*4882a593Smuzhiyun 						 struct resource *res)
1786*4882a593Smuzhiyun { return NULL; }
pci_request_regions(struct pci_dev * dev,const char * res_name)1787*4882a593Smuzhiyun static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
1788*4882a593Smuzhiyun { return -EIO; }
pci_release_regions(struct pci_dev * dev)1789*4882a593Smuzhiyun static inline void pci_release_regions(struct pci_dev *dev) { }
1790*4882a593Smuzhiyun 
pci_address_to_pio(phys_addr_t addr)1791*4882a593Smuzhiyun static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
1792*4882a593Smuzhiyun 
pci_find_next_bus(const struct pci_bus * from)1793*4882a593Smuzhiyun static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
1794*4882a593Smuzhiyun { return NULL; }
pci_get_slot(struct pci_bus * bus,unsigned int devfn)1795*4882a593Smuzhiyun static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
1796*4882a593Smuzhiyun 						unsigned int devfn)
1797*4882a593Smuzhiyun { return NULL; }
pci_get_domain_bus_and_slot(int domain,unsigned int bus,unsigned int devfn)1798*4882a593Smuzhiyun static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
1799*4882a593Smuzhiyun 					unsigned int bus, unsigned int devfn)
1800*4882a593Smuzhiyun { return NULL; }
1801*4882a593Smuzhiyun 
pci_domain_nr(struct pci_bus * bus)1802*4882a593Smuzhiyun static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_dev_get(struct pci_dev * dev)1803*4882a593Smuzhiyun static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun #define dev_is_pci(d) (false)
1806*4882a593Smuzhiyun #define dev_is_pf(d) (false)
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)1807*4882a593Smuzhiyun static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
1808*4882a593Smuzhiyun { return false; }
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1809*4882a593Smuzhiyun static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1810*4882a593Smuzhiyun 				      struct device_node *node,
1811*4882a593Smuzhiyun 				      const u32 *intspec,
1812*4882a593Smuzhiyun 				      unsigned int intsize,
1813*4882a593Smuzhiyun 				      unsigned long *out_hwirq,
1814*4882a593Smuzhiyun 				      unsigned int *out_type)
1815*4882a593Smuzhiyun { return -EINVAL; }
1816*4882a593Smuzhiyun 
pci_match_id(const struct pci_device_id * ids,struct pci_dev * dev)1817*4882a593Smuzhiyun static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1818*4882a593Smuzhiyun 							 struct pci_dev *dev)
1819*4882a593Smuzhiyun { return NULL; }
pci_ats_disabled(void)1820*4882a593Smuzhiyun static inline bool pci_ats_disabled(void) { return true; }
1821*4882a593Smuzhiyun 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)1822*4882a593Smuzhiyun static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1823*4882a593Smuzhiyun {
1824*4882a593Smuzhiyun 	return -EINVAL;
1825*4882a593Smuzhiyun }
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)1828*4882a593Smuzhiyun pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1829*4882a593Smuzhiyun 			       unsigned int max_vecs, unsigned int flags,
1830*4882a593Smuzhiyun 			       struct irq_affinity *aff_desc)
1831*4882a593Smuzhiyun {
1832*4882a593Smuzhiyun 	return -ENOSPC;
1833*4882a593Smuzhiyun }
1834*4882a593Smuzhiyun #endif /* CONFIG_PCI */
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)1837*4882a593Smuzhiyun pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1838*4882a593Smuzhiyun 		      unsigned int max_vecs, unsigned int flags)
1839*4882a593Smuzhiyun {
1840*4882a593Smuzhiyun 	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
1841*4882a593Smuzhiyun 					      NULL);
1842*4882a593Smuzhiyun }
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun /* Include architecture-dependent settings and functions */
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun #include <asm/pci.h>
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun /* These two functions provide almost identical functionality. Depending
1849*4882a593Smuzhiyun  * on the architecture, one will be implemented as a wrapper around the
1850*4882a593Smuzhiyun  * other (in drivers/pci/mmap.c).
1851*4882a593Smuzhiyun  *
1852*4882a593Smuzhiyun  * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
1853*4882a593Smuzhiyun  * is expected to be an offset within that region.
1854*4882a593Smuzhiyun  *
1855*4882a593Smuzhiyun  * pci_mmap_page_range() is the legacy architecture-specific interface,
1856*4882a593Smuzhiyun  * which accepts a "user visible" resource address converted by
1857*4882a593Smuzhiyun  * pci_resource_to_user(), as used in the legacy mmap() interface in
1858*4882a593Smuzhiyun  * /proc/bus/pci/.
1859*4882a593Smuzhiyun  */
1860*4882a593Smuzhiyun int pci_mmap_resource_range(struct pci_dev *dev, int bar,
1861*4882a593Smuzhiyun 			    struct vm_area_struct *vma,
1862*4882a593Smuzhiyun 			    enum pci_mmap_state mmap_state, int write_combine);
1863*4882a593Smuzhiyun int pci_mmap_page_range(struct pci_dev *pdev, int bar,
1864*4882a593Smuzhiyun 			struct vm_area_struct *vma,
1865*4882a593Smuzhiyun 			enum pci_mmap_state mmap_state, int write_combine);
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun #ifndef arch_can_pci_mmap_wc
1868*4882a593Smuzhiyun #define arch_can_pci_mmap_wc()		0
1869*4882a593Smuzhiyun #endif
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun #ifndef arch_can_pci_mmap_io
1872*4882a593Smuzhiyun #define arch_can_pci_mmap_io()		0
1873*4882a593Smuzhiyun #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
1874*4882a593Smuzhiyun #else
1875*4882a593Smuzhiyun int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
1876*4882a593Smuzhiyun #endif
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun #ifndef pci_root_bus_fwnode
1879*4882a593Smuzhiyun #define pci_root_bus_fwnode(bus)	NULL
1880*4882a593Smuzhiyun #endif
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun /*
1883*4882a593Smuzhiyun  * These helpers provide future and backwards compatibility
1884*4882a593Smuzhiyun  * for accessing popular PCI BAR info
1885*4882a593Smuzhiyun  */
1886*4882a593Smuzhiyun #define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
1887*4882a593Smuzhiyun #define pci_resource_end(dev, bar)	((dev)->resource[(bar)].end)
1888*4882a593Smuzhiyun #define pci_resource_flags(dev, bar)	((dev)->resource[(bar)].flags)
1889*4882a593Smuzhiyun #define pci_resource_len(dev,bar) \
1890*4882a593Smuzhiyun 	((pci_resource_start((dev), (bar)) == 0 &&	\
1891*4882a593Smuzhiyun 	  pci_resource_end((dev), (bar)) ==		\
1892*4882a593Smuzhiyun 	  pci_resource_start((dev), (bar))) ? 0 :	\
1893*4882a593Smuzhiyun 							\
1894*4882a593Smuzhiyun 	 (pci_resource_end((dev), (bar)) -		\
1895*4882a593Smuzhiyun 	  pci_resource_start((dev), (bar)) + 1))
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun /*
1898*4882a593Smuzhiyun  * Similar to the helpers above, these manipulate per-pci_dev
1899*4882a593Smuzhiyun  * driver-specific data.  They are really just a wrapper around
1900*4882a593Smuzhiyun  * the generic device structure functions of these calls.
1901*4882a593Smuzhiyun  */
pci_get_drvdata(struct pci_dev * pdev)1902*4882a593Smuzhiyun static inline void *pci_get_drvdata(struct pci_dev *pdev)
1903*4882a593Smuzhiyun {
1904*4882a593Smuzhiyun 	return dev_get_drvdata(&pdev->dev);
1905*4882a593Smuzhiyun }
1906*4882a593Smuzhiyun 
pci_set_drvdata(struct pci_dev * pdev,void * data)1907*4882a593Smuzhiyun static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
1908*4882a593Smuzhiyun {
1909*4882a593Smuzhiyun 	dev_set_drvdata(&pdev->dev, data);
1910*4882a593Smuzhiyun }
1911*4882a593Smuzhiyun 
pci_name(const struct pci_dev * pdev)1912*4882a593Smuzhiyun static inline const char *pci_name(const struct pci_dev *pdev)
1913*4882a593Smuzhiyun {
1914*4882a593Smuzhiyun 	return dev_name(&pdev->dev);
1915*4882a593Smuzhiyun }
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun void pci_resource_to_user(const struct pci_dev *dev, int bar,
1918*4882a593Smuzhiyun 			  const struct resource *rsrc,
1919*4882a593Smuzhiyun 			  resource_size_t *start, resource_size_t *end);
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun /*
1922*4882a593Smuzhiyun  * The world is not perfect and supplies us with broken PCI devices.
1923*4882a593Smuzhiyun  * For at least a part of these bugs we need a work-around, so both
1924*4882a593Smuzhiyun  * generic (drivers/pci/quirks.c) and per-architecture code can define
1925*4882a593Smuzhiyun  * fixup hooks to be called for particular buggy devices.
1926*4882a593Smuzhiyun  */
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun struct pci_fixup {
1929*4882a593Smuzhiyun 	u16 vendor;			/* Or PCI_ANY_ID */
1930*4882a593Smuzhiyun 	u16 device;			/* Or PCI_ANY_ID */
1931*4882a593Smuzhiyun 	u32 class;			/* Or PCI_ANY_ID */
1932*4882a593Smuzhiyun 	unsigned int class_shift;	/* should be 0, 8, 16 */
1933*4882a593Smuzhiyun #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
1934*4882a593Smuzhiyun 	int hook_offset;
1935*4882a593Smuzhiyun #else
1936*4882a593Smuzhiyun 	void (*hook)(struct pci_dev *dev);
1937*4882a593Smuzhiyun #endif
1938*4882a593Smuzhiyun };
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun enum pci_fixup_pass {
1941*4882a593Smuzhiyun 	pci_fixup_early,	/* Before probing BARs */
1942*4882a593Smuzhiyun 	pci_fixup_header,	/* After reading configuration header */
1943*4882a593Smuzhiyun 	pci_fixup_final,	/* Final phase of device fixups */
1944*4882a593Smuzhiyun 	pci_fixup_enable,	/* pci_enable_device() time */
1945*4882a593Smuzhiyun 	pci_fixup_resume,	/* pci_device_resume() */
1946*4882a593Smuzhiyun 	pci_fixup_suspend,	/* pci_device_suspend() */
1947*4882a593Smuzhiyun 	pci_fixup_resume_early, /* pci_device_resume_early() */
1948*4882a593Smuzhiyun 	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
1949*4882a593Smuzhiyun };
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
1952*4882a593Smuzhiyun #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
1953*4882a593Smuzhiyun 				    class_shift, hook, stub)		\
1954*4882a593Smuzhiyun 	void __cficanonical stub(struct pci_dev *dev);			\
1955*4882a593Smuzhiyun 	void __cficanonical stub(struct pci_dev *dev)			\
1956*4882a593Smuzhiyun 	{ 								\
1957*4882a593Smuzhiyun 		hook(dev); 						\
1958*4882a593Smuzhiyun 	}								\
1959*4882a593Smuzhiyun 	asm(".section "	#sec ", \"a\"				\n"	\
1960*4882a593Smuzhiyun 	    ".balign	16					\n"	\
1961*4882a593Smuzhiyun 	    ".short "	#vendor ", " #device "			\n"	\
1962*4882a593Smuzhiyun 	    ".long "	#class ", " #class_shift "		\n"	\
1963*4882a593Smuzhiyun 	    ".long "	#stub " - .				\n"	\
1964*4882a593Smuzhiyun 	    ".previous						\n");
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
1967*4882a593Smuzhiyun 				  class_shift, hook, stub)		\
1968*4882a593Smuzhiyun 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
1969*4882a593Smuzhiyun 				  class_shift, hook, stub)
1970*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
1971*4882a593Smuzhiyun 				  class_shift, hook)			\
1972*4882a593Smuzhiyun 	__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
1973*4882a593Smuzhiyun 				  class_shift, hook, __UNIQUE_ID(hook))
1974*4882a593Smuzhiyun #else
1975*4882a593Smuzhiyun /* Anonymous variables would be nice... */
1976*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
1977*4882a593Smuzhiyun 				  class_shift, hook)			\
1978*4882a593Smuzhiyun 	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
1979*4882a593Smuzhiyun 	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
1980*4882a593Smuzhiyun 		= { vendor, device, class, class_shift, hook };
1981*4882a593Smuzhiyun #endif
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
1984*4882a593Smuzhiyun 					 class_shift, hook)		\
1985*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
1986*4882a593Smuzhiyun 		hook, vendor, device, class, class_shift, hook)
1987*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
1988*4882a593Smuzhiyun 					 class_shift, hook)		\
1989*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
1990*4882a593Smuzhiyun 		hook, vendor, device, class, class_shift, hook)
1991*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
1992*4882a593Smuzhiyun 					 class_shift, hook)		\
1993*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
1994*4882a593Smuzhiyun 		hook, vendor, device, class, class_shift, hook)
1995*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
1996*4882a593Smuzhiyun 					 class_shift, hook)		\
1997*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
1998*4882a593Smuzhiyun 		hook, vendor, device, class, class_shift, hook)
1999*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
2000*4882a593Smuzhiyun 					 class_shift, hook)		\
2001*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2002*4882a593Smuzhiyun 		resume##hook, vendor, device, class, class_shift, hook)
2003*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
2004*4882a593Smuzhiyun 					 class_shift, hook)		\
2005*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2006*4882a593Smuzhiyun 		resume_early##hook, vendor, device, class, class_shift, hook)
2007*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
2008*4882a593Smuzhiyun 					 class_shift, hook)		\
2009*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2010*4882a593Smuzhiyun 		suspend##hook, vendor, device, class, class_shift, hook)
2011*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
2012*4882a593Smuzhiyun 					 class_shift, hook)		\
2013*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2014*4882a593Smuzhiyun 		suspend_late##hook, vendor, device, class, class_shift, hook)
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
2017*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2018*4882a593Smuzhiyun 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2019*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
2020*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2021*4882a593Smuzhiyun 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2022*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
2023*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2024*4882a593Smuzhiyun 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2025*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
2026*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2027*4882a593Smuzhiyun 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2028*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
2029*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2030*4882a593Smuzhiyun 		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2031*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
2032*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2033*4882a593Smuzhiyun 		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2034*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
2035*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2036*4882a593Smuzhiyun 		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2037*4882a593Smuzhiyun #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
2038*4882a593Smuzhiyun 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2039*4882a593Smuzhiyun 		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun #ifdef CONFIG_PCI_QUIRKS
2042*4882a593Smuzhiyun void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2043*4882a593Smuzhiyun #else
pci_fixup_device(enum pci_fixup_pass pass,struct pci_dev * dev)2044*4882a593Smuzhiyun static inline void pci_fixup_device(enum pci_fixup_pass pass,
2045*4882a593Smuzhiyun 				    struct pci_dev *dev) { }
2046*4882a593Smuzhiyun #endif
2047*4882a593Smuzhiyun 
/*
 * Device-managed ("pcim_") BAR iomap helpers: mappings and region
 * requests made through these are released automatically when the
 * driver detaches.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name);
void pcim_iounmap_regions(struct pci_dev *pdev, int mask);

/* Bitmask of global chipset problems detected by quirks. */
extern int pci_pci_problems;
#define PCIPCI_FAIL		1	/* No PCI PCI DMA */
#define PCIPCI_TRITON		2	/* Chipset-specific quirk flag */
#define PCIPCI_NATOMA		4	/* Chipset-specific quirk flag */
#define PCIPCI_VIAETBF		8	/* Chipset-specific quirk flag */
#define PCIPCI_VSFX		16	/* Chipset-specific quirk flag */
#define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
#define PCIAGP_FAIL		64	/* No PCI to AGP DMA */

/* CardBus window sizes and cache-line-size tuning knobs. */
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;
extern u8 pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
2069*4882a593Smuzhiyun 
/* Architecture-specific versions may override these (weak) */
void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
				 enum pcie_reset_state state);
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
#else
/* No PCI support: nothing to penalize. */
static inline void pcibios_penalize_isa_irq(int irq, int active) {}
#endif
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
2085*4882a593Smuzhiyun 
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
#else
/* MMCONFIG/MCFG not configured: init hooks compile to nothing. */
static inline void pci_mmcfg_early_init(void) { }
static inline void pci_mmcfg_late_init(void) { }
#endif

int pci_ext_cfg_avail(void);

/* Map a whole BAR; the _wc variant requests write-combining. */
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2098*4882a593Smuzhiyun 
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);

int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);

int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
int pci_iov_add_virtfn(struct pci_dev *dev, int id);
void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);

/* Arch may override these (weak) */
int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
int pcibios_sriov_disable(struct pci_dev *pdev);
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#else
/*
 * SR-IOV disabled: the stubs below fail with -ENOSYS/-ENODEV or report
 * zero VFs, so callers need no #ifdefs of their own.
 */
static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }

static inline int pci_iov_sysfs_link(struct pci_dev *dev,
				     struct pci_dev *virtfn, int id)
{
	return -ENODEV;
}
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
					 int id) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
{ return 0; }
static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
#define pci_sriov_configure_simple	NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
#endif
2157*4882a593Smuzhiyun 
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
/* Maintain the driver-module link for a hotplug PCI slot. */
void pci_hp_create_module_link(struct pci_slot *pci_slot);
void pci_hp_remove_module_link(struct pci_slot *pci_slot);
#endif
2162*4882a593Smuzhiyun 
/**
 * pci_pcie_cap - get the saved PCIe capability offset
 * @dev: PCI device
 *
 * PCIe capability offset is calculated at PCI device initialization
 * time and saved in the data structure. This function returns saved
 * PCIe capability offset. Using this instead of pci_find_capability()
 * reduces unnecessary search in the PCI configuration space. If you
 * need to calculate PCIe capability offset from raw device for some
 * reasons, please use pci_find_capability() instead.
 *
 * Returns: the saved offset, or 0 when the device is not PCI Express
 * capable (pci_is_pcie() relies on that).
 */
static inline int pci_pcie_cap(struct pci_dev *dev)
{
	return dev->pcie_cap;
}
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun /**
2180*4882a593Smuzhiyun  * pci_is_pcie - check if the PCI device is PCI Express capable
2181*4882a593Smuzhiyun  * @dev: PCI device
2182*4882a593Smuzhiyun  *
2183*4882a593Smuzhiyun  * Returns: true if the PCI device is PCI Express capable, false otherwise.
2184*4882a593Smuzhiyun  */
pci_is_pcie(struct pci_dev * dev)2185*4882a593Smuzhiyun static inline bool pci_is_pcie(struct pci_dev *dev)
2186*4882a593Smuzhiyun {
2187*4882a593Smuzhiyun 	return pci_pcie_cap(dev);
2188*4882a593Smuzhiyun }
2189*4882a593Smuzhiyun 
/**
 * pcie_caps_reg - get the PCIe Capabilities Register
 * @dev: PCI device
 *
 * Returns: the value cached in @dev->pcie_flags_reg; no config space
 * access is performed.
 */
static inline u16 pcie_caps_reg(const struct pci_dev *dev)
{
	return dev->pcie_flags_reg;
}
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun /**
2200*4882a593Smuzhiyun  * pci_pcie_type - get the PCIe device/port type
2201*4882a593Smuzhiyun  * @dev: PCI device
2202*4882a593Smuzhiyun  */
pci_pcie_type(const struct pci_dev * dev)2203*4882a593Smuzhiyun static inline int pci_pcie_type(const struct pci_dev *dev)
2204*4882a593Smuzhiyun {
2205*4882a593Smuzhiyun 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2206*4882a593Smuzhiyun }
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun /**
2209*4882a593Smuzhiyun  * pcie_find_root_port - Get the PCIe root port device
2210*4882a593Smuzhiyun  * @dev: PCI device
2211*4882a593Smuzhiyun  *
2212*4882a593Smuzhiyun  * Traverse up the parent chain and return the PCIe Root Port PCI Device
2213*4882a593Smuzhiyun  * for a given PCI/PCIe Device.
2214*4882a593Smuzhiyun  */
pcie_find_root_port(struct pci_dev * dev)2215*4882a593Smuzhiyun static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2216*4882a593Smuzhiyun {
2217*4882a593Smuzhiyun 	while (dev) {
2218*4882a593Smuzhiyun 		if (pci_is_pcie(dev) &&
2219*4882a593Smuzhiyun 		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2220*4882a593Smuzhiyun 			return dev;
2221*4882a593Smuzhiyun 		dev = pci_upstream_bridge(dev);
2222*4882a593Smuzhiyun 	}
2223*4882a593Smuzhiyun 
2224*4882a593Smuzhiyun 	return NULL;
2225*4882a593Smuzhiyun }
2226*4882a593Smuzhiyun 
/* ACS (Access Control Services) and AtomicOp-to-root helpers. */
void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags);
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);

#define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)

/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
#define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
#define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */

#define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
#define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)

/* Small Resource Data Type Tag Item Names */
#define PCI_VPD_STIN_END		0x0f	/* End */

#define PCI_VPD_SRDT_END		(PCI_VPD_STIN_END << 3)

/* Tag-item and length masks within the first byte of a resource tag. */
#define PCI_VPD_SRDT_TIN_MASK		0x78
#define PCI_VPD_SRDT_LEN_MASK		0x07
#define PCI_VPD_LRDT_TIN_MASK		0x7f

/* Header sizes: large tags carry a 16-bit length, small tags do not. */
#define PCI_VPD_LRDT_TAG_SIZE		3
#define PCI_VPD_SRDT_TAG_SIZE		1

#define PCI_VPD_INFO_FLD_HDR_SIZE	3

/* Standard VPD read-only section keywords. */
#define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun /**
2266*4882a593Smuzhiyun  * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
2267*4882a593Smuzhiyun  * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
2268*4882a593Smuzhiyun  *
2269*4882a593Smuzhiyun  * Returns the extracted Large Resource Data Type length.
2270*4882a593Smuzhiyun  */
pci_vpd_lrdt_size(const u8 * lrdt)2271*4882a593Smuzhiyun static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
2272*4882a593Smuzhiyun {
2273*4882a593Smuzhiyun 	return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
2274*4882a593Smuzhiyun }
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun /**
2277*4882a593Smuzhiyun  * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
2278*4882a593Smuzhiyun  * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
2279*4882a593Smuzhiyun  *
2280*4882a593Smuzhiyun  * Returns the extracted Large Resource Data Type Tag item.
2281*4882a593Smuzhiyun  */
pci_vpd_lrdt_tag(const u8 * lrdt)2282*4882a593Smuzhiyun static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
2283*4882a593Smuzhiyun {
2284*4882a593Smuzhiyun 	return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
2285*4882a593Smuzhiyun }
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun /**
2288*4882a593Smuzhiyun  * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
2289*4882a593Smuzhiyun  * @srdt: Pointer to the beginning of the Small Resource Data Type tag
2290*4882a593Smuzhiyun  *
2291*4882a593Smuzhiyun  * Returns the extracted Small Resource Data Type length.
2292*4882a593Smuzhiyun  */
pci_vpd_srdt_size(const u8 * srdt)2293*4882a593Smuzhiyun static inline u8 pci_vpd_srdt_size(const u8 *srdt)
2294*4882a593Smuzhiyun {
2295*4882a593Smuzhiyun 	return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
2296*4882a593Smuzhiyun }
2297*4882a593Smuzhiyun 
2298*4882a593Smuzhiyun /**
2299*4882a593Smuzhiyun  * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
2300*4882a593Smuzhiyun  * @srdt: Pointer to the beginning of the Small Resource Data Type tag
2301*4882a593Smuzhiyun  *
2302*4882a593Smuzhiyun  * Returns the extracted Small Resource Data Type Tag Item.
2303*4882a593Smuzhiyun  */
pci_vpd_srdt_tag(const u8 * srdt)2304*4882a593Smuzhiyun static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
2305*4882a593Smuzhiyun {
2306*4882a593Smuzhiyun 	return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
2307*4882a593Smuzhiyun }
2308*4882a593Smuzhiyun 
/**
 * pci_vpd_info_field_size - Extracts the information field length
 * @info_field: Pointer to the beginning of an information field header
 *
 * Returns the extracted information field length (the byte following
 * the two-byte keyword in the header).
 */
static inline u8 pci_vpd_info_field_size(const u8 *info_field)
{
	return info_field[2];
}
2319*4882a593Smuzhiyun 
/**
 * pci_vpd_find_tag - Locates the Resource Data Type tag provided
 * @buf: Pointer to buffered vpd data
 * @off: The offset into the buffer at which to begin the search
 * @len: The length of the vpd buffer
 * @rdt: The Resource Data Type to search for
 *
 * Operates on an in-memory copy of the VPD; no device access is done.
 *
 * Returns the index where the Resource Data Type was found or
 * -ENOENT otherwise.
 */
int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);

/**
 * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
 * @buf: Pointer to buffered vpd data
 * @off: The offset into the buffer at which to begin the search
 * @len: The length of the buffer area, relative to off, in which to search
 * @kw: The keyword to search for
 *
 * Returns the index where the information field keyword was found or
 * -ENOENT otherwise.
 */
int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
			      unsigned int len, const char *kw);
2344*4882a593Smuzhiyun 
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);

/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);

#else	/* CONFIG_OF */
/* Without OF support there is no DT-provided MSI domain. */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
#endif  /* CONFIG_OF */
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev * pdev)2360*4882a593Smuzhiyun pci_device_to_OF_node(const struct pci_dev *pdev)
2361*4882a593Smuzhiyun {
2362*4882a593Smuzhiyun 	return pdev ? pdev->dev.of_node : NULL;
2363*4882a593Smuzhiyun }
2364*4882a593Smuzhiyun 
pci_bus_to_OF_node(struct pci_bus * bus)2365*4882a593Smuzhiyun static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2366*4882a593Smuzhiyun {
2367*4882a593Smuzhiyun 	return bus ? bus->dev.of_node : NULL;
2368*4882a593Smuzhiyun }
2369*4882a593Smuzhiyun 
#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);

void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
bool pci_pr3_present(struct pci_dev *pdev);
#else
/* Without ACPI there is no ACPI MSI domain and no _PR3 information. */
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif
2381*4882a593Smuzhiyun 
#ifdef CONFIG_EEH
/* Look up the EEH state the arch code attached to @pdev. */
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
	return pdev->dev.archdata.edev;
}
#endif

/* DMA alias management: some devices issue DMA using other devfns. */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
			   int (*fn)(struct pci_dev *pdev,
				     u16 alias, void *data), void *data);
2394*4882a593Smuzhiyun 
/* Helper functions for operation of device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
/* True iff all PCI_DEV_FLAGS_ASSIGNED bits are set in dev_flags. */
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun /**
2410*4882a593Smuzhiyun  * pci_ari_enabled - query ARI forwarding status
2411*4882a593Smuzhiyun  * @bus: the PCI bus
2412*4882a593Smuzhiyun  *
2413*4882a593Smuzhiyun  * Returns true if ARI forwarding is enabled.
2414*4882a593Smuzhiyun  */
pci_ari_enabled(struct pci_bus * bus)2415*4882a593Smuzhiyun static inline bool pci_ari_enabled(struct pci_bus *bus)
2416*4882a593Smuzhiyun {
2417*4882a593Smuzhiyun 	return bus->self && bus->self->ari_enabled;
2418*4882a593Smuzhiyun }
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun /**
2421*4882a593Smuzhiyun  * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2422*4882a593Smuzhiyun  * @pdev: PCI device to check
2423*4882a593Smuzhiyun  *
2424*4882a593Smuzhiyun  * Walk upwards from @pdev and check for each encountered bridge if it's part
2425*4882a593Smuzhiyun  * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
2426*4882a593Smuzhiyun  * Thunderbolt-attached.  (But rather soldered to the mainboard usually.)
2427*4882a593Smuzhiyun  */
pci_is_thunderbolt_attached(struct pci_dev * pdev)2428*4882a593Smuzhiyun static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2429*4882a593Smuzhiyun {
2430*4882a593Smuzhiyun 	struct pci_dev *parent = pdev;
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 	if (pdev->is_thunderbolt)
2433*4882a593Smuzhiyun 		return true;
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun 	while ((parent = pci_upstream_bridge(parent)))
2436*4882a593Smuzhiyun 		if (parent->is_thunderbolt)
2437*4882a593Smuzhiyun 			return true;
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun 	return false;
2440*4882a593Smuzhiyun }
2441*4882a593Smuzhiyun 
#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/* Emit a uevent describing an error-recovery state change. */
void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
#endif

/* Provide the legacy pci_dma_* API */
#include <linux/pci-dma-compat.h>

/* dev_printk() wrappers that take a pci_dev instead of a device. */
#define pci_printk(level, pdev, fmt, arg...) \
	dev_printk(level, &(pdev)->dev, fmt, ##arg)

#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

/* WARN()/WARN_ONCE() prefixed with the driver name and device name. */
#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun #endif /* LINUX_PCI_H */
2476