// SPDX-License-Identifier: GPL-2.0-only
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))

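/*
 * Illustrative example: seg 1, bus 2, devfn 0x10, reg 0x44 encodes as
 * 0x01021044 in the legacy format and as 0x10210044 in the extended
 * format.  The extended layout widens the register field to 12 bits,
 * covering the 4K PCI-X/PCIe extended configuration space.
 */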
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
	      int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

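	/*
	 * (seg | reg) <= 255 iff both the segment and the register offset
	 * fit in 8 bits, i.e. the legacy SAL address format applies;
	 * otherwise fall back to the SAL 3.2+ extended format.
	 */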
	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}

int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
	       int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}
	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
							int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
							int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

struct pci_root_info {
	struct acpi_pci_root_info common;
	struct pci_controller controller;
	struct list_head io_resources;
};

static unsigned int new_space(u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
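	/*
	 * Reuse an existing I/O port space if one already maps this base
	 * with the same sparseness; otherwise claim a new slot below.
	 */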
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		pr_err("PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}

static int add_io_space(struct device *dev, struct pci_root_info *info,
			struct resource_entry *entry)
{
	struct resource_entry *iospace;
	struct resource *resource, *res = entry->res;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	len = strlen(info->common.name) + 32;
	iospace = resource_list_create_entry(NULL, len);
	if (!iospace) {
		dev_err(dev, "PCI: No memory for %s I/O port space\n",
			info->common.name);
		return -ENOMEM;
	}

	if (res->flags & IORESOURCE_IO_SPARSE)
		sparse = 1;
	space_nr = new_space(entry->offset, sparse);
	if (space_nr == ~0)
		goto free_resource;

	name = (char *)(iospace + 1);
	min = res->start - entry->offset;
	max = res->end - entry->offset;
	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name,
		 base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

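	/*
	 * Register the MMIO window that backs this port range.  For sparse
	 * spaces, IO_SPACE_SPARSE_ENCODING() spreads the ports out, so the
	 * MMIO window is larger than the raw port range.
	 */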
	resource = iospace->res;
	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	if (insert_resource(&iomem_resource, resource)) {
		dev_err(dev,
			"can't allocate host bridge io space resource %pR\n",
			resource);
		goto free_resource;
	}

	entry->offset = base_port;
	res->start = min + base_port;
	res->end = max + base_port;
	resource_list_add_tail(iospace, &info->io_resources);

	return 0;

free_resource:
	resource_list_free_entry(iospace);
	return -ENOSPC;
}

/*
 * An IO port or MMIO resource assigned to a PCI host bridge may be
 * consumed by the host bridge itself or available to its child
 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
 * to tell whether the resource is consumed by the host bridge itself,
 * but firmware hasn't used that bit consistently, so we can't rely on it.
 *
 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
 * to be available to child bus/devices except one special case:
 *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
 *     to access PCI configuration space.
 *
 * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
 */
static bool resource_is_pcicfg_ioport(struct resource *res)
{
	return (res->flags & IORESOURCE_IO) &&
		res->start == 0xCF8 && res->end == 0xCFF;
}

static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
	struct device *dev = &ci->bridge->dev;
	struct pci_root_info *info;
	struct resource *res;
	struct resource_entry *entry, *tmp;
	int status;

	status = acpi_pci_probe_root_resources(ci);
	if (status > 0) {
		info = container_of(ci, struct pci_root_info, common);
		resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
			res = entry->res;
			if (res->flags & IORESOURCE_MEM) {
				/*
				 * HP's firmware has a hack to work around a
				 * Windows bug: it reports tiny (<= 16 byte)
				 * memory ranges.  Hide them from child
				 * devices by moving them to the bridge's
				 * private io_resources list, while still
				 * reserving them in iomem_resource.
				 */
				if (resource_size(res) <= 16) {
					resource_list_del(entry);
					insert_resource(&iomem_resource,
							entry->res);
					resource_list_add_tail(entry,
							&info->io_resources);
				}
			} else if (res->flags & IORESOURCE_IO) {
				if (resource_is_pcicfg_ioport(entry->res))
					resource_list_destroy_entry(entry);
				else if (add_io_space(dev, info, entry))
					resource_list_destroy_entry(entry);
			}
		}
	}

	return status;
}

static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci)
{
	struct pci_root_info *info;
	struct resource_entry *entry, *tmp;

	info = container_of(ci, struct pci_root_info, common);
	resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) {
		release_resource(entry->res);
		resource_list_destroy_entry(entry);
	}
	kfree(info);
}

static struct acpi_pci_root_ops pci_acpi_root_ops = {
	.pci_ops = &pci_root_ops,
	.release_info = pci_acpi_root_release_info,
	.prepare_resources = pci_acpi_root_prepare_resources,
};

struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	struct pci_root_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&device->dev,
			"pci_bus %04x:%02x: ignored (out of memory)\n",
			root->segment, (int)root->secondary.start);
		return NULL;
	}

	info->controller.segment = root->segment;
	info->controller.companion = device;
	info->controller.node = acpi_get_node(device->handle);
	INIT_LIST_HEAD(&info->io_resources);
	return acpi_pci_root_create(root, &pci_acpi_root_ops,
				    &info->common, &info->controller);
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	/*
	 * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
	 * here, pci_create_root_bus() has been called by someone else and
	 * sysdata is likely to be different from what we expect.  Let it go in
	 * that case.
	 */
	if (!bridge->dev.parent) {
		struct pci_controller *controller = bridge->bus->sysdata;
		ACPI_COMPANION_SET(&bridge->dev, controller->companion);
	}
	return 0;
}

void pcibios_fixup_device_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

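	/* Claim the device BARs (resource indices below PCI_BRIDGE_RESOURCES). */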
	for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_resource(dev, idx);
	}
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

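	/* Claim the bridge windows (PCI_BRIDGE_RESOURCES up to PCI_NUM_RESOURCES). */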
	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_bridge_resource(dev, idx);
	}
}

/*
 * Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
}

void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!pci_dev_msi_enabled(dev))
		return acpi_pci_irq_enable(dev);
	return 0;
}

void
pcibios_disable_device(struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!pci_dev_msi_enabled(dev))
		acpi_pci_irq_disable(dev);
}

/**
 * pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.rst
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

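	/*
	 * Rebase the user's offset onto the legacy memory area returned by
	 * pci_get_legacy_mem() before handing the range to remap_pfn_range().
	 */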
	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * set_pci_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_summary() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified) = */ 2, &cci);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_config_info() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}
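	/*
	 * pcci_line_size is log2 of the line size in bytes; the PCI
	 * cache line size is expressed in 32-bit dwords, hence the /4.
	 */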
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}

static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);