xref: /OK3568_Linux_fs/kernel/arch/sh/drivers/pci/pci.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * New-style PCI core.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2004 - 2009  Paul Mundt
6*4882a593Smuzhiyun  * Copyright (c) 2002  M. R. Brown
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Modelled after arch/mips/pci/pci.c:
9*4882a593Smuzhiyun  *  Copyright (C) 2003, 04 Ralf Baechle (ralf@linux-mips.org)
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/mm.h>
13*4882a593Smuzhiyun #include <linux/pci.h>
14*4882a593Smuzhiyun #include <linux/init.h>
15*4882a593Smuzhiyun #include <linux/types.h>
16*4882a593Smuzhiyun #include <linux/io.h>
17*4882a593Smuzhiyun #include <linux/mutex.h>
18*4882a593Smuzhiyun #include <linux/spinlock.h>
19*4882a593Smuzhiyun #include <linux/export.h>
20*4882a593Smuzhiyun 
/* Lowest I/O port / memory address PCI devices may be assigned. */
unsigned long PCIBIOS_MIN_IO = 0x0000;
unsigned long PCIBIOS_MIN_MEM = 0;

/*
 * The PCI controller list.
 */
static struct pci_channel *hose_head, **hose_tail = &hose_head;

/* Set once pcibios_init() has run; later-registered hoses scan themselves. */
static int pci_initialized;
30*4882a593Smuzhiyun 
/*
 * Scan one PCI controller ("hose") and bring up the root bus behind it.
 *
 * Allocates a host bridge, hands it the hose's enabled resource windows,
 * scans the bus, then sizes/assigns resources and adds the devices.  On
 * failure the function returns silently and the hose is left without a bus.
 *
 * The static locals persist across calls: root bus numbers are handed out
 * sequentially across all hoses, and PCI domain numbering is switched on
 * once the flat 8-bit bus number space would otherwise overflow.
 */
static void pcibios_scanbus(struct pci_channel *hose)
{
	static int next_busno;
	static int need_domain_info;
	LIST_HEAD(resources);
	struct resource *res;
	resource_size_t offset;
	int i, ret;
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return;

	/* Publish every enabled window, translated by the hose's offset. */
	for (i = 0; i < hose->nr_resources; i++) {
		res = hose->resources + i;
		offset = 0;
		if (res->flags & IORESOURCE_DISABLED)
			continue;
		if (res->flags & IORESOURCE_IO)
			offset = hose->io_offset;
		else if (res->flags & IORESOURCE_MEM)
			offset = hose->mem_offset;
		pci_add_resource_offset(&resources, res, offset);
	}

	list_splice_init(&resources, &bridge->windows);
	bridge->dev.parent = NULL;
	bridge->sysdata = hose;
	bridge->busnr = next_busno;
	bridge->ops = hose->pci_ops;
	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq = pcibios_map_platform_irq;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret) {
		pci_free_host_bridge(bridge);
		return;
	}

	hose->bus = bridge->bus;

	/* Any hose other than the first forces domain numbering on. */
	need_domain_info = need_domain_info || hose->index;
	hose->need_domain_info = need_domain_info;

	next_busno = hose->bus->busn_res.end + 1;
	/* Don't allow 8-bit bus number overflow inside the hose -
	   reserve some space for bridges. */
	if (next_busno > 224) {
		next_busno = 0;
		need_domain_info = 1;
	}

	pci_bus_size_bridges(hose->bus);
	pci_bus_assign_resources(hose->bus);
	pci_bus_add_devices(hose->bus);
}
88*4882a593Smuzhiyun 
/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_RAW_SPINLOCK(pci_config_lock);
/* Serializes bus scans triggered by late register_pci_controller() calls. */
static DEFINE_MUTEX(pci_scan_mutex);
95*4882a593Smuzhiyun 
register_pci_controller(struct pci_channel * hose)96*4882a593Smuzhiyun int register_pci_controller(struct pci_channel *hose)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun 	int i;
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	for (i = 0; i < hose->nr_resources; i++) {
101*4882a593Smuzhiyun 		struct resource *res = hose->resources + i;
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 		if (res->flags & IORESOURCE_DISABLED)
104*4882a593Smuzhiyun 			continue;
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 		if (res->flags & IORESOURCE_IO) {
107*4882a593Smuzhiyun 			if (request_resource(&ioport_resource, res) < 0)
108*4882a593Smuzhiyun 				goto out;
109*4882a593Smuzhiyun 		} else {
110*4882a593Smuzhiyun 			if (request_resource(&iomem_resource, res) < 0)
111*4882a593Smuzhiyun 				goto out;
112*4882a593Smuzhiyun 		}
113*4882a593Smuzhiyun 	}
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	*hose_tail = hose;
116*4882a593Smuzhiyun 	hose_tail = &hose->next;
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	/*
119*4882a593Smuzhiyun 	 * Do not panic here but later - this might happen before console init.
120*4882a593Smuzhiyun 	 */
121*4882a593Smuzhiyun 	if (!hose->io_map_base) {
122*4882a593Smuzhiyun 		pr_warn("registering PCI controller with io_map_base unset\n");
123*4882a593Smuzhiyun 	}
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	/*
126*4882a593Smuzhiyun 	 * Setup the ERR/PERR and SERR timers, if available.
127*4882a593Smuzhiyun 	 */
128*4882a593Smuzhiyun 	pcibios_enable_timers(hose);
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	/*
131*4882a593Smuzhiyun 	 * Scan the bus if it is register after the PCI subsystem
132*4882a593Smuzhiyun 	 * initialization.
133*4882a593Smuzhiyun 	 */
134*4882a593Smuzhiyun 	if (pci_initialized) {
135*4882a593Smuzhiyun 		mutex_lock(&pci_scan_mutex);
136*4882a593Smuzhiyun 		pcibios_scanbus(hose);
137*4882a593Smuzhiyun 		mutex_unlock(&pci_scan_mutex);
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	return 0;
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun out:
143*4882a593Smuzhiyun 	for (--i; i >= 0; i--)
144*4882a593Smuzhiyun 		release_resource(&hose->resources[i]);
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	pr_warn("Skipping PCI bus scan due to resource conflict\n");
147*4882a593Smuzhiyun 	return -1;
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun 
pcibios_init(void)150*4882a593Smuzhiyun static int __init pcibios_init(void)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	struct pci_channel *hose;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	/* Scan all of the recorded PCI controllers.  */
155*4882a593Smuzhiyun 	for (hose = hose_head; hose; hose = hose->next)
156*4882a593Smuzhiyun 		pcibios_scanbus(hose);
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	pci_initialized = 1;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	return 0;
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun subsys_initcall(pcibios_init);
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun /*
165*4882a593Smuzhiyun  * We need to avoid collisions with `mirrored' VGA ports
166*4882a593Smuzhiyun  * and other strange ISA hardware, so we always want the
167*4882a593Smuzhiyun  * addresses to be allocated in the 0x000-0x0ff region
168*4882a593Smuzhiyun  * modulo 0x400.
169*4882a593Smuzhiyun  */
/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_channel *hose = dev->sysdata;
	resource_size_t start = res->start;
	resource_size_t io_floor;

	/* Memory resources need no special treatment. */
	if (!(res->flags & IORESOURCE_IO))
		return start;

	/* Never allocate below the hose's first window plus the I/O floor. */
	io_floor = PCIBIOS_MIN_IO + hose->resources[0].start;
	if (start < io_floor)
		start = io_floor;

	/* Put everything into the 0x00-0xff region modulo 0x400. */
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ff;

	return start;
}
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun static void __init
pcibios_bus_report_status_early(struct pci_channel * hose,int top_bus,int current_bus,unsigned int status_mask,int warn)192*4882a593Smuzhiyun pcibios_bus_report_status_early(struct pci_channel *hose,
193*4882a593Smuzhiyun 				int top_bus, int current_bus,
194*4882a593Smuzhiyun 				unsigned int status_mask, int warn)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun 	unsigned int pci_devfn;
197*4882a593Smuzhiyun 	u16 status;
198*4882a593Smuzhiyun 	int ret;
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
201*4882a593Smuzhiyun 		if (PCI_FUNC(pci_devfn))
202*4882a593Smuzhiyun 			continue;
203*4882a593Smuzhiyun 		ret = early_read_config_word(hose, top_bus, current_bus,
204*4882a593Smuzhiyun 					     pci_devfn, PCI_STATUS, &status);
205*4882a593Smuzhiyun 		if (ret != PCIBIOS_SUCCESSFUL)
206*4882a593Smuzhiyun 			continue;
207*4882a593Smuzhiyun 		if (status == 0xffff)
208*4882a593Smuzhiyun 			continue;
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 		early_write_config_word(hose, top_bus, current_bus,
211*4882a593Smuzhiyun 					pci_devfn, PCI_STATUS,
212*4882a593Smuzhiyun 					status & status_mask);
213*4882a593Smuzhiyun 		if (warn)
214*4882a593Smuzhiyun 			pr_cont("(%02x:%02x: %04X) ", current_bus, pci_devfn,
215*4882a593Smuzhiyun 				status);
216*4882a593Smuzhiyun 	}
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun /*
220*4882a593Smuzhiyun  * We can't use pci_find_device() here since we are
221*4882a593Smuzhiyun  * called from interrupt context.
222*4882a593Smuzhiyun  */
223*4882a593Smuzhiyun static void __ref
pcibios_bus_report_status(struct pci_bus * bus,unsigned int status_mask,int warn)224*4882a593Smuzhiyun pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask,
225*4882a593Smuzhiyun 			  int warn)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun 	struct pci_dev *dev;
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	list_for_each_entry(dev, &bus->devices, bus_list) {
230*4882a593Smuzhiyun 		u16 status;
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 		/*
233*4882a593Smuzhiyun 		 * ignore host bridge - we handle
234*4882a593Smuzhiyun 		 * that separately
235*4882a593Smuzhiyun 		 */
236*4882a593Smuzhiyun 		if (dev->bus->number == 0 && dev->devfn == 0)
237*4882a593Smuzhiyun 			continue;
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun 		pci_read_config_word(dev, PCI_STATUS, &status);
240*4882a593Smuzhiyun 		if (status == 0xffff)
241*4882a593Smuzhiyun 			continue;
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 		if ((status & status_mask) == 0)
244*4882a593Smuzhiyun 			continue;
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 		/* clear the status errors */
247*4882a593Smuzhiyun 		pci_write_config_word(dev, PCI_STATUS, status & status_mask);
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 		if (warn)
250*4882a593Smuzhiyun 			pr_cont("(%s: %04X) ", pci_name(dev), status);
251*4882a593Smuzhiyun 	}
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	list_for_each_entry(dev, &bus->devices, bus_list)
254*4882a593Smuzhiyun 		if (dev->subordinate)
255*4882a593Smuzhiyun 			pcibios_bus_report_status(dev->subordinate, status_mask, warn);
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun 
pcibios_report_status(unsigned int status_mask,int warn)258*4882a593Smuzhiyun void __ref pcibios_report_status(unsigned int status_mask, int warn)
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun 	struct pci_channel *hose;
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun 	for (hose = hose_head; hose; hose = hose->next) {
263*4882a593Smuzhiyun 		if (unlikely(!hose->bus))
264*4882a593Smuzhiyun 			pcibios_bus_report_status_early(hose, hose_head->index,
265*4882a593Smuzhiyun 					hose->index, status_mask, warn);
266*4882a593Smuzhiyun 		else
267*4882a593Smuzhiyun 			pcibios_bus_report_status(hose->bus, status_mask, warn);
268*4882a593Smuzhiyun 	}
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun #ifndef CONFIG_GENERIC_IOMAP
272*4882a593Smuzhiyun 
__pci_ioport_map(struct pci_dev * dev,unsigned long port,unsigned int nr)273*4882a593Smuzhiyun void __iomem *__pci_ioport_map(struct pci_dev *dev,
274*4882a593Smuzhiyun 			       unsigned long port, unsigned int nr)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun 	struct pci_channel *chan = dev->sysdata;
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	if (unlikely(!chan->io_map_base)) {
279*4882a593Smuzhiyun 		chan->io_map_base = sh_io_port_base;
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 		if (pci_domains_supported)
282*4882a593Smuzhiyun 			panic("To avoid data corruption io_map_base MUST be "
283*4882a593Smuzhiyun 			      "set with multiple PCI domains.");
284*4882a593Smuzhiyun 	}
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	return (void __iomem *)(chan->io_map_base + port);
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun 
/* Unmap a mapping returned by pci_iomap(); plain iounmap() suffices here. */
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun #endif /* CONFIG_GENERIC_IOMAP */
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun EXPORT_SYMBOL(PCIBIOS_MIN_IO);
298*4882a593Smuzhiyun EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
299