xref: /OK3568_Linux_fs/u-boot/drivers/pci/pci-uclass.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (c) 2014 Google, Inc
3*4882a593Smuzhiyun  * Written by Simon Glass <sjg@chromium.org>
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * SPDX-License-Identifier:	GPL-2.0+
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <common.h>
9*4882a593Smuzhiyun #include <dm.h>
10*4882a593Smuzhiyun #include <errno.h>
11*4882a593Smuzhiyun #include <inttypes.h>
12*4882a593Smuzhiyun #include <pci.h>
13*4882a593Smuzhiyun #include <asm/io.h>
14*4882a593Smuzhiyun #include <dm/device-internal.h>
15*4882a593Smuzhiyun #include <dm/lists.h>
16*4882a593Smuzhiyun #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
17*4882a593Smuzhiyun #include <asm/fsp/fsp_support.h>
18*4882a593Smuzhiyun #endif
19*4882a593Smuzhiyun #include "pci_internal.h"
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun DECLARE_GLOBAL_DATA_PTR;
22*4882a593Smuzhiyun 
pci_get_bus(int busnum,struct udevice ** busp)23*4882a593Smuzhiyun int pci_get_bus(int busnum, struct udevice **busp)
24*4882a593Smuzhiyun {
25*4882a593Smuzhiyun 	int ret;
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 	ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun 	/* Since buses may not be numbered yet try a little harder with bus 0 */
30*4882a593Smuzhiyun 	if (ret == -ENODEV) {
31*4882a593Smuzhiyun 		ret = uclass_first_device_err(UCLASS_PCI, busp);
32*4882a593Smuzhiyun 		if (ret)
33*4882a593Smuzhiyun 			return ret;
34*4882a593Smuzhiyun 		ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
35*4882a593Smuzhiyun 	}
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	return ret;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun 
pci_get_controller(struct udevice * dev)40*4882a593Smuzhiyun struct udevice *pci_get_controller(struct udevice *dev)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun 	while (device_is_on_pci_bus(dev))
43*4882a593Smuzhiyun 		dev = dev->parent;
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	return dev;
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun 
dm_pci_get_bdf(struct udevice * dev)48*4882a593Smuzhiyun pci_dev_t dm_pci_get_bdf(struct udevice *dev)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun 	struct pci_child_platdata *pplat = dev_get_parent_platdata(dev);
51*4882a593Smuzhiyun 	struct udevice *bus = dev->parent;
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	return PCI_ADD_BUS(bus->seq, pplat->devfn);
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun /**
57*4882a593Smuzhiyun  * pci_get_bus_max() - returns the bus number of the last active bus
58*4882a593Smuzhiyun  *
59*4882a593Smuzhiyun  * @return last bus number, or -1 if no active buses
60*4882a593Smuzhiyun  */
pci_get_bus_max(void)61*4882a593Smuzhiyun static int pci_get_bus_max(void)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun 	struct udevice *bus;
64*4882a593Smuzhiyun 	struct uclass *uc;
65*4882a593Smuzhiyun 	int ret = -1;
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	ret = uclass_get(UCLASS_PCI, &uc);
68*4882a593Smuzhiyun 	uclass_foreach_dev(bus, uc) {
69*4882a593Smuzhiyun 		if (bus->seq > ret)
70*4882a593Smuzhiyun 			ret = bus->seq;
71*4882a593Smuzhiyun 	}
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	debug("%s: ret=%d\n", __func__, ret);
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	return ret;
76*4882a593Smuzhiyun }
77*4882a593Smuzhiyun 
/* Return the highest active PCI bus number, or -1 if no buses exist */
int pci_last_busno(void)
{
	return pci_get_bus_max();
}
82*4882a593Smuzhiyun 
pci_get_ff(enum pci_size_t size)83*4882a593Smuzhiyun int pci_get_ff(enum pci_size_t size)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun 	switch (size) {
86*4882a593Smuzhiyun 	case PCI_SIZE_8:
87*4882a593Smuzhiyun 		return 0xff;
88*4882a593Smuzhiyun 	case PCI_SIZE_16:
89*4882a593Smuzhiyun 		return 0xffff;
90*4882a593Smuzhiyun 	default:
91*4882a593Smuzhiyun 		return 0xffffffff;
92*4882a593Smuzhiyun 	}
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun 
/*
 * Search the children of @bus for the device whose devfn matches
 * @find_devfn. Returns 0 with *@devp set, or -ENODEV if not found.
 */
int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn,
		       struct udevice **devp)
{
	struct udevice *child;

	device_find_first_child(bus, &child);
	while (child) {
		struct pci_child_platdata *pplat =
			dev_get_parent_platdata(child);

		if (pplat && pplat->devfn == find_devfn) {
			*devp = child;
			return 0;
		}
		device_find_next_child(&child);
	}

	return -ENODEV;
}
114*4882a593Smuzhiyun 
/* Find the device for a full bus/device/function address */
int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
{
	struct udevice *bus;
	int ret = pci_get_bus(PCI_BUS(bdf), &bus);

	if (ret)
		return ret;

	/* Search that bus for the device/function part of @bdf */
	return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
}
125*4882a593Smuzhiyun 
pci_device_matches_ids(struct udevice * dev,struct pci_device_id * ids)126*4882a593Smuzhiyun static int pci_device_matches_ids(struct udevice *dev,
127*4882a593Smuzhiyun 				  struct pci_device_id *ids)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun 	struct pci_child_platdata *pplat;
130*4882a593Smuzhiyun 	int i;
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	pplat = dev_get_parent_platdata(dev);
133*4882a593Smuzhiyun 	if (!pplat)
134*4882a593Smuzhiyun 		return -EINVAL;
135*4882a593Smuzhiyun 	for (i = 0; ids[i].vendor != 0; i++) {
136*4882a593Smuzhiyun 		if (pplat->vendor == ids[i].vendor &&
137*4882a593Smuzhiyun 		    pplat->device == ids[i].device)
138*4882a593Smuzhiyun 			return i;
139*4882a593Smuzhiyun 	}
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 	return -EINVAL;
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun 
pci_bus_find_devices(struct udevice * bus,struct pci_device_id * ids,int * indexp,struct udevice ** devp)144*4882a593Smuzhiyun int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids,
145*4882a593Smuzhiyun 			 int *indexp, struct udevice **devp)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun 	struct udevice *dev;
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	/* Scan all devices on this bus */
150*4882a593Smuzhiyun 	for (device_find_first_child(bus, &dev);
151*4882a593Smuzhiyun 	     dev;
152*4882a593Smuzhiyun 	     device_find_next_child(&dev)) {
153*4882a593Smuzhiyun 		if (pci_device_matches_ids(dev, ids) >= 0) {
154*4882a593Smuzhiyun 			if ((*indexp)-- <= 0) {
155*4882a593Smuzhiyun 				*devp = dev;
156*4882a593Smuzhiyun 				return 0;
157*4882a593Smuzhiyun 			}
158*4882a593Smuzhiyun 		}
159*4882a593Smuzhiyun 	}
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	return -ENODEV;
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun 
pci_find_device_id(struct pci_device_id * ids,int index,struct udevice ** devp)164*4882a593Smuzhiyun int pci_find_device_id(struct pci_device_id *ids, int index,
165*4882a593Smuzhiyun 		       struct udevice **devp)
166*4882a593Smuzhiyun {
167*4882a593Smuzhiyun 	struct udevice *bus;
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	/* Scan all known buses */
170*4882a593Smuzhiyun 	for (uclass_first_device(UCLASS_PCI, &bus);
171*4882a593Smuzhiyun 	     bus;
172*4882a593Smuzhiyun 	     uclass_next_device(&bus)) {
173*4882a593Smuzhiyun 		if (!pci_bus_find_devices(bus, ids, &index, devp))
174*4882a593Smuzhiyun 			return 0;
175*4882a593Smuzhiyun 	}
176*4882a593Smuzhiyun 	*devp = NULL;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	return -ENODEV;
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun 
dm_pci_bus_find_device(struct udevice * bus,unsigned int vendor,unsigned int device,int * indexp,struct udevice ** devp)181*4882a593Smuzhiyun static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
182*4882a593Smuzhiyun 				  unsigned int device, int *indexp,
183*4882a593Smuzhiyun 				  struct udevice **devp)
184*4882a593Smuzhiyun {
185*4882a593Smuzhiyun 	struct pci_child_platdata *pplat;
186*4882a593Smuzhiyun 	struct udevice *dev;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	for (device_find_first_child(bus, &dev);
189*4882a593Smuzhiyun 	     dev;
190*4882a593Smuzhiyun 	     device_find_next_child(&dev)) {
191*4882a593Smuzhiyun 		pplat = dev_get_parent_platdata(dev);
192*4882a593Smuzhiyun 		if (pplat->vendor == vendor && pplat->device == device) {
193*4882a593Smuzhiyun 			if (!(*indexp)--) {
194*4882a593Smuzhiyun 				*devp = dev;
195*4882a593Smuzhiyun 				return 0;
196*4882a593Smuzhiyun 			}
197*4882a593Smuzhiyun 		}
198*4882a593Smuzhiyun 	}
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	return -ENODEV;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun 
dm_pci_find_device(unsigned int vendor,unsigned int device,int index,struct udevice ** devp)203*4882a593Smuzhiyun int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
204*4882a593Smuzhiyun 		       struct udevice **devp)
205*4882a593Smuzhiyun {
206*4882a593Smuzhiyun 	struct udevice *bus;
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	/* Scan all known buses */
209*4882a593Smuzhiyun 	for (uclass_first_device(UCLASS_PCI, &bus);
210*4882a593Smuzhiyun 	     bus;
211*4882a593Smuzhiyun 	     uclass_next_device(&bus)) {
212*4882a593Smuzhiyun 		if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
213*4882a593Smuzhiyun 			return device_probe(*devp);
214*4882a593Smuzhiyun 	}
215*4882a593Smuzhiyun 	*devp = NULL;
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	return -ENODEV;
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun 
/*
 * Find the @index'th PCI device whose class code equals @find_class and
 * probe it before returning it.
 */
int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
{
	struct udevice *dev;

	for (pci_find_first_device(&dev); dev; pci_find_next_device(&dev)) {
		struct pci_child_platdata *pplat =
			dev_get_parent_platdata(dev);

		if (pplat->class != find_class)
			continue;
		/* Only count down the index on class matches */
		if (!index--) {
			*devp = dev;
			return device_probe(*devp);
		}
	}
	*devp = NULL;

	return -ENODEV;
}
239*4882a593Smuzhiyun 
/* Write to config space via the controller's write_config operation */
int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
			 unsigned long value, enum pci_size_t size)
{
	struct dm_pci_ops *ops = pci_get_ops(bus);

	if (!ops->write_config)
		return -ENOSYS;

	return ops->write_config(bus, bdf, offset, value, size);
}
250*4882a593Smuzhiyun 
/* Read-modify-write a 32-bit config register: clear @clr, then set @set */
int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
			    u32 clr, u32 set)
{
	ulong val;
	int ret;

	ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
	if (ret)
		return ret;

	val = (val & ~clr) | set;

	return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
}
265*4882a593Smuzhiyun 
/* Write to config space of @bdf, resolving the bus from the BDF first */
int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
		     enum pci_size_t size)
{
	struct udevice *bus;
	int ret = pci_get_bus(PCI_BUS(bdf), &bus);

	if (ret)
		return ret;

	return pci_bus_write_config(bus, bdf, offset, value, size);
}
278*4882a593Smuzhiyun 
/* Write to @dev's config space through its root PCI controller */
int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
			enum pci_size_t size)
{
	struct udevice *ctlr = pci_get_controller(dev);

	return pci_bus_write_config(ctlr, dm_pci_get_bdf(dev), offset, value,
				    size);
}
289*4882a593Smuzhiyun 
/* Write a 32-bit value to @offset in the config space of device @bdf */
int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}
294*4882a593Smuzhiyun 
/* Write a 16-bit value to @offset in the config space of device @bdf */
int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}
299*4882a593Smuzhiyun 
/* Write an 8-bit value to @offset in the config space of device @bdf */
int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}
304*4882a593Smuzhiyun 
/* Write an 8-bit value to @offset in @dev's config space */
int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}
309*4882a593Smuzhiyun 
/* Write a 16-bit value to @offset in @dev's config space */
int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}
314*4882a593Smuzhiyun 
/* Write a 32-bit value to @offset in @dev's config space */
int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}
319*4882a593Smuzhiyun 
/* Read from config space via the controller's read_config operation */
int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops = pci_get_ops(bus);

	if (!ops->read_config)
		return -ENOSYS;

	return ops->read_config(bus, bdf, offset, valuep, size);
}
330*4882a593Smuzhiyun 
/* Read from config space of @bdf, resolving the bus from the BDF first */
int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
		    enum pci_size_t size)
{
	struct udevice *bus;
	int ret = pci_get_bus(PCI_BUS(bdf), &bus);

	if (ret)
		return ret;

	return pci_bus_read_config(bus, bdf, offset, valuep, size);
}
343*4882a593Smuzhiyun 
/* Read from @dev's config space through its root PCI controller */
int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep,
		       enum pci_size_t size)
{
	struct udevice *ctlr = pci_get_controller(dev);

	return pci_bus_read_config(ctlr, dm_pci_get_bdf(dev), offset, valuep,
				   size);
}
354*4882a593Smuzhiyun 
/* Read a 32-bit value from @offset in the config space of device @bdf */
int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
{
	unsigned long value;
	int ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);

	if (!ret)
		*valuep = value;

	return ret;
}
367*4882a593Smuzhiyun 
/* Read a 16-bit value from @offset in the config space of device @bdf */
int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
{
	unsigned long value;
	int ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);

	if (!ret)
		*valuep = value;

	return ret;
}
380*4882a593Smuzhiyun 
/* Read an 8-bit value from @offset in the config space of device @bdf */
int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
{
	unsigned long value;
	int ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);

	if (!ret)
		*valuep = value;

	return ret;
}
393*4882a593Smuzhiyun 
/* Read an 8-bit value from @offset in @dev's config space */
int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep)
{
	unsigned long value;
	int ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);

	if (!ret)
		*valuep = value;

	return ret;
}
406*4882a593Smuzhiyun 
/* Read a 16-bit value from @offset in @dev's config space */
int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep)
{
	unsigned long value;
	int ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);

	if (!ret)
		*valuep = value;

	return ret;
}
419*4882a593Smuzhiyun 
/* Read a 32-bit value from @offset in @dev's config space */
int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep)
{
	unsigned long value;
	int ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);

	if (!ret)
		*valuep = value;

	return ret;
}
432*4882a593Smuzhiyun 
/* Read-modify-write an 8-bit config register: clear @clr, then set @set */
int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u8 val;
	int ret;

	ret = dm_pci_read_config8(dev, offset, &val);
	if (ret)
		return ret;

	val = (val & ~clr) | set;

	return dm_pci_write_config8(dev, offset, val);
}
446*4882a593Smuzhiyun 
/* Read-modify-write a 16-bit config register: clear @clr, then set @set */
int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u16 val;
	int ret;

	ret = dm_pci_read_config16(dev, offset, &val);
	if (ret)
		return ret;

	val = (val & ~clr) | set;

	return dm_pci_write_config16(dev, offset, val);
}
460*4882a593Smuzhiyun 
/* Read-modify-write a 32-bit config register: clear @clr, then set @set */
int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u32 val;
	int ret;

	ret = dm_pci_read_config32(dev, offset, &val);
	if (ret)
		return ret;

	val = (val & ~clr) | set;

	return dm_pci_write_config32(dev, offset, val);
}
474*4882a593Smuzhiyun 
set_vga_bridge_bits(struct udevice * dev)475*4882a593Smuzhiyun static void set_vga_bridge_bits(struct udevice *dev)
476*4882a593Smuzhiyun {
477*4882a593Smuzhiyun 	struct udevice *parent = dev->parent;
478*4882a593Smuzhiyun 	u16 bc;
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun 	while (parent->seq != 0) {
481*4882a593Smuzhiyun 		dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
482*4882a593Smuzhiyun 		bc |= PCI_BRIDGE_CTL_VGA;
483*4882a593Smuzhiyun 		dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
484*4882a593Smuzhiyun 		parent = parent->parent;
485*4882a593Smuzhiyun 	}
486*4882a593Smuzhiyun }
487*4882a593Smuzhiyun 
pci_auto_config_devices(struct udevice * bus)488*4882a593Smuzhiyun int pci_auto_config_devices(struct udevice *bus)
489*4882a593Smuzhiyun {
490*4882a593Smuzhiyun 	struct pci_controller *hose = bus->uclass_priv;
491*4882a593Smuzhiyun 	struct pci_child_platdata *pplat;
492*4882a593Smuzhiyun 	unsigned int sub_bus;
493*4882a593Smuzhiyun 	struct udevice *dev;
494*4882a593Smuzhiyun 	int ret;
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun 	sub_bus = bus->seq;
497*4882a593Smuzhiyun 	debug("%s: start\n", __func__);
498*4882a593Smuzhiyun 	pciauto_config_init(hose);
499*4882a593Smuzhiyun 	for (ret = device_find_first_child(bus, &dev);
500*4882a593Smuzhiyun 	     !ret && dev;
501*4882a593Smuzhiyun 	     ret = device_find_next_child(&dev)) {
502*4882a593Smuzhiyun 		unsigned int max_bus;
503*4882a593Smuzhiyun 		int ret;
504*4882a593Smuzhiyun 
505*4882a593Smuzhiyun 		debug("%s: device %s\n", __func__, dev->name);
506*4882a593Smuzhiyun 		ret = dm_pciauto_config_device(dev);
507*4882a593Smuzhiyun 		if (ret < 0)
508*4882a593Smuzhiyun 			return ret;
509*4882a593Smuzhiyun 		max_bus = ret;
510*4882a593Smuzhiyun 		sub_bus = max(sub_bus, max_bus);
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 		pplat = dev_get_parent_platdata(dev);
513*4882a593Smuzhiyun 		if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
514*4882a593Smuzhiyun 			set_vga_bridge_bits(dev);
515*4882a593Smuzhiyun 	}
516*4882a593Smuzhiyun 	debug("%s: done\n", __func__);
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun 	return sub_bus;
519*4882a593Smuzhiyun }
520*4882a593Smuzhiyun 
/*
 * Probe a bridge's subordinate bus: predict the next free bus number,
 * program the bridge around the probe, and return the highest bus
 * number found below it (or -ve on error).
 */
int dm_pci_hose_probe_bus(struct udevice *bus)
{
	int sub_bus;
	int ret;

	debug("%s\n", __func__);

	/* The new bus gets the number after the current highest bus */
	sub_bus = pci_get_bus_max() + 1;
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	dm_pciauto_prescan_setup_bridge(bus, sub_bus);

	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return ret;
	}
	/* Probing must have assigned exactly the sequence we predicted */
	if (sub_bus != bus->seq) {
		printf("%s: Internal error, bus '%s' got seq %d, expected %d\n",
		       __func__, bus->name, bus->seq, sub_bus);
		return -EPIPE;
	}
	/* Account for any buses added while scanning below this bridge */
	sub_bus = pci_get_bus_max();
	dm_pciauto_postscan_setup_bridge(bus, sub_bus);

	return sub_bus;
}
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun /**
550*4882a593Smuzhiyun  * pci_match_one_device - Tell if a PCI device structure has a matching
551*4882a593Smuzhiyun  *                        PCI device id structure
552*4882a593Smuzhiyun  * @id: single PCI device id structure to match
553*4882a593Smuzhiyun  * @find: the PCI device id structure to match against
554*4882a593Smuzhiyun  *
555*4882a593Smuzhiyun  * Returns true if the finding pci_device_id structure matched or false if
556*4882a593Smuzhiyun  * there is no match.
557*4882a593Smuzhiyun  */
pci_match_one_id(const struct pci_device_id * id,const struct pci_device_id * find)558*4882a593Smuzhiyun static bool pci_match_one_id(const struct pci_device_id *id,
559*4882a593Smuzhiyun 			     const struct pci_device_id *find)
560*4882a593Smuzhiyun {
561*4882a593Smuzhiyun 	if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
562*4882a593Smuzhiyun 	    (id->device == PCI_ANY_ID || id->device == find->device) &&
563*4882a593Smuzhiyun 	    (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
564*4882a593Smuzhiyun 	    (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
565*4882a593Smuzhiyun 	    !((id->class ^ find->class) & id->class_mask))
566*4882a593Smuzhiyun 		return true;
567*4882a593Smuzhiyun 
568*4882a593Smuzhiyun 	return false;
569*4882a593Smuzhiyun }
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun /**
572*4882a593Smuzhiyun  * pci_find_and_bind_driver() - Find and bind the right PCI driver
573*4882a593Smuzhiyun  *
574*4882a593Smuzhiyun  * This only looks at certain fields in the descriptor.
575*4882a593Smuzhiyun  *
576*4882a593Smuzhiyun  * @parent:	Parent bus
577*4882a593Smuzhiyun  * @find_id:	Specification of the driver to find
578*4882a593Smuzhiyun  * @bdf:	Bus/device/function addreess - see PCI_BDF()
579*4882a593Smuzhiyun  * @devp:	Returns a pointer to the device created
580*4882a593Smuzhiyun  * @return 0 if OK, -EPERM if the device is not needed before relocation and
581*4882a593Smuzhiyun  *	   therefore was not created, other -ve value on error
582*4882a593Smuzhiyun  */
pci_find_and_bind_driver(struct udevice * parent,struct pci_device_id * find_id,pci_dev_t bdf,struct udevice ** devp)583*4882a593Smuzhiyun static int pci_find_and_bind_driver(struct udevice *parent,
584*4882a593Smuzhiyun 				    struct pci_device_id *find_id,
585*4882a593Smuzhiyun 				    pci_dev_t bdf, struct udevice **devp)
586*4882a593Smuzhiyun {
587*4882a593Smuzhiyun 	struct pci_driver_entry *start, *entry;
588*4882a593Smuzhiyun 	const char *drv;
589*4882a593Smuzhiyun 	int n_ents;
590*4882a593Smuzhiyun 	int ret;
591*4882a593Smuzhiyun 	char name[30], *str;
592*4882a593Smuzhiyun 	bool bridge;
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 	*devp = NULL;
595*4882a593Smuzhiyun 
596*4882a593Smuzhiyun 	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
597*4882a593Smuzhiyun 	      find_id->vendor, find_id->device);
598*4882a593Smuzhiyun 	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
599*4882a593Smuzhiyun 	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
600*4882a593Smuzhiyun 	for (entry = start; entry != start + n_ents; entry++) {
601*4882a593Smuzhiyun 		const struct pci_device_id *id;
602*4882a593Smuzhiyun 		struct udevice *dev;
603*4882a593Smuzhiyun 		const struct driver *drv;
604*4882a593Smuzhiyun 
605*4882a593Smuzhiyun 		for (id = entry->match;
606*4882a593Smuzhiyun 		     id->vendor || id->subvendor || id->class_mask;
607*4882a593Smuzhiyun 		     id++) {
608*4882a593Smuzhiyun 			if (!pci_match_one_id(id, find_id))
609*4882a593Smuzhiyun 				continue;
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 			drv = entry->driver;
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun 			/*
614*4882a593Smuzhiyun 			 * In the pre-relocation phase, we only bind devices
615*4882a593Smuzhiyun 			 * whose driver has the DM_FLAG_PRE_RELOC set, to save
616*4882a593Smuzhiyun 			 * precious memory space as on some platforms as that
617*4882a593Smuzhiyun 			 * space is pretty limited (ie: using Cache As RAM).
618*4882a593Smuzhiyun 			 */
619*4882a593Smuzhiyun 			if (!(gd->flags & GD_FLG_RELOC) &&
620*4882a593Smuzhiyun 			    !(drv->flags & DM_FLAG_PRE_RELOC))
621*4882a593Smuzhiyun 				return -EPERM;
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 			/*
624*4882a593Smuzhiyun 			 * We could pass the descriptor to the driver as
625*4882a593Smuzhiyun 			 * platdata (instead of NULL) and allow its bind()
626*4882a593Smuzhiyun 			 * method to return -ENOENT if it doesn't support this
627*4882a593Smuzhiyun 			 * device. That way we could continue the search to
628*4882a593Smuzhiyun 			 * find another driver. For now this doesn't seem
629*4882a593Smuzhiyun 			 * necesssary, so just bind the first match.
630*4882a593Smuzhiyun 			 */
631*4882a593Smuzhiyun 			ret = device_bind(parent, drv, drv->name, NULL, -1,
632*4882a593Smuzhiyun 					  &dev);
633*4882a593Smuzhiyun 			if (ret)
634*4882a593Smuzhiyun 				goto error;
635*4882a593Smuzhiyun 			debug("%s: Match found: %s\n", __func__, drv->name);
636*4882a593Smuzhiyun 			dev->driver_data = find_id->driver_data;
637*4882a593Smuzhiyun 			*devp = dev;
638*4882a593Smuzhiyun 			return 0;
639*4882a593Smuzhiyun 		}
640*4882a593Smuzhiyun 	}
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun 	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
643*4882a593Smuzhiyun 	/*
644*4882a593Smuzhiyun 	 * In the pre-relocation phase, we only bind bridge devices to save
645*4882a593Smuzhiyun 	 * precious memory space as on some platforms as that space is pretty
646*4882a593Smuzhiyun 	 * limited (ie: using Cache As RAM).
647*4882a593Smuzhiyun 	 */
648*4882a593Smuzhiyun 	if (!(gd->flags & GD_FLG_RELOC) && !bridge)
649*4882a593Smuzhiyun 		return -EPERM;
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 	/* Bind a generic driver so that the device can be used */
652*4882a593Smuzhiyun 	sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf),
653*4882a593Smuzhiyun 		PCI_FUNC(bdf));
654*4882a593Smuzhiyun 	str = strdup(name);
655*4882a593Smuzhiyun 	if (!str)
656*4882a593Smuzhiyun 		return -ENOMEM;
657*4882a593Smuzhiyun 	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	ret = device_bind_driver(parent, drv, str, devp);
660*4882a593Smuzhiyun 	if (ret) {
661*4882a593Smuzhiyun 		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
662*4882a593Smuzhiyun 		free(str);
663*4882a593Smuzhiyun 		return ret;
664*4882a593Smuzhiyun 	}
665*4882a593Smuzhiyun 	debug("%s: No match found: bound generic driver instead\n", __func__);
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 	return 0;
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun error:
670*4882a593Smuzhiyun 	debug("%s: No match found: error %d\n", __func__, ret);
671*4882a593Smuzhiyun 	return ret;
672*4882a593Smuzhiyun }
673*4882a593Smuzhiyun 
/**
 * pci_bind_bus_devices() - scan a PCI bus and bind a driver for each device
 *
 * Walks every devfn on @bus (all devices, all functions), probing config
 * space for a vendor ID. For each responding function it first looks for a
 * matching device-tree node; failing that it binds a driver via
 * pci_find_and_bind_driver(). The child's parent platform data is then
 * filled in with the devfn, vendor, device and class read from config space.
 *
 * @bus:	bus to scan (children are created/updated as a side effect)
 * @return 0 if OK, -ve on error; -EPERM from the bind step is treated as
 *	"skip this device", not as a failure
 */
int pci_bind_bus_devices(struct udevice *bus)
{
	ulong vendor, device;
	ulong header_type;
	pci_dev_t bdf, end;
	bool found_multi;
	int ret;

	found_multi = false;
	end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1,
		      PCI_MAX_PCI_FUNCTIONS - 1);
	for (bdf = PCI_BDF(bus->seq, 0, 0); bdf <= end;
	     bdf += PCI_BDF(0, 0, 1)) {
		struct pci_child_platdata *pplat;
		struct udevice *dev;
		ulong class;

		/* Skip functions 1-7 unless function 0 reported multi-fn */
		if (PCI_FUNC(bdf) && !found_multi)
			continue;
		/* Check only the first access, we don't expect problems */
		ret = pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
					  &header_type, PCI_SIZE_8);
		if (ret)
			goto error;
		pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
				    PCI_SIZE_16);
		/* All-ones/all-zeroes means nothing responded at this devfn */
		if (vendor == 0xffff || vendor == 0x0000)
			continue;

		/* Bit 7 of the header type flags a multi-function device */
		if (!PCI_FUNC(bdf))
			found_multi = header_type & 0x80;

		debug("%s: bus %d/%s: found device %x, function %d\n", __func__,
		      bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
		pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
				    PCI_SIZE_16);
		pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
				    PCI_SIZE_32);
		class >>= 8;	/* drop the revision byte, keep the class */

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);

		/* If nothing in the device tree, bind a device */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;
			ulong val;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;
			/* Subsystem IDs exist only in type-0 headers */
			if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
				pci_bus_read_config(bus, bdf,
						    PCI_SUBSYSTEM_VENDOR_ID,
						    &val, PCI_SIZE_32);
				find_id.subvendor = val & 0xffff;
				find_id.subdevice = val >> 16;
			}
			ret = pci_find_and_bind_driver(bus, &find_id, bdf,
						       &dev);
		}
		/* -EPERM means this device is deliberately not bound now */
		if (ret == -EPERM)
			continue;
		else if (ret)
			return ret;

		/* Update the platform data */
		pplat = dev_get_parent_platdata(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;
	}

	return 0;
error:
	printf("Cannot read bus configuration: %d\n", ret);

	return ret;
}
755*4882a593Smuzhiyun 
/**
 * decode_regions() - parse the "ranges" property into hose->regions[]
 *
 * Each ranges record is <pci-addr (3 cells)> <parent-addr> <size>. Records
 * are classified by the space code in the first PCI address cell: memory
 * (prefetchable or not) or I/O; config-space records are skipped. Records
 * of the same type are merged into one region slot. Finally a
 * PCI_REGION_SYS_MEMORY region covering local DRAM (clamped to
 * gd->pci_ram_top when set) is appended.
 *
 * @hose:	controller to fill in (region_count and regions[])
 * @parent_node: parent ofnode, used for the parent's #address-cells
 * @node:	PCI controller node containing "ranges"
 * @return 0 if OK, -EINVAL if there is no "ranges" property
 */
static int decode_regions(struct pci_controller *hose, ofnode parent_node,
			  ofnode node)
{
	int pci_addr_cells, addr_cells, size_cells;
	phys_addr_t base = 0, size;
	int cells_per_record;
	const u32 *prop;
	int len;
	int i;

	prop = ofnode_get_property(node, "ranges", &len);
	if (!prop)
		return -EINVAL;
	pci_addr_cells = ofnode_read_simple_addr_cells(node);
	addr_cells = ofnode_read_simple_addr_cells(parent_node);
	size_cells = ofnode_read_simple_size_cells(node);

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	hose->region_count = 0;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);
	for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) {
		u64 pci_addr, addr, size;
		int space_code;
		u32 flags;
		int type;
		int pos;
		int j;

		if (len < cells_per_record)
			break;
		flags = fdt32_to_cpu(prop[0]);
		space_code = (flags >> 24) & 3;
		pci_addr = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		addr = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;
		debug("%s: region %d, pci_addr=%" PRIx64 ", addr=%" PRIx64
		      ", size=%" PRIx64 ", space_code=%d\n", __func__,
		      hose->region_count, pci_addr, addr, size, space_code);
		if (space_code & 2) {
			/* Bit 30 of the flags cell marks prefetchable memory */
			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
					PCI_REGION_MEM;
		} else if (space_code & 1) {
			type = PCI_REGION_IO;
		} else {
			continue;	/* config space: not a usable region */
		}
		/*
		 * Merge records of the same type into one region slot. Use a
		 * dedicated index here: the previous code reused 'i', which
		 * clobbered the outer record counter and made its bound
		 * silently depend on region_count.
		 */
		pos = -1;
		for (j = 0; j < hose->region_count; j++) {
			if (hose->regions[j].flags == type)
				pos = j;
		}
		if (pos == -1)
			pos = hose->region_count++;
		debug(" - type=%d, pos=%d\n", type, pos);
		pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
	}

	/* Add a region for our local memory */
	size = gd->ram_size;
#ifdef CONFIG_SYS_SDRAM_BASE
	base = CONFIG_SYS_SDRAM_BASE;
#endif
	if (gd->pci_ram_top && gd->pci_ram_top < base + size)
		size = gd->pci_ram_top - base;
	pci_set_region(hose->regions + hose->region_count++, base, base,
		       size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	return 0;
}
830*4882a593Smuzhiyun 
pci_uclass_pre_probe(struct udevice * bus)831*4882a593Smuzhiyun static int pci_uclass_pre_probe(struct udevice *bus)
832*4882a593Smuzhiyun {
833*4882a593Smuzhiyun 	struct pci_controller *hose;
834*4882a593Smuzhiyun 	int ret;
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 	debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name,
837*4882a593Smuzhiyun 	      bus->parent->name);
838*4882a593Smuzhiyun 	hose = bus->uclass_priv;
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	/* For bridges, use the top-level PCI controller */
841*4882a593Smuzhiyun 	if (!device_is_on_pci_bus(bus)) {
842*4882a593Smuzhiyun 		hose->ctlr = bus;
843*4882a593Smuzhiyun 		ret = decode_regions(hose, dev_ofnode(bus->parent),
844*4882a593Smuzhiyun 				     dev_ofnode(bus));
845*4882a593Smuzhiyun 		if (ret) {
846*4882a593Smuzhiyun 			debug("%s: Cannot decode regions\n", __func__);
847*4882a593Smuzhiyun 			return ret;
848*4882a593Smuzhiyun 		}
849*4882a593Smuzhiyun 	} else {
850*4882a593Smuzhiyun 		struct pci_controller *parent_hose;
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 		parent_hose = dev_get_uclass_priv(bus->parent);
853*4882a593Smuzhiyun 		hose->ctlr = parent_hose->bus;
854*4882a593Smuzhiyun 	}
855*4882a593Smuzhiyun 	hose->bus = bus;
856*4882a593Smuzhiyun 	hose->first_busno = bus->seq;
857*4882a593Smuzhiyun 	hose->last_busno = bus->seq;
858*4882a593Smuzhiyun 
859*4882a593Smuzhiyun 	return 0;
860*4882a593Smuzhiyun }
861*4882a593Smuzhiyun 
/**
 * pci_uclass_post_probe() - finish setting up a probed PCI bus
 *
 * Binds drivers for every device found on @bus and, with CONFIG_PCI_PNP,
 * auto-configures them. On x86/FSP boards it also notifies the FSP once
 * the root bus has been enumerated after relocation.
 */
static int pci_uclass_post_probe(struct udevice *bus)
{
	int ret;

	debug("%s: probing bus %d\n", __func__, bus->seq);
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return ret;

#ifdef CONFIG_PCI_PNP
	/* Assign BARs and bus numbers for the devices found by the scan */
	ret = pci_auto_config_devices(bus);
	if (ret < 0)
		return ret;
#endif

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per Intel FSP specification, we should call FSP notify API to
	 * inform FSP that PCI enumeration has been done so that FSP will
	 * do any necessary initialization as required by the chipset's
	 * BIOS Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here as with driver model,
	 * the enumeration is all done on a lazy basis as needed, so until
	 * something is touched on PCI it won't happen.
	 *
	 * Note we only call this 1) after U-Boot is relocated, and 2)
	 * root bus has finished probing.
	 */
	if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0)) {
		ret = fsp_init_phase_pci();
		if (ret)
			return ret;
	}
#endif

	return 0;
}
900*4882a593Smuzhiyun 
pci_uclass_child_post_bind(struct udevice * dev)901*4882a593Smuzhiyun static int pci_uclass_child_post_bind(struct udevice *dev)
902*4882a593Smuzhiyun {
903*4882a593Smuzhiyun 	struct pci_child_platdata *pplat;
904*4882a593Smuzhiyun 	struct fdt_pci_addr addr;
905*4882a593Smuzhiyun 	int ret;
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun 	if (!dev_of_valid(dev))
908*4882a593Smuzhiyun 		return 0;
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	/*
911*4882a593Smuzhiyun 	 * We could read vendor, device, class if available. But for now we
912*4882a593Smuzhiyun 	 * just check the address.
913*4882a593Smuzhiyun 	 */
914*4882a593Smuzhiyun 	pplat = dev_get_parent_platdata(dev);
915*4882a593Smuzhiyun 	ret = ofnode_read_pci_addr(dev_ofnode(dev), FDT_PCI_SPACE_CONFIG, "reg",
916*4882a593Smuzhiyun 				   &addr);
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	if (ret) {
919*4882a593Smuzhiyun 		if (ret != -ENOENT)
920*4882a593Smuzhiyun 			return -EINVAL;
921*4882a593Smuzhiyun 	} else {
922*4882a593Smuzhiyun 		/* extract the devfn from fdt_pci_addr */
923*4882a593Smuzhiyun 		pplat->devfn = addr.phys_hi & 0xff00;
924*4882a593Smuzhiyun 	}
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	return 0;
927*4882a593Smuzhiyun }
928*4882a593Smuzhiyun 
/* Forward a config read on a bridge bus to the top-level controller */
static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf,
				  uint offset, ulong *valuep,
				  enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;
	struct udevice *ctlr = hose->ctlr;

	return pci_bus_read_config(ctlr, bdf, offset, valuep, size);
}
937*4882a593Smuzhiyun 
/* Forward a config write on a bridge bus to the top-level controller */
static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
				   uint offset, ulong value,
				   enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;
	struct udevice *ctlr = hose->ctlr;

	return pci_bus_write_config(ctlr, bdf, offset, value, size);
}
946*4882a593Smuzhiyun 
/**
 * skip_to_next_device() - find the first PCI device from @bus onwards
 *
 * Returns the first child of @bus, or of any later bus in the uclass, in
 * *@devp. *@devp is untouched when every remaining bus is empty.
 *
 * Scan through all the PCI controllers. On x86 there will only be one
 * but that is not necessarily true on other hardware.
 */
static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
{
	while (bus) {
		struct udevice *dev;
		int ret;

		device_find_first_child(bus, &dev);
		if (dev) {
			*devp = dev;
			return 0;
		}
		ret = uclass_next_device(&bus);
		if (ret)
			return ret;
	}

	return 0;
}
969*4882a593Smuzhiyun 
pci_find_next_device(struct udevice ** devp)970*4882a593Smuzhiyun int pci_find_next_device(struct udevice **devp)
971*4882a593Smuzhiyun {
972*4882a593Smuzhiyun 	struct udevice *child = *devp;
973*4882a593Smuzhiyun 	struct udevice *bus = child->parent;
974*4882a593Smuzhiyun 	int ret;
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	/* First try all the siblings */
977*4882a593Smuzhiyun 	*devp = NULL;
978*4882a593Smuzhiyun 	while (child) {
979*4882a593Smuzhiyun 		device_find_next_child(&child);
980*4882a593Smuzhiyun 		if (child) {
981*4882a593Smuzhiyun 			*devp = child;
982*4882a593Smuzhiyun 			return 0;
983*4882a593Smuzhiyun 		}
984*4882a593Smuzhiyun 	}
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun 	/* We ran out of siblings. Try the next bus */
987*4882a593Smuzhiyun 	ret = uclass_next_device(&bus);
988*4882a593Smuzhiyun 	if (ret)
989*4882a593Smuzhiyun 		return ret;
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	return bus ? skip_to_next_device(bus, devp) : 0;
992*4882a593Smuzhiyun }
993*4882a593Smuzhiyun 
pci_find_first_device(struct udevice ** devp)994*4882a593Smuzhiyun int pci_find_first_device(struct udevice **devp)
995*4882a593Smuzhiyun {
996*4882a593Smuzhiyun 	struct udevice *bus;
997*4882a593Smuzhiyun 	int ret;
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	*devp = NULL;
1000*4882a593Smuzhiyun 	ret = uclass_first_device(UCLASS_PCI, &bus);
1001*4882a593Smuzhiyun 	if (ret)
1002*4882a593Smuzhiyun 		return ret;
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 	return skip_to_next_device(bus, devp);
1005*4882a593Smuzhiyun }
1006*4882a593Smuzhiyun 
/**
 * pci_conv_32_to_size() - extract an 8/16-bit field from a 32-bit read
 *
 * Picks the byte or half-word selected by the low offset bits out of a
 * 32-bit config-space value; 32-bit accesses pass through unchanged.
 */
ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
{
	uint shift;

	switch (size) {
	case PCI_SIZE_8:
		shift = (offset & 3) * 8;
		return (value >> shift) & 0xff;
	case PCI_SIZE_16:
		shift = (offset & 2) * 8;
		return (value >> shift) & 0xffff;
	default:
		return value;
	}
}
1018*4882a593Smuzhiyun 
/**
 * pci_conv_size_to_32() - merge an 8/16-bit write into a 32-bit word
 *
 * Places @value into the byte/half-word lane of @old selected by the low
 * offset bits, leaving the other lanes intact; 32-bit writes replace the
 * whole word.
 */
ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
			  enum pci_size_t size)
{
	uint shift;
	ulong mask;

	switch (size) {
	case PCI_SIZE_8:
		shift = (offset & 3) * 8;
		mask = (ulong)0xff << shift;
		break;
	case PCI_SIZE_16:
		shift = (offset & 2) * 8;
		mask = (ulong)0xffff << shift;
		break;
	default:
		/* Full-width access: the new value replaces everything */
		return value;
	}

	/* Keep the untouched lanes of 'old', insert the shifted new value */
	return (old & ~mask) | ((value << shift) & mask);
}
1045*4882a593Smuzhiyun 
pci_get_regions(struct udevice * dev,struct pci_region ** iop,struct pci_region ** memp,struct pci_region ** prefp)1046*4882a593Smuzhiyun int pci_get_regions(struct udevice *dev, struct pci_region **iop,
1047*4882a593Smuzhiyun 		    struct pci_region **memp, struct pci_region **prefp)
1048*4882a593Smuzhiyun {
1049*4882a593Smuzhiyun 	struct udevice *bus = pci_get_controller(dev);
1050*4882a593Smuzhiyun 	struct pci_controller *hose = dev_get_uclass_priv(bus);
1051*4882a593Smuzhiyun 	int i;
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	*iop = NULL;
1054*4882a593Smuzhiyun 	*memp = NULL;
1055*4882a593Smuzhiyun 	*prefp = NULL;
1056*4882a593Smuzhiyun 	for (i = 0; i < hose->region_count; i++) {
1057*4882a593Smuzhiyun 		switch (hose->regions[i].flags) {
1058*4882a593Smuzhiyun 		case PCI_REGION_IO:
1059*4882a593Smuzhiyun 			if (!*iop || (*iop)->size < hose->regions[i].size)
1060*4882a593Smuzhiyun 				*iop = hose->regions + i;
1061*4882a593Smuzhiyun 			break;
1062*4882a593Smuzhiyun 		case PCI_REGION_MEM:
1063*4882a593Smuzhiyun 			if (!*memp || (*memp)->size < hose->regions[i].size)
1064*4882a593Smuzhiyun 				*memp = hose->regions + i;
1065*4882a593Smuzhiyun 			break;
1066*4882a593Smuzhiyun 		case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
1067*4882a593Smuzhiyun 			if (!*prefp || (*prefp)->size < hose->regions[i].size)
1068*4882a593Smuzhiyun 				*prefp = hose->regions + i;
1069*4882a593Smuzhiyun 			break;
1070*4882a593Smuzhiyun 		}
1071*4882a593Smuzhiyun 	}
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun 
dm_pci_read_bar32(struct udevice * dev,int barnum)1076*4882a593Smuzhiyun u32 dm_pci_read_bar32(struct udevice *dev, int barnum)
1077*4882a593Smuzhiyun {
1078*4882a593Smuzhiyun 	u32 addr;
1079*4882a593Smuzhiyun 	int bar;
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
1082*4882a593Smuzhiyun 	dm_pci_read_config32(dev, bar, &addr);
1083*4882a593Smuzhiyun 	if (addr & PCI_BASE_ADDRESS_SPACE_IO)
1084*4882a593Smuzhiyun 		return addr & PCI_BASE_ADDRESS_IO_MASK;
1085*4882a593Smuzhiyun 	else
1086*4882a593Smuzhiyun 		return addr & PCI_BASE_ADDRESS_MEM_MASK;
1087*4882a593Smuzhiyun }
1088*4882a593Smuzhiyun 
/* Write @addr into BAR @barnum (0-based) of @dev's config space */
void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
{
	dm_pci_write_config32(dev, PCI_BASE_ADDRESS_0 + barnum * 4, addr);
}
1096*4882a593Smuzhiyun 
/**
 * _dm_pci_bus_to_phys() - translate a bus address using one controller
 *
 * Searches @ctlr's regions for one of the requested type (skipping any
 * whose flags match @skip_mask) that contains @bus_addr, and writes the
 * corresponding physical address to *@pa.
 *
 * @return 0 on success, 1 if no region contains the address
 */
static int _dm_pci_bus_to_phys(struct udevice *ctlr,
			       pci_addr_t bus_addr, unsigned long flags,
			       unsigned long skip_mask, phys_addr_t *pa)
{
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
	int i;

	for (i = 0; i < hose->region_count; i++) {
		struct pci_region *res = &hose->regions[i];
		pci_addr_t offset;

		if ((res->flags ^ flags) & PCI_REGION_TYPE)
			continue;
		if (res->flags & skip_mask)
			continue;
		if (bus_addr < res->bus_start)
			continue;
		offset = bus_addr - res->bus_start;
		if (offset >= res->size)
			continue;

		*pa = res->phys_start + offset;
		return 0;
	}

	return 1;
}
1123*4882a593Smuzhiyun 
dm_pci_bus_to_phys(struct udevice * dev,pci_addr_t bus_addr,unsigned long flags)1124*4882a593Smuzhiyun phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
1125*4882a593Smuzhiyun 			       unsigned long flags)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	phys_addr_t phys_addr = 0;
1128*4882a593Smuzhiyun 	struct udevice *ctlr;
1129*4882a593Smuzhiyun 	int ret;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	/* The root controller has the region information */
1132*4882a593Smuzhiyun 	ctlr = pci_get_controller(dev);
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	/*
1135*4882a593Smuzhiyun 	 * if PCI_REGION_MEM is set we do a two pass search with preference
1136*4882a593Smuzhiyun 	 * on matches that don't have PCI_REGION_SYS_MEMORY set
1137*4882a593Smuzhiyun 	 */
1138*4882a593Smuzhiyun 	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
1139*4882a593Smuzhiyun 		ret = _dm_pci_bus_to_phys(ctlr, bus_addr,
1140*4882a593Smuzhiyun 					  flags, PCI_REGION_SYS_MEMORY,
1141*4882a593Smuzhiyun 					  &phys_addr);
1142*4882a593Smuzhiyun 		if (!ret)
1143*4882a593Smuzhiyun 			return phys_addr;
1144*4882a593Smuzhiyun 	}
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	if (ret)
1149*4882a593Smuzhiyun 		puts("pci_hose_bus_to_phys: invalid physical address\n");
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	return phys_addr;
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun 
/**
 * _dm_pci_phys_to_bus() - translate a physical address to a bus address
 *
 * Searches the root controller's regions for one of the requested type
 * (skipping any whose flags match @skip_mask) whose bus window contains
 * the translated address, and writes the bus address to *@ba.
 *
 * @return 0 on success, 1 if no region matches
 */
int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			unsigned long flags, unsigned long skip_mask,
			pci_addr_t *ba)
{
	/* The root controller has the region information */
	struct udevice *ctlr = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
	int i;

	for (i = 0; i < hose->region_count; i++) {
		struct pci_region *res = &hose->regions[i];
		pci_addr_t bus_addr;

		if ((res->flags ^ flags) & PCI_REGION_TYPE)
			continue;
		if (res->flags & skip_mask)
			continue;

		bus_addr = phys_addr - res->phys_start + res->bus_start;
		if (bus_addr >= res->bus_start &&
		    bus_addr - res->bus_start < res->size) {
			*ba = bus_addr;
			return 0;
		}
	}

	return 1;
}
1188*4882a593Smuzhiyun 
dm_pci_phys_to_bus(struct udevice * dev,phys_addr_t phys_addr,unsigned long flags)1189*4882a593Smuzhiyun pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
1190*4882a593Smuzhiyun 			      unsigned long flags)
1191*4882a593Smuzhiyun {
1192*4882a593Smuzhiyun 	pci_addr_t bus_addr = 0;
1193*4882a593Smuzhiyun 	int ret;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	/*
1196*4882a593Smuzhiyun 	 * if PCI_REGION_MEM is set we do a two pass search with preference
1197*4882a593Smuzhiyun 	 * on matches that don't have PCI_REGION_SYS_MEMORY set
1198*4882a593Smuzhiyun 	 */
1199*4882a593Smuzhiyun 	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
1200*4882a593Smuzhiyun 		ret = _dm_pci_phys_to_bus(dev, phys_addr, flags,
1201*4882a593Smuzhiyun 					  PCI_REGION_SYS_MEMORY, &bus_addr);
1202*4882a593Smuzhiyun 		if (!ret)
1203*4882a593Smuzhiyun 			return bus_addr;
1204*4882a593Smuzhiyun 	}
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr);
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	if (ret)
1209*4882a593Smuzhiyun 		puts("pci_hose_phys_to_bus: invalid physical address\n");
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	return bus_addr;
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun 
dm_pci_map_bar(struct udevice * dev,int bar,int flags)1214*4882a593Smuzhiyun void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun 	pci_addr_t pci_bus_addr;
1217*4882a593Smuzhiyun 	u32 bar_response;
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	/* read BAR address */
1220*4882a593Smuzhiyun 	dm_pci_read_config32(dev, bar, &bar_response);
1221*4882a593Smuzhiyun 	pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	/*
1224*4882a593Smuzhiyun 	 * Pass "0" as the length argument to pci_bus_to_virt.  The arg
1225*4882a593Smuzhiyun 	 * isn't actualy used on any platform because u-boot assumes a static
1226*4882a593Smuzhiyun 	 * linear mapping.  In the future, this could read the BAR size
1227*4882a593Smuzhiyun 	 * and pass that as the size if needed.
1228*4882a593Smuzhiyun 	 */
1229*4882a593Smuzhiyun 	return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE);
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun 
/* The PCI bus uclass: every device of this uclass is a PCI bus/bridge */
UCLASS_DRIVER(pci) = {
	.id		= UCLASS_PCI,
	.name		= "pci",
	.flags		= DM_UC_FLAG_SEQ_ALIAS,	/* bus numbers may come from DT aliases */
	.post_bind	= dm_scan_fdt_dev,
	.pre_probe	= pci_uclass_pre_probe,
	.post_probe	= pci_uclass_post_probe,
	.child_post_bind = pci_uclass_child_post_bind,
	.per_device_auto_alloc_size = sizeof(struct pci_controller),
	.per_child_platdata_auto_alloc_size =
			sizeof(struct pci_child_platdata),
};
1244*4882a593Smuzhiyun 
/* Config accesses on a bridge bus are forwarded to the top-level controller */
static const struct dm_pci_ops pci_bridge_ops = {
	.read_config	= pci_bridge_read_config,
	.write_config	= pci_bridge_write_config,
};

static const struct udevice_id pci_bridge_ids[] = {
	{ .compatible = "pci-bridge" },
	{ }
};

/* Driver bound to PCI-to-PCI bridges found during enumeration */
U_BOOT_DRIVER(pci_bridge_drv) = {
	.name		= "pci_bridge_drv",
	.id		= UCLASS_PCI,
	.of_match	= pci_bridge_ids,
	.ops		= &pci_bridge_ops,
};
1261*4882a593Smuzhiyun 
/* Catch-all uclass for PCI devices that have no specific driver */
UCLASS_DRIVER(pci_generic) = {
	.id		= UCLASS_PCI_GENERIC,
	.name		= "pci_generic",
};

static const struct udevice_id pci_generic_ids[] = {
	{ .compatible = "pci-generic" },
	{ }
};

/* Fallback driver bound when no matching driver or DT node is found */
U_BOOT_DRIVER(pci_generic_drv) = {
	.name		= "pci_generic_drv",
	.id		= UCLASS_PCI_GENERIC,
	.of_match	= pci_generic_ids,
};
1277*4882a593Smuzhiyun 
pci_init(void)1278*4882a593Smuzhiyun void pci_init(void)
1279*4882a593Smuzhiyun {
1280*4882a593Smuzhiyun 	struct udevice *bus;
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	/*
1283*4882a593Smuzhiyun 	 * Enumerate all known controller devices. Enumeration has the side-
1284*4882a593Smuzhiyun 	 * effect of probing them, so PCIe devices will be enumerated too.
1285*4882a593Smuzhiyun 	 */
1286*4882a593Smuzhiyun 	for (uclass_first_device(UCLASS_PCI, &bus);
1287*4882a593Smuzhiyun 	     bus;
1288*4882a593Smuzhiyun 	     uclass_next_device(&bus)) {
1289*4882a593Smuzhiyun 		;
1290*4882a593Smuzhiyun 	}
1291*4882a593Smuzhiyun }
1292