// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)	"OF: " fmt

#include <linux/device.h>
#include <linux/fwnode.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/logic_pio.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-direct.h> /* for bus_dma_region */

#include "of_private.h"

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)

static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r);

/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
	pr_debug("%s", s);
	while (na--)
		pr_cont(" %08x", be32_to_cpu(*(addr++)));
	pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif

/* Callbacks for bus specific translators */
struct of_bus {
	const char	*name;
	const char	*addresses;
	int		(*match)(struct device_node *parent);
	void		(*count_cells)(struct device_node *child,
				       int *addrc, int *sizec);
	u64		(*map)(__be32 *addr, const __be32 *range,
			       int na, int ns, int pna);
	int		(*translate)(__be32 *addr, u64 offset, int na);
	bool		has_flags;
	unsigned int	(*get_flags)(const __be32 *addr);
};

/*
 * Default translator (generic bus)
 */

static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}

static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
			      int na, int ns, int pna)
{
	u64 cp, s, da;

	cp = of_read_number(range, na);
	s = of_read_number(range + na + pna, ns);
	da = of_read_number(addr, na);

	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
{
	u64 a = of_read_number(addr, na);
	memset(addr, 0, na * 4);
	a += offset;
	if (na > 1)
		addr[na - 2] = cpu_to_be32(a >> 32);
	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);

	return 0;
}

static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
	return IORESOURCE_MEM;
}

#ifdef CONFIG_PCI
static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (!IS_ENABLED(CONFIG_PCI))
		return 0;

	switch ((w >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02: /* 32 bits */
	case 0x03: /* 64 bits */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (w & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}

/*
 * PCI bus specific translator
 */

static bool of_node_is_pcie(struct device_node *np)
{
	bool is_pcie = of_node_name_eq(np, "pcie");

	if (is_pcie)
		pr_warn_once("%pOF: Missing device_type\n", np);

	return is_pcie;
}

static int of_bus_pci_match(struct device_node *np)
{
	/*
	 * "pciex" is PCI Express
	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
	 * "ht" is hypertransport
	 *
	 * If none of the device_type values match, but the node name is
	 * "pcie", accept the device as PCI (with a warning).
	 */
	return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
		of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
		of_node_is_pcie(np);
}

static void of_bus_pci_count_cells(struct device_node *np,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 3;
	if (sizec)
		*sizec = 2;
}

static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;
	unsigned int af, rf;

	af = of_bus_pci_get_flags(addr);
	rf = of_bus_pci_get_flags(range);

	/* Check address type match */
	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}

const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
				 unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	if (strcmp(bus->name, "pci")) {
		of_node_put(parent);
		return NULL;
	}
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
		u32 val = be32_to_cpu(prop[0]);
		if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_pci_address);
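
/*
 * Illustrative (hypothetical) usage of of_get_pci_address(): look up the
 * "assigned-addresses" entry for BAR 1 of a PCI device node. The node
 * pointer "np" and the surrounding driver context are assumptions, not
 * part of this file.
 *
 *	u64 size;
 *	unsigned int flags;
 *	const __be32 *addr = of_get_pci_address(np, 1, &size, &flags);
 *
 *	if (addr && (flags & IORESOURCE_MEM))
 *		pr_info("BAR1 is %llu bytes of MMIO\n", size);
 */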

int of_pci_address_to_resource(struct device_node *dev, int bar,
			       struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;

	addrp = of_get_pci_address(dev, bar, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;
	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);

/*
 * of_pci_range_to_resource - Create a resource from an of_pci_range
 * @range:	the PCI range that describes the resource
 * @np:		device node where the range belongs to
 * @res:	pointer to a valid resource that will be updated to
 *		reflect the values contained in the range.
 *
 * Returns -EINVAL if the range cannot be converted to a resource.
 *
 * Note that if the range is an IO range, the resource will be converted
 * using pci_address_to_pio(), which can fail if it is called too early or
 * if the range cannot be matched to any host bridge IO space (our case here).
 * To guard against that we try to register the IO range first.
 * If that fails, we know that pci_address_to_pio() will fail too.
 */
int of_pci_range_to_resource(struct of_pci_range *range,
			     struct device_node *np, struct resource *res)
{
	int err;
	res->flags = range->flags;
	res->parent = res->child = res->sibling = NULL;
	res->name = np->full_name;

	if (res->flags & IORESOURCE_IO) {
		unsigned long port;
		err = pci_register_io_range(&np->fwnode, range->cpu_addr,
					    range->size);
		if (err)
			goto invalid_range;
		port = pci_address_to_pio(range->cpu_addr);
		if (port == (unsigned long)-1) {
			err = -EINVAL;
			goto invalid_range;
		}
		res->start = port;
	} else {
		if ((sizeof(resource_size_t) < 8) &&
		    upper_32_bits(range->cpu_addr)) {
			err = -EINVAL;
			goto invalid_range;
		}

		res->start = range->cpu_addr;
	}
	res->end = res->start + range->size - 1;
	return 0;

invalid_range:
	res->start = (resource_size_t)OF_BAD_ADDR;
	res->end = (resource_size_t)OF_BAD_ADDR;
	return err;
}
EXPORT_SYMBOL(of_pci_range_to_resource);
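
/*
 * Illustrative (hypothetical) usage: convert every "ranges" entry of a PCI
 * host bridge node into a struct resource. The node pointer "np" is an
 * assumption and error handling is reduced to a sketch.
 *
 *	struct of_pci_range_parser parser;
 *	struct of_pci_range range;
 *	struct resource res;
 *
 *	if (of_pci_range_parser_init(&parser, np))
 *		return -ENOENT;
 *	for_each_of_pci_range(&parser, &range) {
 *		if (of_pci_range_to_resource(&range, np, &res))
 *			continue;	// skip untranslatable ranges
 *		pr_info("%pR\n", &res);
 *	}
 */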
#endif /* CONFIG_PCI */

/*
 * ISA bus specific translator
 */

static int of_bus_isa_match(struct device_node *np)
{
	return of_node_name_eq(np, "isa");
}

static void of_bus_isa_count_cells(struct device_node *child,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}

static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;

	/* Check address type match */
	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}

static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (w & 1)
		flags |= IORESOURCE_IO;
	else
		flags |= IORESOURCE_MEM;
	return flags;
}

/*
 * Array of bus specific translators
 */

static struct of_bus of_busses[] = {
#ifdef CONFIG_PCI
	/* PCI */
	{
		.name = "pci",
		.addresses = "assigned-addresses",
		.match = of_bus_pci_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_pci_map,
		.translate = of_bus_pci_translate,
		.has_flags = true,
		.get_flags = of_bus_pci_get_flags,
	},
#endif /* CONFIG_PCI */
	/* ISA */
	{
		.name = "isa",
		.addresses = "reg",
		.match = of_bus_isa_match,
		.count_cells = of_bus_isa_count_cells,
		.map = of_bus_isa_map,
		.translate = of_bus_isa_translate,
		.has_flags = true,
		.get_flags = of_bus_isa_get_flags,
	},
	/* Default */
	{
		.name = "default",
		.addresses = "reg",
		.match = NULL,
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.translate = of_bus_default_translate,
		.get_flags = of_bus_default_get_flags,
	},
};

static struct of_bus *of_match_bus(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
		if (!of_busses[i].match || of_busses[i].match(np))
			return &of_busses[i];
	BUG();
	return NULL;
}

static int of_empty_ranges_quirk(struct device_node *np)
{
	if (IS_ENABLED(CONFIG_PPC)) {
		/* To save cycles, we cache the result for the global "Mac" setting */
		static int quirk_state = -1;

		/* PA-SEMI sdc DT bug */
		if (of_device_is_compatible(np, "1682m-sdc"))
			return true;

		/* Make quirk cached */
		if (quirk_state < 0)
			quirk_state =
				of_machine_is_compatible("Power Macintosh") ||
				of_machine_is_compatible("MacRISC");
		return quirk_state;
	}
	return false;
}

static int of_translate_one(struct device_node *parent, struct of_bus *bus,
			    struct of_bus *pbus, __be32 *addr,
			    int na, int ns, int pna, const char *rprop)
{
	const __be32 *ranges;
	unsigned int rlen;
	int rone;
	u64 offset = OF_BAD_ADDR;

	/*
	 * Normally, the absence of a "ranges" property means we are
	 * crossing a non-translatable boundary, and thus the addresses
	 * below the current node cannot be converted to CPU physical ones.
	 * Unfortunately, while this is very clear in the spec, it's not
	 * what Apple understood, and they do have things like /uni-n or
	 * /ht nodes with no "ranges" property and a lot of perfectly
	 * usable mapped devices below them. Thus we treat the absence of
	 * "ranges" as equivalent to an empty "ranges" property, which means
	 * a 1:1 translation at that level. It's up to the caller not to try
	 * to translate addresses that aren't supposed to be translated in
	 * the first place. --BenH.
	 *
	 * As far as we know, this damage only exists on Apple machines, so
	 * this code is only enabled on powerpc. --gcl
	 *
	 * This quirk also applies for 'dma-ranges' which frequently exist in
	 * child nodes without 'dma-ranges' in the parent nodes. --RobH
	 */
	ranges = of_get_property(parent, rprop, &rlen);
	if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
	    strcmp(rprop, "dma-ranges")) {
		pr_debug("no ranges; cannot translate\n");
		return 1;
	}
	if (ranges == NULL || rlen == 0) {
		offset = of_read_number(addr, na);
		memset(addr, 0, pna * 4);
		pr_debug("empty ranges; 1:1 translation\n");
		goto finish;
	}

	pr_debug("walking ranges...\n");

	/* Now walk through the ranges */
	rlen /= 4;
	rone = na + pna + ns;
	for (; rlen >= rone; rlen -= rone, ranges += rone) {
		offset = bus->map(addr, ranges, na, ns, pna);
		if (offset != OF_BAD_ADDR)
			break;
	}
	if (offset == OF_BAD_ADDR) {
		pr_debug("not found !\n");
		return 1;
	}
	memcpy(addr, ranges + na, 4 * pna);

finish:
	of_dump_addr("parent translation for:", addr, pna);
	pr_debug("with offset: %llx\n", (unsigned long long)offset);

	/* Translate it into parent bus space */
	return pbus->translate(addr, offset, pna);
}

/*
 * Translate an address from the device tree into a CPU physical address.
 * This walks up the tree and applies the various bus mappings along the
 * way.
 *
 * Note: we consider crossing any level with #size-cells == 0 to mean
 * that translation is impossible (i.e. we are not dealing with a value
 * that can be mapped to a CPU physical address). This is not really
 * specified that way, but this is traditionally how IBM at least does
 * things.
 *
 * Whenever the translation cannot produce a plain CPU physical address,
 * the *host pointer is set to the device that had registered a logical
 * PIO mapping, and the returned address is relative to that node.
 */
static u64 __of_translate_address(struct device_node *dev,
				  struct device_node *(*get_parent)(const struct device_node *),
				  const __be32 *in_addr, const char *rprop,
				  struct device_node **host)
{
	struct device_node *parent = NULL;
	struct of_bus *bus, *pbus;
	__be32 addr[OF_MAX_ADDR_CELLS];
	int na, ns, pna, pns;
	u64 result = OF_BAD_ADDR;

	pr_debug("** translation for device %pOF **\n", dev);

	/* Increase refcount at current level */
	of_node_get(dev);

	*host = NULL;
	/* Get parent & match bus type */
	parent = get_parent(dev);
	if (parent == NULL)
		goto bail;
	bus = of_match_bus(parent);

	/* Count address cells & copy address locally */
	bus->count_cells(dev, &na, &ns);
	if (!OF_CHECK_COUNTS(na, ns)) {
		pr_debug("Bad cell count for %pOF\n", dev);
		goto bail;
	}
	memcpy(addr, in_addr, na * 4);

	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
		 bus->name, na, ns, parent);
	of_dump_addr("translating address:", addr, na);

	/* Translate */
	for (;;) {
		struct logic_pio_hwaddr *iorange;

		/* Switch to parent bus */
		of_node_put(dev);
		dev = parent;
		parent = get_parent(dev);

		/* If root, we have finished */
		if (parent == NULL) {
			pr_debug("reached root node\n");
			result = of_read_number(addr, na);
			break;
		}

		/*
		 * For indirectIO device which has no ranges property, get
		 * the address from reg directly.
		 */
		iorange = find_io_range_by_fwnode(&dev->fwnode);
		if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
			result = of_read_number(addr + 1, na - 1);
			pr_debug("indirectIO matched(%pOF) 0x%llx\n",
				 dev, result);
			*host = of_node_get(dev);
			break;
		}

		/* Get new parent bus and counts */
		pbus = of_match_bus(parent);
		pbus->count_cells(dev, &pna, &pns);
		if (!OF_CHECK_COUNTS(pna, pns)) {
			pr_err("Bad cell count for %pOF\n", dev);
			break;
		}

		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
			 pbus->name, pna, pns, parent);

		/* Apply bus translation */
		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
			break;

		/* Complete the move up one level */
		na = pna;
		ns = pns;
		bus = pbus;

		of_dump_addr("one level translation:", addr, na);
	}
bail:
	of_node_put(parent);
	of_node_put(dev);

	return result;
}

u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
	struct device_node *host;
	u64 ret;

	ret = __of_translate_address(dev, of_get_parent,
				     in_addr, "ranges", &host);
	if (host) {
		of_node_put(host);
		return OF_BAD_ADDR;
	}

	return ret;
}
EXPORT_SYMBOL(of_translate_address);
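
/*
 * Illustrative (hypothetical) usage of of_translate_address(): translate the
 * first "reg" entry of a node into a CPU physical address. The node pointer
 * "np" is an assumption; real callers usually go through
 * of_address_to_resource() instead.
 *
 *	const __be32 *reg = of_get_address(np, 0, NULL, NULL);
 *	u64 paddr;
 *
 *	if (reg) {
 *		paddr = of_translate_address(np, reg);
 *		if (paddr != OF_BAD_ADDR)
 *			pr_info("mapped at %llx\n", paddr);
 *	}
 */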

static struct device_node *__of_get_dma_parent(const struct device_node *np)
{
	struct of_phandle_args args;
	int ret, index;

	index = of_property_match_string(np, "interconnect-names", "dma-mem");
	if (index < 0)
		return of_get_parent(np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells",
					 index, &args);
	if (ret < 0)
		return of_get_parent(np);

	return of_node_get(args.np);
}

static struct device_node *of_get_next_dma_parent(struct device_node *np)
{
	struct device_node *parent;

	parent = __of_get_dma_parent(np);
	of_node_put(np);

	return parent;
}

u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
	struct device_node *host;
	u64 ret;

	ret = __of_translate_address(dev, __of_get_dma_parent,
				     in_addr, "dma-ranges", &host);

	if (host) {
		of_node_put(host);
		return OF_BAD_ADDR;
	}

	return ret;
}
EXPORT_SYMBOL(of_translate_dma_address);

const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
			     unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
		if (i == index) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	return NULL;
}
EXPORT_SYMBOL(of_get_address);

static int parser_init(struct of_pci_range_parser *parser,
		       struct device_node *node, const char *name)
{
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->na = of_bus_n_addr_cells(node);
	parser->ns = of_bus_n_size_cells(node);
	parser->dma = !strcmp(name, "dma-ranges");
	parser->bus = of_match_bus(node);

	parser->range = of_get_property(node, name, &rlen);
	if (parser->range == NULL)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);

	return 0;
}

int of_pci_range_parser_init(struct of_pci_range_parser *parser,
			     struct device_node *node)
{
	return parser_init(parser, node, "ranges");
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);

int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				 struct device_node *node)
{
	return parser_init(parser, node, "dma-ranges");
}
EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
#define of_dma_range_parser_init of_pci_dma_range_parser_init
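
/*
 * Illustrative (hypothetical) usage of the range parser: walk the
 * "dma-ranges" of a bus node and print each contiguous region. The node
 * pointer "np" is an assumption.
 *
 *	struct of_range_parser parser;
 *	struct of_range range;
 *
 *	if (of_dma_range_parser_init(&parser, np))
 *		return;
 *	for_each_of_range(&parser, &range)
 *		pr_info("bus %llx -> cpu %llx, size %llx\n",
 *			range.bus_addr, range.cpu_addr, range.size);
 */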

struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
					     struct of_pci_range *range)
{
	int na = parser->na;
	int ns = parser->ns;
	int np = parser->pna + na + ns;
	int busflag_na = 0;

	if (!range)
		return NULL;

	if (!parser->range || parser->range + np > parser->end)
		return NULL;

	range->flags = parser->bus->get_flags(parser->range);

	/* An extra cell for resource flags */
	if (parser->bus->has_flags)
		busflag_na = 1;

	range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);

	if (parser->dma)
		range->cpu_addr = of_translate_dma_address(parser->node,
				parser->range + na);
	else
		range->cpu_addr = of_translate_address(parser->node,
				parser->range + na);
	range->size = of_read_number(parser->range + parser->pna + na, ns);

	parser->range += np;

	/* Now consume following elements while they are contiguous */
	while (parser->range + np <= parser->end) {
		u32 flags = 0;
		u64 bus_addr, cpu_addr, size;

		flags = parser->bus->get_flags(parser->range);
		bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
		if (parser->dma)
			cpu_addr = of_translate_dma_address(parser->node,
					parser->range + na);
		else
			cpu_addr = of_translate_address(parser->node,
					parser->range + na);
		size = of_read_number(parser->range + parser->pna + na, ns);

		if (flags != range->flags)
			break;
		if (bus_addr != range->bus_addr + range->size ||
		    cpu_addr != range->cpu_addr + range->size)
			break;

		range->size += size;
		parser->range += np;
	}

	return range;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);

static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
			       u64 size)
{
	u64 taddr;
	unsigned long port;
	struct device_node *host;

	taddr = __of_translate_address(dev, of_get_parent,
				       in_addr, "ranges", &host);
	if (host) {
		/* host-specific port access */
		port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
		of_node_put(host);
	} else {
		/* memory-mapped I/O range */
		port = pci_address_to_pio(taddr);
	}

	if (port == (unsigned long)-1)
		return OF_BAD_ADDR;

	return port;
}

static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r)
{
	u64 taddr;

	if (flags & IORESOURCE_MEM)
		taddr = of_translate_address(dev, addrp);
	else if (flags & IORESOURCE_IO)
		taddr = of_translate_ioport(dev, addrp, size);
	else
		return -EINVAL;

	if (taddr == OF_BAD_ADDR)
		return -EINVAL;
	memset(r, 0, sizeof(struct resource));

	r->start = taddr;
	r->end = taddr + size - 1;
	r->flags = flags;
	r->name = name ? name : dev->full_name;

	return 0;
}

/**
 * of_address_to_resource - Translate device tree address and return as resource
 *
 * Note that if your address is a PIO address, the conversion will fail if
 * the physical address can't be internally converted to an IO token with
 * pci_address_to_pio(); that happens either because it is called too early
 * or because it can't be matched to any host bridge IO space.
 */
int of_address_to_resource(struct device_node *dev, int index,
			   struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;
	const char *name = NULL;

	addrp = of_get_address(dev, index, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;

	/* Get optional "reg-names" property to add a name to a resource */
	of_property_read_string_index(dev, "reg-names", index, &name);

	return __of_address_to_resource(dev, addrp, size, flags, name, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
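
/*
 * Illustrative (hypothetical) usage of of_address_to_resource(): fetch the
 * first "reg" entry of a node as a struct resource. The node pointer "np"
 * is an assumption; in a platform driver the same information is usually
 * obtained via platform_get_resource().
 *
 *	struct resource res;
 *
 *	if (!of_address_to_resource(np, 0, &res))
 *		pr_info("reg0: %pR\n", &res);
 */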

/**
 * of_iomap - Maps the memory mapped IO for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 *
 * Returns a pointer to the mapped memory
 */
void __iomem *of_iomap(struct device_node *np, int index)
{
	struct resource res;

	if (of_address_to_resource(np, index, &res))
		return NULL;

	return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);
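
/*
 * Illustrative (hypothetical) usage of of_iomap(): map the first register
 * window of a node and read a register. The node pointer "np" and the
 * register offset are assumptions; real users must iounmap() on teardown.
 *
 *	void __iomem *base = of_iomap(np, 0);
 *
 *	if (base) {
 *		u32 id = readl(base + 0x0);
 *		pr_info("id register: %#x\n", id);
 *	}
 */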

/*
 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 *			   for a given device_node
 * @device:	the device whose io range will be mapped
 * @index:	index of the io range
 * @name:	name "override" for the memory region request or NULL
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = of_io_request_and_map(node, 0, "foo");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *of_io_request_and_map(struct device_node *np, int index,
				    const char *name)
{
	struct resource res;
	void __iomem *mem;

	if (of_address_to_resource(np, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!name)
		name = res.name;
	if (!request_mem_region(res.start, resource_size(&res), name))
		return IOMEM_ERR_PTR(-EBUSY);

	mem = ioremap(res.start, resource_size(&res));
	if (!mem) {
		release_mem_region(res.start, resource_size(&res));
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	return mem;
}
EXPORT_SYMBOL(of_io_request_and_map);

#ifdef CONFIG_HAS_DMA
/**
 * of_dma_get_range - Get DMA range info and put it into a map array
 * @np:		device node to get DMA range info
 * @map:	dma range structure to return
 *
 * Look in bottom up direction for the first "dma-ranges" property
 * and parse it. Put the information into a DMA offset map array.
 *
 * dma-ranges format:
 *	DMA addr (dma_addr)	: naddr cells
 *	CPU addr (phys_addr_t)	: pna cells
 *	size			: nsize cells
 *
 * It returns -ENODEV if "dma-ranges" property was not found for this
 * device in the DT.
 */
int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
{
	struct device_node *node = of_node_get(np);
	const __be32 *ranges = NULL;
	bool found_dma_ranges = false;
	struct of_range_parser parser;
	struct of_range range;
	struct bus_dma_region *r;
	int len, num_ranges = 0;
	int ret = 0;

	while (node) {
		ranges = of_get_property(node, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;

		/* Once we find 'dma-ranges', then a missing one is an error */
		if (found_dma_ranges && !ranges) {
			ret = -ENODEV;
			goto out;
		}
		found_dma_ranges = true;

		node = of_get_next_dma_parent(node);
	}

	if (!node || !ranges) {
		pr_debug("no dma-ranges found for node(%pOF)\n", np);
		ret = -ENODEV;
		goto out;
	}

	of_dma_range_parser_init(&parser, node);
	for_each_of_range(&parser, &range)
		num_ranges++;

	r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Record all info in the generic DMA ranges array for struct device.
	 */
	*map = r;
	of_dma_range_parser_init(&parser, node);
	for_each_of_range(&parser, &range) {
		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
			 range.bus_addr, range.cpu_addr, range.size);
		if (range.cpu_addr == OF_BAD_ADDR) {
			pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
			       range.bus_addr, node);
			continue;
		}
		r->cpu_start = range.cpu_addr;
		r->dma_start = range.bus_addr;
		r->size = range.size;
		r->offset = range.cpu_addr - range.bus_addr;
		r++;
	}
out:
	of_node_put(node);
	return ret;
}
#endif /* CONFIG_HAS_DMA */
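
/*
 * Illustrative (hypothetical) usage of of_dma_get_range(): build the DMA
 * offset map for a device and hand it to the DMA core, roughly what
 * of_dma_configure_id() does. "dev" and "np" are assumptions, and
 * of_dma_get_range() is internal to the OF core (declared in of_private.h).
 *
 *	const struct bus_dma_region *map = NULL;
 *
 *	if (!of_dma_get_range(np, &map))
 *		dev->dma_range_map = map;
 */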

/**
 * of_dma_get_max_cpu_address - Gets highest CPU address suitable for DMA
 * @np: The node to start searching from or NULL to start from the root
 *
 * Gets the highest CPU physical address that is addressable by all DMA masters
 * in the sub-tree pointed by np, or the whole tree if NULL is passed. If no
 * DMA constrained device is found, it returns PHYS_ADDR_MAX.
 */
phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
{
	phys_addr_t max_cpu_addr = PHYS_ADDR_MAX;
	struct of_range_parser parser;
	phys_addr_t subtree_max_addr;
	struct device_node *child;
	struct of_range range;
	const __be32 *ranges;
	u64 cpu_end = 0;
	int len;

	if (!np)
		np = of_root;

	ranges = of_get_property(np, "dma-ranges", &len);
	if (ranges && len) {
		of_dma_range_parser_init(&parser, np);
		for_each_of_range(&parser, &range)
			if (range.cpu_addr + range.size > cpu_end)
				cpu_end = range.cpu_addr + range.size - 1;

		if (max_cpu_addr > cpu_end)
			max_cpu_addr = cpu_end;
	}

	for_each_available_child_of_node(np, child) {
		subtree_max_addr = of_dma_get_max_cpu_address(child);
		if (max_cpu_addr > subtree_max_addr)
			max_cpu_addr = subtree_max_addr;
	}

	return max_cpu_addr;
}

/**
 * of_dma_is_coherent - Check if device is coherent
 * @np:	device node
 *
 * It returns true if "dma-coherent" property was found
 * for this device in the DT, or if DMA is coherent by
 * default for OF devices on the current platform.
 */
bool of_dma_is_coherent(struct device_node *np)
{
	struct device_node *node;

	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
		return true;

	node = of_node_get(np);

	while (node) {
		if (of_property_read_bool(node, "dma-coherent")) {
			of_node_put(node);
			return true;
		}
		node = of_get_next_dma_parent(node);
	}
	of_node_put(node);
	return false;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
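
/*
 * Illustrative (hypothetical) usage of of_dma_is_coherent(): decide whether a
 * device needs cache maintenance for its DMA buffers. "dev" is an assumption.
 *
 *	if (of_dma_is_coherent(dev->of_node))
 *		pr_debug("%s: DMA is cache-coherent\n", dev_name(dev));
 *	else
 *		pr_debug("%s: DMA needs cache maintenance\n", dev_name(dev));
 */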