// SPDX-License-Identifier: GPL-2.0-only
/*
 * OF helpers for IOMMU
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fsl/mc.h>

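/*
 * Positive, non-error return value used below to mean "no usable IOMMU":
 * either none is described for the device, or it is unavailable for
 * non-fatal reasons. Distinct from 0 (success) and negative errnos.
 */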
#define NO_IOMMU	1

/**
 * of_get_dma_window - Parse the *dma-window property and return 0 if found.
 *
 * @dn: device node
 * @prefix: prefix for the property name, if any
 * @index: index to start parsing from
 * @busno: returns the bus number if supported, otherwise pass NULL
 * @addr: returns the address at which DMA starts
 * @size: returns the range that DMA can handle
 *
 * This supports different formats flexibly. "prefix" can be configured
 * if needed. "busno" and "index" are optional; set them to 0 (or NULL)
 * if not used.
 */
int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
		      unsigned long *busno, dma_addr_t *addr, size_t *size)
{
	const __be32 *dma_window, *end;
	int bytes, cur_index = 0;
	char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];

	if (!dn || !addr || !size)
		return -EINVAL;

	if (!prefix)
		prefix = "";

	snprintf(propname, sizeof(propname), "%sdma-window", prefix);
	snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
	snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);

	dma_window = of_get_property(dn, propname, &bytes);
	if (!dma_window)
		return -ENODEV;
	end = dma_window + bytes / sizeof(*dma_window);

	while (dma_window < end) {
		u32 cells;
		const void *prop;

		/* busno is one cell if supported */
		if (busno)
			*busno = be32_to_cpup(dma_window++);

		prop = of_get_property(dn, addrname, NULL);
		if (!prop)
			prop = of_get_property(dn, "#address-cells", NULL);

		cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
		if (!cells)
			return -EINVAL;
		*addr = of_read_number(dma_window, cells);
		dma_window += cells;

		prop = of_get_property(dn, sizename, NULL);
		cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
		if (!cells)
			return -EINVAL;
		*size = of_read_number(dma_window, cells);
		dma_window += cells;

		if (cur_index++ == index)
			break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
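
/*
 * Illustrative sketch only (not part of the original file): a caller with a
 * hypothetical device node "dn" carrying a "dma-window" property might fetch
 * the first window roughly like this:
 *
 *	unsigned long busno;
 *	dma_addr_t dma_base;
 *	size_t dma_size;
 *
 *	if (!of_get_dma_window(dn, NULL, 0, &busno, &dma_base, &dma_size))
 *		pr_debug("DMA window: bus %lu base %pad size %zu\n",
 *			 busno, &dma_base, dma_size);
 */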
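/*
 * Initialise dev's IOMMU fwspec for one IOMMU specifier and pass it to the
 * matching driver's ->of_xlate() callback. Returns NO_IOMMU if the IOMMU
 * node is disabled or its driver provides no ->of_xlate(), and typically
 * defers the probe if the driver has not registered its ops yet.
 */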
static int of_iommu_xlate(struct device *dev,
			  struct of_phandle_args *iommu_spec)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
	int ret;

	ops = iommu_ops_from_fwnode(fwnode);
	if ((ops && !ops->of_xlate) ||
	    !of_device_is_available(iommu_spec->np))
		return NO_IOMMU;

	ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
	if (ret)
		return ret;
	/*
	 * The otherwise-empty fwspec handily serves to indicate the specific
	 * IOMMU device we're waiting for, which will be useful if we ever get
	 * a proper probe-ordering dependency mechanism in future.
	 */
	if (!ops)
		return driver_deferred_probe_check_state(dev);

	if (!try_module_get(ops->owner))
		return -ENODEV;

	ret = ops->of_xlate(dev, iommu_spec);
	module_put(ops->owner);
	return ret;
}
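/*
 * Translate a single input ID (such as a PCI requester ID) through the
 * master node's "iommu-map"/"iommu-map-mask" properties and run the
 * resulting IOMMU specifier through of_iommu_xlate().
 */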
static int of_iommu_configure_dev_id(struct device_node *master_np,
				     struct device *dev,
				     const u32 *id)
{
	struct of_phandle_args iommu_spec = { .args_count = 1 };
	int err;

	err = of_map_id(master_np, *id, "iommu-map",
			"iommu-map-mask", &iommu_spec.np,
			iommu_spec.args);
	if (err)
		return err == -ENODEV ? NO_IOMMU : err;

	err = of_iommu_xlate(dev, &iommu_spec);
	of_node_put(iommu_spec.np);
	return err;
}
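/*
 * Walk every "iommus" specifier on the master node and translate each one
 * in turn, stopping at the first error.
 */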
static int of_iommu_configure_dev(struct device_node *master_np,
				  struct device *dev)
{
	struct of_phandle_args iommu_spec;
	int err = NO_IOMMU, idx = 0;

	while (!of_parse_phandle_with_args(master_np, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {
		err = of_iommu_xlate(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		idx++;
		if (err)
			break;
	}

	return err;
}

struct of_pci_iommu_alias_info {
	struct device *dev;
	struct device_node *np;
};

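/* pci_for_each_dma_alias() callback: configure the IOMMU for one DMA alias */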
static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct of_pci_iommu_alias_info *info = data;
	u32 input_id = alias;

	return of_iommu_configure_dev_id(info->np, info->dev, &input_id);
}

static int of_iommu_configure_device(struct device_node *master_np,
				     struct device *dev, const u32 *id)
{
	return (id) ? of_iommu_configure_dev_id(master_np, dev, id) :
		      of_iommu_configure_dev(master_np, dev);
}

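/*
 * Resolve the IOMMU(s) translating @dev, described by @master_np and an
 * optional input @id, set up the device's iommu_fwspec, and return the
 * matching iommu_ops, NULL if there is none, or ERR_PTR(-EPROBE_DEFER) if
 * configuration should be retried later. Typically called from the OF DMA
 * configuration path (of_dma_configure_id()).
 */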
const struct iommu_ops *of_iommu_configure(struct device *dev,
					   struct device_node *master_np,
					   const u32 *id)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int err = NO_IOMMU;

	if (!master_np)
		return NULL;

	if (fwspec) {
		if (fwspec->ops)
			return fwspec->ops;

		/* In the deferred case, start again from scratch */
		iommu_fwspec_free(dev);
	}

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	if (dev_is_pci(dev)) {
		struct of_pci_iommu_alias_info info = {
			.dev = dev,
			.np = master_np,
		};

		pci_request_acs();
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     of_pci_iommu_init, &info);
	} else {
		err = of_iommu_configure_device(master_np, dev, id);

		fwspec = dev_iommu_fwspec_get(dev);
		if (!err && fwspec)
			of_property_read_u32(master_np, "pasid-num-bits",
					     &fwspec->num_pasid_bits);
	}

	/*
	 * Two success conditions can be represented by non-negative err here:
	 * >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
	 *  0 : we found an IOMMU, and dev->fwspec is initialised appropriately
	 * <0 : any actual error
	 */
	if (!err) {
		/* The fwspec pointer changed, read it again */
		fwspec = dev_iommu_fwspec_get(dev);
		ops = fwspec->ops;
	}
	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * probe for dev, replay it to get things in order.
	 */
	if (!err && dev->bus && !device_iommu_mapped(dev))
		err = iommu_probe_device(dev);

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err < 0) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}