// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <trace/hooks/iommu.h>

struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain      iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;

        /* Domain for flush queue callback; NULL if flush queue not in use */
        struct iommu_domain             *fq_domain;
};

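/*
 * Note: this wrapper (an extension of the upstream cookie) exists so that
 * iommu_dma_init_domain() can serialise its one-time IOVA domain
 * initialisation under cookie_ext->mutex; see the iovad->start_pfn check
 * in that function.
 */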
struct iommu_dma_cookie_ext {
        struct iommu_dma_cookie cookie;
        struct mutex            mutex;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie_ext *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                INIT_LIST_HEAD(&cookie->cookie.msi_page_list);
                cookie->cookie.type = type;
                mutex_init(&cookie->mutex);
        }
        return &cookie->cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
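
/*
 * Illustrative sketch (not part of this file): a driver's domain_alloc
 * callback might use the helper above roughly like so. Everything here
 * other than iommu_get_dma_cookie() is hypothetical:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 */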

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
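
/*
 * Illustrative sketch (not part of this file): an owner of an
 * IOMMU_DOMAIN_UNMANAGED domain (e.g. a VFIO-style user) could set up MSI
 * remapping like so; MSI_IOVA_BASE is a hypothetical constant:
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (domain && iommu_get_msi_cookie(domain, MSI_IOVA_BASE))
 *		goto err_free_domain;
 */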

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
                iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        for (i = 0; i < num_pages; i++) {
                msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
                if (!msi_page)
                        return -ENOMEM;

                msi_page->phys = start;
                msi_page->iova = start;
                INIT_LIST_HEAD(&msi_page->list);
                list_add(&msi_page->list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct resource_entry *window;
        unsigned long lo, hi;
        phys_addr_t start = 0, end;

        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
        }

        /* Get reserved DMA windows from host bridge */
        resource_list_for_each_entry(window, &bridge->dma_ranges) {
                end = window->res->start - window->offset;
resv_iova:
                if (end > start) {
                        lo = iova_pfn(iovad, start);
                        hi = iova_pfn(iovad, end);
                        reserve_iova(iovad, lo, hi);
                } else if (end < start) {
                        /* dma_ranges list should be sorted */
                        dev_err(&dev->dev,
                                "Failed to reserve IOVA [%pa-%pa]\n",
                                &start, &end);
                        return -EINVAL;
                }

                start = window->res->end - window->offset + 1;
                /* If window is last entry */
                if (window->node.next == &bridge->dma_ranges &&
                    end != ~(phys_addr_t)0) {
                        end = ~(phys_addr_t)0;
                        goto resv_iova;
                }
        }

        return 0;
}
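
/*
 * Worked example for the dma_ranges loop above, with hypothetical, sorted
 * ranges [0x10000000, 0x1fffffff] and [0x40000000, 0x7fffffff]: the first
 * iteration reserves everything below 0x10000000, the second reserves the
 * gap [0x20000000, 0x40000000), and the final resv_iova pass reserves
 * everything above 0x7fffffff, so only the advertised DMA windows remain
 * allocatable.
 */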

static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        if (dev_is_pci(dev)) {
                ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
                if (ret)
                        return ret;
        }

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
        struct iommu_dma_cookie *cookie;
        struct iommu_domain *domain;

        cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
        domain = cookie->fq_domain;
        /*
         * An IOMMU driver that supports DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
         * must provide a non-NULL ops->flush_iotlb_all.
         */
        domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_cookie_ext *cookie_ext;
        unsigned long order, base_pfn;
        struct iova_domain *iovad;
        int attr;
        int ret;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        iovad = &cookie->iovad;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
        }

        /* start_pfn is always nonzero for an already-initialised domain */
        cookie_ext = container_of(cookie, struct iommu_dma_cookie_ext, cookie);
        mutex_lock(&cookie_ext->mutex);
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        ret = -EFAULT;
                        goto done_unlock;
                }

                ret = 0;
                goto done_unlock;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn);

        if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
                        DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
                if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
                                NULL))
                        pr_warn("iova flush queue initialization failed\n");
                else
                        cookie->fq_domain = domain;
        }

        if (!dev) {
                ret = 0;
                goto done_unlock;
        }

        ret = iova_reserve_iommu_regions(dev, domain);

done_unlock:
        mutex_unlock(&cookie_ext->mutex);
        return ret;
}

static int iommu_dma_deferred_attach(struct device *dev,
                struct iommu_domain *domain)
{
        const struct iommu_ops *ops = domain->ops;

        if (!is_kdump_kernel())
                return 0;

        if (unlikely(ops->is_attach_deferred &&
                        ops->is_attach_deferred(domain, dev)))
                return iommu_attach_device(domain, dev);

        return 0;
}

/*
 * Should be called prior to using the DMA API.
 */
int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
                           u64 size)
{
        struct iommu_domain *domain;
        struct iommu_dma_cookie *cookie;
        struct iova_domain *iovad;
        unsigned long pfn_lo, pfn_hi;

        domain = iommu_get_domain_for_dev(dev);
        if (!domain || !domain->iova_cookie)
                return -EINVAL;

        cookie = domain->iova_cookie;
        iovad = &cookie->iovad;

        /* iova will be freed automatically by put_iova_domain() */
        pfn_lo = iova_pfn(iovad, base);
        pfn_hi = iova_pfn(iovad, base + size - 1);
        if (!reserve_iova(iovad, pfn_lo, pfn_hi))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL(iommu_dma_reserve_iova);
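
/*
 * Illustrative sketch (not part of this file): a driver that must keep a
 * fixed bus address range away from the DMA API allocator could call,
 * with hypothetical values:
 *
 *	ret = iommu_dma_reserve_iova(dev, 0x80000000, SZ_16M);
 */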

/*
 * Should be called prior to using the DMA API.
 */
int iommu_dma_enable_best_fit_algo(struct device *dev)
{
        struct iommu_domain *domain;
        struct iova_domain *iovad;

        domain = iommu_get_domain_for_dev(dev);
        if (!domain || !domain->iova_cookie)
                return -EINVAL;

        iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
        iovad->best_fit = true;
        return 0;
}
EXPORT_SYMBOL(iommu_dma_enable_best_fit_algo);

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;
        if (attrs & DMA_ATTR_SYS_CACHE_ONLY)
                prot |= IOMMU_SYS_CACHE;
        if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
                prot |= IOMMU_SYS_CACHE_NWA;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}
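
/*
 * Example: a coherent device mapping DMA_TO_DEVICE with no attrs gets
 * IOMMU_READ | IOMMU_CACHE; adding DMA_ATTR_PRIVILEGED would OR in
 * IOMMU_PRIV as well.
 */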

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, u64 dma_limit, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift, iova_len, iova = 0;

        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
                cookie->msi_iova += size;
                return cookie->msi_iova - size;
        }

        shift = iova_shift(iovad);
        iova_len = size >> shift;
        /*
         * Freeing non-power-of-two-sized allocations back into the IOVA caches
         * will come back to bite us badly, so we have to waste a bit of space
         * rounding up anything cacheable to make sure that can't happen. The
         * order of the unadjusted size will still match upon freeing.
         */
        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
                iova_len = roundup_pow_of_two(iova_len);

        dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova_fast(iovad, iova_len,
                                       DMA_BIT_MASK(32) >> shift, false);

        if (!iova)
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

        trace_android_vh_iommu_alloc_iova(dev, (dma_addr_t)iova << shift, size);
        trace_android_vh_iommu_iovad_alloc_iova(dev, iovad, (dma_addr_t)iova << shift, size);

        return (dma_addr_t)iova << shift;
}
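
/*
 * Example of the size rounding above: a 5-granule request below the
 * rcache threshold is allocated as 8 granules, so the range handed back
 * to the IOVA caches on free still matches a power-of-two bucket; the
 * cost is wasted IOVA space, not memory.
 */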

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size)
{
        struct iova_domain *iovad = &cookie->iovad;

        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;
        else if (cookie->fq_domain)     /* non-strict mode */
                queue_iova(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad), 0);
        else
                free_iova_fast(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad));

        trace_android_vh_iommu_free_iova(iova, size);
        trace_android_vh_iommu_iovad_free_iova(iovad, iova, size);
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
                size_t size)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);
        struct iommu_iotlb_gather iotlb_gather;
        size_t unmapped;

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);
        iommu_iotlb_gather_init(&iotlb_gather);

        unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
        WARN_ON(unmapped != size);

        if (!cookie->fq_domain)
                iommu_iotlb_sync(domain, &iotlb_gather);
        iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot, u64 dma_mask)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;

        if (unlikely(iommu_dma_deferred_attach(dev, domain)))
                return DMA_MAPPING_ERROR;

        size = iova_align(iovad, size + iova_off);

        iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
        if (!iova)
                return DMA_MAPPING_ERROR;

        if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
                return DMA_MAPPING_ERROR;
        }
        return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
                unsigned int count, unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, nid = dev_to_node(dev);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        /* It makes no sense to muck about with huge pages */
        gfp &= ~__GFP_COMP;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);
                        gfp_t alloc_flags = gfp;

                        order_size = 1U << order;
                        if (order_mask > order_size)
                                alloc_flags |= __GFP_NORETRY;
                        page = alloc_pages_node(nid, alloc_flags, order);
                        if (!page)
                                continue;
                        if (order)
                                split_page(page, order);
                        break;
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}
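
/*
 * Example: count = 7 with all orders allowed first tries an order-2
 * (4-page) block with __GFP_NORETRY, then order-1, then order-0 for the
 * remainder, split_page()ing each higher-order block so the returned
 * array always holds individual order-0 pages.
 */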

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
                unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t iova;
        void *vaddr;

        *dma_handle = DMA_MAPPING_ERROR;

        if (unlikely(iommu_dma_deferred_attach(dev, domain)))
                return NULL;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
                                        gfp);
        if (!pages)
                return NULL;

        size = iova_align(iovad, size);
        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(ioprot & IOMMU_CACHE)) {
                struct scatterlist *sg;
                int i;

                for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
                        arch_dma_prep_coherent(sg_page(sg), sg->length);
        }

        if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
                        < size)
                goto out_free_sg;

        vaddr = dma_common_pages_remap(pages, size, prot,
                        __builtin_return_address(0));
        if (!vaddr)
                goto out_unmap;

        *dma_handle = iova;
        sg_free_table(&sgt);
        return vaddr;

out_unmap:
        __iommu_dma_unmap(dev, iova, size);
out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
                struct vm_area_struct *vma)
{
        return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        bool coherent = dev_is_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dma_handle;

        dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            dma_handle != DMA_MAPPING_ERROR)
                arch_sync_dma_for_device(phys, size, dir);
        return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
        __iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (max_len - cur_len >= s_length)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}
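
/*
 * Example: two granule-aligned segments that were mapped back-to-back in
 * the single IOVA range are emitted here as one combined DMA segment,
 * provided the merged length fits dma_get_max_seg_size() and does not
 * cross the dma_get_seg_boundary() mask.
 */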
881*4882a593Smuzhiyun
882*4882a593Smuzhiyun /*
883*4882a593Smuzhiyun * If mapping failed, then just restore the original list,
884*4882a593Smuzhiyun * but making sure the DMA fields are invalidated.
885*4882a593Smuzhiyun */
__invalidate_sg(struct scatterlist * sg,int nents)886*4882a593Smuzhiyun static void __invalidate_sg(struct scatterlist *sg, int nents)
887*4882a593Smuzhiyun {
888*4882a593Smuzhiyun struct scatterlist *s;
889*4882a593Smuzhiyun int i;
890*4882a593Smuzhiyun
891*4882a593Smuzhiyun for_each_sg(sg, s, nents, i) {
892*4882a593Smuzhiyun if (sg_dma_address(s) != DMA_MAPPING_ERROR)
893*4882a593Smuzhiyun s->offset += sg_dma_address(s);
894*4882a593Smuzhiyun if (sg_dma_len(s))
895*4882a593Smuzhiyun s->length = sg_dma_len(s);
896*4882a593Smuzhiyun sg_dma_address(s) = DMA_MAPPING_ERROR;
897*4882a593Smuzhiyun sg_dma_len(s) = 0;
898*4882a593Smuzhiyun }
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun
901*4882a593Smuzhiyun /*
902*4882a593Smuzhiyun * The DMA API client is passing in a scatterlist which could describe
903*4882a593Smuzhiyun * any old buffer layout, but the IOMMU API requires everything to be
904*4882a593Smuzhiyun * aligned to IOMMU pages. Hence the need for this complicated bit of
905*4882a593Smuzhiyun * impedance-matching, to be able to hand off a suitably-aligned list,
906*4882a593Smuzhiyun * but still preserve the original offsets and sizes for the caller.
907*4882a593Smuzhiyun */
iommu_dma_map_sg(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir,unsigned long attrs)908*4882a593Smuzhiyun static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
909*4882a593Smuzhiyun int nents, enum dma_data_direction dir, unsigned long attrs)
910*4882a593Smuzhiyun {
911*4882a593Smuzhiyun struct iommu_domain *domain = iommu_get_dma_domain(dev);
912*4882a593Smuzhiyun struct iommu_dma_cookie *cookie = domain->iova_cookie;
913*4882a593Smuzhiyun struct iova_domain *iovad = &cookie->iovad;
914*4882a593Smuzhiyun struct scatterlist *s, *prev = NULL;
915*4882a593Smuzhiyun int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
916*4882a593Smuzhiyun dma_addr_t iova;
917*4882a593Smuzhiyun size_t iova_len = 0;
918*4882a593Smuzhiyun unsigned long mask = dma_get_seg_boundary(dev);
919*4882a593Smuzhiyun int i;
920*4882a593Smuzhiyun
921*4882a593Smuzhiyun if (unlikely(iommu_dma_deferred_attach(dev, domain)))
922*4882a593Smuzhiyun return 0;
923*4882a593Smuzhiyun
924*4882a593Smuzhiyun if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
925*4882a593Smuzhiyun iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
926*4882a593Smuzhiyun
927*4882a593Smuzhiyun /*
928*4882a593Smuzhiyun * Work out how much IOVA space we need, and align the segments to
929*4882a593Smuzhiyun * IOVA granules for the IOMMU driver to handle. With some clever
930*4882a593Smuzhiyun * trickery we can modify the list in-place, but reversibly, by
931*4882a593Smuzhiyun * stashing the unaligned parts in the as-yet-unused DMA fields.
932*4882a593Smuzhiyun */
933*4882a593Smuzhiyun for_each_sg(sg, s, nents, i) {
934*4882a593Smuzhiyun size_t s_iova_off = iova_offset(iovad, s->offset);
935*4882a593Smuzhiyun size_t s_length = s->length;
936*4882a593Smuzhiyun size_t pad_len = (mask - iova_len + 1) & mask;
937*4882a593Smuzhiyun
938*4882a593Smuzhiyun sg_dma_address(s) = s_iova_off;
939*4882a593Smuzhiyun sg_dma_len(s) = s_length;
940*4882a593Smuzhiyun s->offset -= s_iova_off;
941*4882a593Smuzhiyun s_length = iova_align(iovad, s_length + s_iova_off);
942*4882a593Smuzhiyun s->length = s_length;
943*4882a593Smuzhiyun
944*4882a593Smuzhiyun /*
945*4882a593Smuzhiyun * Due to the alignment of our single IOVA allocation, we can
946*4882a593Smuzhiyun * depend on these assumptions about the segment boundary mask:
947*4882a593Smuzhiyun * - If mask size >= IOVA size, then the IOVA range cannot
948*4882a593Smuzhiyun * possibly fall across a boundary, so we don't care.
949*4882a593Smuzhiyun * - If mask size < IOVA size, then the IOVA range must start
950*4882a593Smuzhiyun * exactly on a boundary, therefore we can lay things out
951*4882a593Smuzhiyun * based purely on segment lengths without needing to know
952*4882a593Smuzhiyun * the actual addresses beforehand.
953*4882a593Smuzhiyun * - The mask must be a power of 2, so pad_len == 0 if
954*4882a593Smuzhiyun * iova_len == 0, thus we cannot dereference prev the first
955*4882a593Smuzhiyun * time through here (i.e. before it has a meaningful value).
956*4882a593Smuzhiyun */
957*4882a593Smuzhiyun if (pad_len && pad_len < s_length - 1) {
958*4882a593Smuzhiyun prev->length += pad_len;
959*4882a593Smuzhiyun iova_len += pad_len;
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun
962*4882a593Smuzhiyun iova_len += s_length;
963*4882a593Smuzhiyun prev = s;
964*4882a593Smuzhiyun }
965*4882a593Smuzhiyun
966*4882a593Smuzhiyun iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
967*4882a593Smuzhiyun if (!iova)
968*4882a593Smuzhiyun goto out_restore_sg;
969*4882a593Smuzhiyun
970*4882a593Smuzhiyun /*
971*4882a593Smuzhiyun * We'll leave any physical concatenation to the IOMMU driver's
972*4882a593Smuzhiyun * implementation - it knows better than we do.
973*4882a593Smuzhiyun */
974*4882a593Smuzhiyun if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
975*4882a593Smuzhiyun goto out_free_iova;
976*4882a593Smuzhiyun
977*4882a593Smuzhiyun return __finalise_sg(dev, sg, nents, iova);
978*4882a593Smuzhiyun
979*4882a593Smuzhiyun out_free_iova:
980*4882a593Smuzhiyun iommu_dma_free_iova(cookie, iova, iova_len);
981*4882a593Smuzhiyun out_restore_sg:
982*4882a593Smuzhiyun __invalidate_sg(sg, nents);
983*4882a593Smuzhiyun return 0;
984*4882a593Smuzhiyun }
985*4882a593Smuzhiyun
iommu_dma_unmap_sg(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir,unsigned long attrs)986*4882a593Smuzhiyun static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
987*4882a593Smuzhiyun int nents, enum dma_data_direction dir, unsigned long attrs)
988*4882a593Smuzhiyun {
989*4882a593Smuzhiyun dma_addr_t start, end;
990*4882a593Smuzhiyun struct scatterlist *tmp;
991*4882a593Smuzhiyun int i;
992*4882a593Smuzhiyun
993*4882a593Smuzhiyun if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
994*4882a593Smuzhiyun iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
995*4882a593Smuzhiyun
996*4882a593Smuzhiyun /*
997*4882a593Smuzhiyun * The scatterlist segments are mapped into a single
998*4882a593Smuzhiyun * contiguous IOVA allocation, so this is incredibly easy.
999*4882a593Smuzhiyun */
1000*4882a593Smuzhiyun start = sg_dma_address(sg);
1001*4882a593Smuzhiyun for_each_sg(sg_next(sg), tmp, nents - 1, i) {
1002*4882a593Smuzhiyun if (sg_dma_len(tmp) == 0)
1003*4882a593Smuzhiyun break;
1004*4882a593Smuzhiyun sg = tmp;
1005*4882a593Smuzhiyun }
1006*4882a593Smuzhiyun end = sg_dma_address(sg) + sg_dma_len(sg);
1007*4882a593Smuzhiyun __iommu_dma_unmap(dev, start, end - start);
1008*4882a593Smuzhiyun }
1009*4882a593Smuzhiyun
iommu_dma_map_resource(struct device * dev,phys_addr_t phys,size_t size,enum dma_data_direction dir,unsigned long attrs)1010*4882a593Smuzhiyun static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
1011*4882a593Smuzhiyun size_t size, enum dma_data_direction dir, unsigned long attrs)
1012*4882a593Smuzhiyun {
1013*4882a593Smuzhiyun return __iommu_dma_map(dev, phys, size,
1014*4882a593Smuzhiyun dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
1015*4882a593Smuzhiyun dma_get_mask(dev));
1016*4882a593Smuzhiyun }
1017*4882a593Smuzhiyun
iommu_dma_unmap_resource(struct device * dev,dma_addr_t handle,size_t size,enum dma_data_direction dir,unsigned long attrs)1018*4882a593Smuzhiyun static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
1019*4882a593Smuzhiyun size_t size, enum dma_data_direction dir, unsigned long attrs)
1020*4882a593Smuzhiyun {
1021*4882a593Smuzhiyun __iommu_dma_unmap(dev, handle, size);
1022*4882a593Smuzhiyun }
1023*4882a593Smuzhiyun
__iommu_dma_free(struct device * dev,size_t size,void * cpu_addr)1024*4882a593Smuzhiyun static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
1025*4882a593Smuzhiyun {
1026*4882a593Smuzhiyun size_t alloc_size = PAGE_ALIGN(size);
1027*4882a593Smuzhiyun int count = alloc_size >> PAGE_SHIFT;
1028*4882a593Smuzhiyun struct page *page = NULL, **pages = NULL;
1029*4882a593Smuzhiyun
1030*4882a593Smuzhiyun /* Non-coherent atomic allocation? Easy */
1031*4882a593Smuzhiyun if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1032*4882a593Smuzhiyun dma_free_from_pool(dev, cpu_addr, alloc_size))
1033*4882a593Smuzhiyun return;
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
1036*4882a593Smuzhiyun /*
1037*4882a593Smuzhiyun * If it the address is remapped, then it's either non-coherent
1038*4882a593Smuzhiyun * or highmem CMA, or an iommu_dma_alloc_remap() construction.
1039*4882a593Smuzhiyun */
1040*4882a593Smuzhiyun pages = dma_common_find_pages(cpu_addr);
1041*4882a593Smuzhiyun if (!pages)
1042*4882a593Smuzhiyun page = vmalloc_to_page(cpu_addr);
1043*4882a593Smuzhiyun dma_common_free_remap(cpu_addr, alloc_size);
1044*4882a593Smuzhiyun } else {
1045*4882a593Smuzhiyun /* Lowmem means a coherent atomic or CMA allocation */
1046*4882a593Smuzhiyun page = virt_to_page(cpu_addr);
1047*4882a593Smuzhiyun }
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun if (pages)
1050*4882a593Smuzhiyun __iommu_dma_free_pages(pages, count);
1051*4882a593Smuzhiyun if (page)
1052*4882a593Smuzhiyun dma_free_contiguous(dev, page, alloc_size);
1053*4882a593Smuzhiyun }
1054*4882a593Smuzhiyun
iommu_dma_free(struct device * dev,size_t size,void * cpu_addr,dma_addr_t handle,unsigned long attrs)1055*4882a593Smuzhiyun static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
1056*4882a593Smuzhiyun dma_addr_t handle, unsigned long attrs)
1057*4882a593Smuzhiyun {
1058*4882a593Smuzhiyun __iommu_dma_unmap(dev, handle, size);
1059*4882a593Smuzhiyun __iommu_dma_free(dev, size, cpu_addr);
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

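/*
 * DMA-API .alloc entry point, choosing between three strategies:
 * discontiguous remapped pages when blocking is allowed and contiguity
 * isn't forced, the atomic pool for non-blocking non-coherent requests,
 * and a contiguous page allocation otherwise.
 *
 * Drivers never call this directly; as a minimal usage sketch (my_dev
 * being a hypothetical device already attached to a DMA domain), the
 * generic DMA API dispatches here once iommu_dma_ops are installed:
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(my_dev, SZ_64K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(my_dev, SZ_64K, cpu, dma);
 */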
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

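/*
 * The .alloc_noncoherent/.free_noncoherent hooks below back
 * dma_alloc_noncoherent() and dma_free_noncoherent(). When the caller
 * cannot block, no fresh remapping can be set up, so the request is
 * punted to dma_common_alloc_pages() instead.
 */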
#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
{
	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;

		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
				     PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent	NULL
#define iommu_dma_free_noncoherent	NULL
#endif /* CONFIG_DMA_REMAP */

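/*
 * DMA-API .mmap entry point, reached via dma_mmap_coherent() and
 * friends. Remapped (vmalloc) buffers are mapped from their page array;
 * everything else is a single physically contiguous range handled by
 * remap_pfn_range(). A hypothetical caller:
 *
 *	ret = dma_mmap_coherent(my_dev, vma, cpu, dma, SZ_64K);
 */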
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

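/*
 * DMA-API .get_sgtable entry point (dma_get_sgtable()): remapped buffers
 * get a scatterlist built from their page array, while contiguous ones
 * are described by a single-entry table.
 */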
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

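/*
 * Segments can only be merged if the resulting IOVAs stay mappable, so
 * report a merge boundary of the smallest IOMMU page size minus one.
 * For example, with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, __ffs()
 * yields 12 and the boundary is 0xfff.
 */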
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
	.free_noncoherent	= iommu_dma_free_noncoherent,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

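/*
 * Illustrative only: architecture code typically wires this up from its
 * arch_setup_dma_ops() hook, along these lines (names and the exact
 * signature vary by architecture):
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 */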
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

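/*
 * iommu_dma_prepare_msi() and iommu_dma_compose_msi_msg() split MSI
 * handling in two: the sleepable prepare step maps the doorbell page
 * and caches it on the descriptor, so that the compose step can run
 * from non-sleepable context without any allocations.
 */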
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

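/*
 * Worked example (hypothetical addresses): with a 4K granule, a doorbell
 * at phys 0x08090040 is mapped via the page at 0x08090000; if that page
 * got IOVA 0xfff00000, address_lo keeps the in-page offset 0x040 and
 * becomes 0xfff00040, with address_hi taken from the IOVA's upper bits.
 */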
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);