// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <trace/hooks/iommu.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

struct iommu_dma_cookie_ext {
	struct iommu_dma_cookie		cookie;
	struct mutex			mutex;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie_ext *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->cookie.msi_page_list);
		cookie->cookie.type = type;
		mutex_init(&cookie->mutex);
	}
	return &cookie->cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
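
/*
 * Illustrative sketch (not part of this file): a hypothetical IOMMU driver
 * would typically pair iommu_get_dma_cookie() in its domain_alloc callback
 * with iommu_put_dma_cookie() in domain_free, roughly as below. All
 * "mydrv_" names are invented for the example.
 *
 *	static struct iommu_domain *mydrv_domain_alloc(unsigned int type)
 *	{
 *		struct mydrv_domain *dom;
 *
 *		if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
 *			return NULL;
 *
 *		dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *		if (!dom)
 *			return NULL;
 *
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 *	static void mydrv_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(container_of(domain, struct mydrv_domain, domain));
 *	}
 */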

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
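
/*
 * Illustrative sketch (not part of this file): a caller which runs its own
 * IOVA allocator on an unmanaged domain can still get automatic MSI
 * remapping by installing an MSI cookie before attaching devices. The
 * MYDRV_ and mydrv_ names below are invented for the example.
 *
 *	#define MYDRV_MSI_IOVA_BASE	0x8000000ULL
 *	#define MYDRV_MSI_IOVA_SIZE	0x100000
 *
 *	static struct iommu_domain *mydrv_alloc_domain(struct device *dev)
 *	{
 *		struct iommu_domain *domain;
 *
 *		domain = iommu_domain_alloc(dev->bus);
 *		if (!domain)
 *			return NULL;
 *
 *		if (iommu_get_msi_cookie(domain, MYDRV_MSI_IOVA_BASE)) {
 *			iommu_domain_free(domain);
 *			return NULL;
 *		}
 *		return domain;
 *	}
 *
 * The caller must then keep [MYDRV_MSI_IOVA_BASE, MYDRV_MSI_IOVA_BASE +
 * MYDRV_MSI_IOVA_SIZE) out of its own allocations, per the comment above.
 */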

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
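
/*
 * Illustrative sketch (not part of this file): an IOMMU driver with no
 * hardware-specific reserved regions of its own could wire its
 * .get_resv_regions callback straight through to this helper (the
 * mydrv_ name is invented):
 *
 *	static void mydrv_get_resv_regions(struct device *dev,
 *					   struct list_head *head)
 *	{
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 *
 * Real drivers usually also add their own regions (e.g. an IOMMU_RESV_SW_MSI
 * window) to @head before or after calling this helper.
 */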

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_cookie_ext *cookie_ext;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;
	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	cookie_ext = container_of(cookie, struct iommu_dma_cookie_ext, cookie);
	mutex_lock(&cookie_ext->mutex);
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			ret = -EFAULT;
			goto done_unlock;
		}

		ret = 0;
		goto done_unlock;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					  NULL))
			pr_warn("iova flush queue initialization failed\n");
		else
			cookie->fq_domain = domain;
	}

	if (!dev) {
		ret = 0;
		goto done_unlock;
	}

	ret = iova_reserve_iommu_regions(dev, domain);

done_unlock:
	mutex_unlock(&cookie_ext->mutex);
	return ret;
}

static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}

/*
 * Should be called prior to using the DMA API.
 */
int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
		u64 size)
{
	struct iommu_domain *domain;
	struct iommu_dma_cookie *cookie;
	struct iova_domain *iovad;
	unsigned long pfn_lo, pfn_hi;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain || !domain->iova_cookie)
		return -EINVAL;

	cookie = domain->iova_cookie;
	iovad = &cookie->iovad;

	/* iova will be freed automatically by put_iova_domain() */
	pfn_lo = iova_pfn(iovad, base);
	pfn_hi = iova_pfn(iovad, base + size - 1);
	if (!reserve_iova(iovad, pfn_lo, pfn_hi))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(iommu_dma_reserve_iova);

/*
 * Should be called prior to using the DMA API.
 */
int iommu_dma_enable_best_fit_algo(struct device *dev)
{
	struct iommu_domain *domain;
	struct iova_domain *iovad;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain || !domain->iova_cookie)
		return -EINVAL;

	iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
	iovad->best_fit = true;
	return 0;
}
EXPORT_SYMBOL(iommu_dma_enable_best_fit_algo);
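
/*
 * Illustrative sketch (not part of this file): both helpers above are meant
 * to run from a client driver's probe path, before the device issues any
 * dma_map_*() calls. Hypothetical example; the mydrv_ and MYDRV_ names are
 * invented:
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		int ret;
 *
 *		ret = iommu_dma_enable_best_fit_algo(dev);
 *		if (ret)
 *			return ret;
 *
 *		ret = iommu_dma_reserve_iova(dev, MYDRV_RESV_IOVA_BASE,
 *					     MYDRV_RESV_IOVA_SIZE);
 *		if (ret)
 *			return ret;
 *
 *		return mydrv_setup_dma(pdev);
 *	}
 *
 * where MYDRV_RESV_IOVA_BASE and MYDRV_RESV_IOVA_SIZE describe an IOVA
 * window that must never be handed out to this device.
 */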

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;
	if (attrs & DMA_ATTR_SYS_CACHE_ONLY)
		prot |= IOMMU_SYS_CACHE;
	if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
		prot |= IOMMU_SYS_CACHE_NWA;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	trace_android_vh_iommu_alloc_iova(dev, (dma_addr_t)iova << shift, size);
	trace_android_vh_iommu_iovad_alloc_iova(dev, iovad, (dma_addr_t)iova << shift, size);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));

	trace_android_vh_iommu_free_iova(iova, size);
	trace_android_vh_iommu_iovad_free_iova(iovad, iova, size);
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
{
	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;

		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
				     PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent	NULL
#define iommu_dma_free_noncoherent	NULL
#endif /* CONFIG_DMA_REMAP */

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
	.free_noncoherent	= iommu_dma_free_noncoherent,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
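
/*
 * Illustrative sketch (not part of this file): architecture code is expected
 * to call iommu_setup_dma_ops() once the device's IOMMU translation has been
 * configured, typically from its arch_setup_dma_ops() hook. A simplified,
 * arm64-like example:
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 *
 * After this, ordinary dma_map_single() and dma_map_sg() calls on the device
 * are routed through iommu_dma_ops above.
 */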

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
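
/*
 * Illustrative sketch (not part of this file): an MSI irqchip driver hooks
 * these two helpers into its IRQ domain callbacks, roughly as below
 * (simplified and GICv3-ITS-like; the mychip_ names are invented):
 *
 *	static int mychip_irq_domain_alloc(struct irq_domain *domain,
 *					   unsigned int virq,
 *					   unsigned int nr_irqs, void *args)
 *	{
 *		msi_alloc_info_t *info = args;
 *		int err;
 *
 *		err = iommu_dma_prepare_msi(info->desc, mychip_doorbell_addr());
 *		if (err)
 *			return err;
 *		return mychip_alloc_hwirqs(domain, virq, nr_irqs, info);
 *	}
 *
 *	static void mychip_irq_compose_msi_msg(struct irq_data *d,
 *					       struct msi_msg *msg)
 *	{
 *		msg->address_lo = lower_32_bits(mychip_doorbell_addr());
 *		msg->address_hi = upper_32_bits(mychip_doorbell_addr());
 *		msg->data = mychip_event_id(d);
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 *	}
 *
 * iommu_dma_compose_msi_msg() then rewrites the doorbell address in @msg with
 * the IOVA that was mapped during the prepare step.
 */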

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);
