Lines Matching +full:page +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0-only
3 * A fairly generic DMA-API to IOMMU-API glue layer.
5 * Copyright (C) 2014-2015 ARM Ltd.
7 * based in part on arch/arm/mm/dma-mapping.c:
8 * Copyright (C) 2000-2004 Russell King
13 #include <linux/dma-map-ops.h>
14 #include <linux/dma-iommu.h>
44 /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
60 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) in cookie_msi_granule()
61 return cookie->iovad.granule; in cookie_msi_granule()
71 INIT_LIST_HEAD(&cookie->cookie.msi_page_list); in cookie_alloc()
72 cookie->cookie.type = type; in cookie_alloc()
73 mutex_init(&cookie->mutex); in cookie_alloc()
75 return &cookie->cookie; in cookie_alloc()
79 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
80 * @domain: IOMMU domain to prepare for DMA-API usage
83 * callback when domain->type == IOMMU_DOMAIN_DMA.
87 if (domain->iova_cookie) in iommu_get_dma_cookie()
88 return -EEXIST; in iommu_get_dma_cookie()
90 domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); in iommu_get_dma_cookie()
91 if (!domain->iova_cookie) in iommu_get_dma_cookie()
92 return -ENOMEM; in iommu_get_dma_cookie()
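
A minimal sketch of the intended caller: an IOMMU driver's ->domain_alloc() attaches the cookie when asked for an IOMMU_DOMAIN_DMA domain. The my_* names and the wrapper struct are hypothetical, loosely following what in-tree drivers such as arm-smmu do.

#include <linux/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/slab.h>

/* Hypothetical driver-private domain wrapper */
struct my_iommu_domain {
	struct iommu_domain	domain;
	/* ... page-table state, locks, etc. ... */
};

static struct iommu_domain *my_iommu_domain_alloc(unsigned int type)
{
	struct my_iommu_domain *md;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	/* DMA-API-managed domains get an IOVA cookie up front */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&md->domain)) {
		kfree(md);
		return NULL;
	}

	return &md->domain;
}
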
99 * iommu_get_msi_cookie - Acquire just MSI remapping resources
114 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_get_msi_cookie()
115 return -EINVAL; in iommu_get_msi_cookie()
117 if (domain->iova_cookie) in iommu_get_msi_cookie()
118 return -EEXIST; in iommu_get_msi_cookie()
122 return -ENOMEM; in iommu_get_msi_cookie()
124 cookie->msi_iova = base; in iommu_get_msi_cookie()
125 domain->iova_cookie = cookie; in iommu_get_msi_cookie()
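
A hedged sketch of the other entry point: a caller that manages the IOVA space itself (VFIO is the in-tree example) attaches only an MSI doorbell window to an IOMMU_DOMAIN_UNMANAGED domain it has already allocated. The base address below is an arbitrary, otherwise-unused IOVA chosen purely for illustration.

	/* 'domain' is an already-allocated IOMMU_DOMAIN_UNMANAGED domain;
	 * the doorbell window base is an assumed, otherwise-unused IOVA. */
	dma_addr_t msi_base = 0x8000000;
	int ret = iommu_get_msi_cookie(domain, msi_base);

	if (ret)
		pr_warn("MSI cookie setup failed: %d\n", ret);
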
131 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
139 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_put_dma_cookie()
145 if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) in iommu_put_dma_cookie()
146 put_iova_domain(&cookie->iovad); in iommu_put_dma_cookie()
148 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { in iommu_put_dma_cookie()
149 list_del(&msi->list); in iommu_put_dma_cookie()
153 domain->iova_cookie = NULL; in iommu_put_dma_cookie()
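
The matching teardown belongs in the driver's ->domain_free(); a sketch using the same hypothetical wrapper as above:

static void my_iommu_domain_free(struct iommu_domain *domain)
{
	struct my_iommu_domain *md =
		container_of(domain, struct my_iommu_domain, domain);

	/* Releases the IOVA domain (or MSI cookie) and any MSI pages */
	iommu_put_dma_cookie(domain);
	/* ... tear down page tables ... */
	kfree(md);
}
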
158 * iommu_dma_get_resv_regions - Reserved region driver helper
163 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
170 if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode)) in iommu_dma_get_resv_regions()
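
A sketch of how an IOMMU driver typically wires this up as its .get_resv_regions callback, adding its own software MSI window before the generic reservations. The MSI base/length constants are driver-specific assumptions, modelled on the arm-smmu drivers.

#define MY_MSI_IOVA_BASE	0x8000000	/* assumed values */
#define MY_MSI_IOVA_LENGTH	0x100000

static void my_iommu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MY_MSI_IOVA_BASE, MY_MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;
	list_add_tail(&region->list, head);

	/* Generic reservations: host bridge windows, firmware regions, ... */
	iommu_dma_get_resv_regions(dev, head);
}
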
179 struct iova_domain *iovad = &cookie->iovad; in cookie_init_hw_msi_region()
183 start -= iova_offset(iovad, start); in cookie_init_hw_msi_region()
184 num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); in cookie_init_hw_msi_region()
189 return -ENOMEM; in cookie_init_hw_msi_region()
191 msi_page->phys = start; in cookie_init_hw_msi_region()
192 msi_page->iova = start; in cookie_init_hw_msi_region()
193 INIT_LIST_HEAD(&msi_page->list); in cookie_init_hw_msi_region()
194 list_add(&msi_page->list, &cookie->msi_page_list); in cookie_init_hw_msi_region()
195 start += iovad->granule; in cookie_init_hw_msi_region()
204 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); in iova_reserve_pci_windows()
209 resource_list_for_each_entry(window, &bridge->windows) { in iova_reserve_pci_windows()
210 if (resource_type(window->res) != IORESOURCE_MEM) in iova_reserve_pci_windows()
213 lo = iova_pfn(iovad, window->res->start - window->offset); in iova_reserve_pci_windows()
214 hi = iova_pfn(iovad, window->res->end - window->offset); in iova_reserve_pci_windows()
219 resource_list_for_each_entry(window, &bridge->dma_ranges) { in iova_reserve_pci_windows()
220 end = window->res->start - window->offset; in iova_reserve_pci_windows()
228 dev_err(&dev->dev, in iova_reserve_pci_windows()
229 "Failed to reserve IOVA [%pa-%pa]\n", in iova_reserve_pci_windows()
231 return -EINVAL; in iova_reserve_pci_windows()
234 start = window->res->end - window->offset + 1; in iova_reserve_pci_windows()
236 if (window->node.next == &bridge->dma_ranges && in iova_reserve_pci_windows()
249 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iova_reserve_iommu_regions()
250 struct iova_domain *iovad = &cookie->iovad; in iova_reserve_iommu_regions()
266 if (region->type == IOMMU_RESV_SW_MSI) in iova_reserve_iommu_regions()
269 lo = iova_pfn(iovad, region->start); in iova_reserve_iommu_regions()
270 hi = iova_pfn(iovad, region->start + region->length - 1); in iova_reserve_iommu_regions()
273 if (region->type == IOMMU_RESV_MSI) in iova_reserve_iommu_regions()
274 ret = cookie_init_hw_msi_region(cookie, region->start, in iova_reserve_iommu_regions()
275 region->start + region->length); in iova_reserve_iommu_regions()
290 domain = cookie->fq_domain; in iommu_dma_flush_iotlb_all()
293 * implies that ops->flush_iotlb_all must be non-NULL. in iommu_dma_flush_iotlb_all()
295 domain->ops->flush_iotlb_all(domain); in iommu_dma_flush_iotlb_all()
299 * iommu_dma_init_domain - Initialise a DMA mapping domain
302 * @size: Size of IOVA space
305 * @base and @size should be exact multiples of IOMMU page granularity to
306 * avoid rounding surprises. If necessary, we reserve the page at address 0
311 u64 size, struct device *dev) in iommu_dma_init_domain() argument
313 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_domain()
320 if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) in iommu_dma_init_domain()
321 return -EINVAL; in iommu_dma_init_domain()
323 iovad = &cookie->iovad; in iommu_dma_init_domain()
325 /* Use the smallest supported page size for IOVA granularity */ in iommu_dma_init_domain()
326 order = __ffs(domain->pgsize_bitmap); in iommu_dma_init_domain()
330 if (domain->geometry.force_aperture) { in iommu_dma_init_domain()
331 if (base > domain->geometry.aperture_end || in iommu_dma_init_domain()
332 base + size <= domain->geometry.aperture_start) { in iommu_dma_init_domain()
334 return -EFAULT; in iommu_dma_init_domain()
338 domain->geometry.aperture_start >> order); in iommu_dma_init_domain()
341 /* start_pfn is always nonzero for an already-initialised domain */ in iommu_dma_init_domain()
343 mutex_lock(&cookie_ext->mutex); in iommu_dma_init_domain()
344 if (iovad->start_pfn) { in iommu_dma_init_domain()
345 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
346 base_pfn != iovad->start_pfn) { in iommu_dma_init_domain()
348 ret = -EFAULT; in iommu_dma_init_domain()
358 if (!cookie->fq_domain && !iommu_domain_get_attr(domain, in iommu_dma_init_domain()
364 cookie->fq_domain = domain; in iommu_dma_init_domain()
375 mutex_unlock(&cookie_ext->mutex); in iommu_dma_init_domain()
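
To make the granule selection in iommu_dma_init_domain() concrete: the IOVA granularity is the smallest page size the IOMMU advertises in pgsize_bitmap. A worked illustration follows; the bitmap value is an assumption and 'base' stands for the @base argument.

	unsigned long pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G;	/* assumed hardware support */
	unsigned long order = __ffs(pgsize_bitmap);		/* 12 */
	unsigned long granule = 1UL << order;			/* 4 KiB IOVA granule */
	unsigned long base_pfn = max_t(unsigned long, 1, base >> order);
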
382 const struct iommu_ops *ops = domain->ops; in iommu_dma_deferred_attach()
387 if (unlikely(ops->is_attach_deferred && in iommu_dma_deferred_attach()
388 ops->is_attach_deferred(domain, dev))) in iommu_dma_deferred_attach()
395 * Should be called prior to using the DMA API
398 u64 size) in iommu_dma_reserve_iova() argument
406 if (!domain || !domain->iova_cookie) in iommu_dma_reserve_iova()
407 return -EINVAL; in iommu_dma_reserve_iova()
409 cookie = domain->iova_cookie; in iommu_dma_reserve_iova()
410 iovad = &cookie->iovad; in iommu_dma_reserve_iova()
414 pfn_hi = iova_pfn(iovad, base + size - 1); in iommu_dma_reserve_iova()
416 return -EINVAL; in iommu_dma_reserve_iova()
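
A hedged usage sketch for this (Android-specific) helper: a driver that must keep a fixed IOVA window free of DMA-API allocations calls it once, before its first mapping. The address and size below are placeholders.

	/* Keep an assumed 16 MB window out of the IOVA allocator */
	ret = iommu_dma_reserve_iova(dev, 0x10000000, SZ_16M);
	if (ret)
		dev_err(dev, "failed to reserve IOVA range: %d\n", ret);
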
423 * Should be called prior to using the DMA API.
431 if (!domain || !domain->iova_cookie) in iommu_dma_enable_best_fit_algo()
432 return -EINVAL; in iommu_dma_enable_best_fit_algo()
434 iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; in iommu_dma_enable_best_fit_algo()
435 iovad->best_fit = true; in iommu_dma_enable_best_fit_algo()
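
Similarly, opting a device's domain into best-fit IOVA allocation (another Android-specific knob) is a one-shot call made before any mappings exist; a minimal sketch:

	if (iommu_dma_enable_best_fit_algo(dev))
		dev_warn(dev, "best-fit IOVA allocation unavailable\n");
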
441 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
442 * page flags.
444 * @coherent: Is the DMA master cache-coherent?
447 * Return: corresponding IOMMU API page protection flags
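
For example, a cache-coherent master mapping a buffer that only the device will read gets IOMMU_READ | IOMMU_CACHE back; passing DMA_ATTR_PRIVILEGED in the attributes would additionally set IOMMU_PRIV.

	int prot = dma_info_to_prot(DMA_TO_DEVICE, true, 0);
	/* prot == IOMMU_READ | IOMMU_CACHE */
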
474 size_t size, u64 dma_limit, struct device *dev) in iommu_dma_alloc_iova() argument
476 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_alloc_iova()
477 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_alloc_iova()
480 if (cookie->type == IOMMU_DMA_MSI_COOKIE) { in iommu_dma_alloc_iova()
481 cookie->msi_iova += size; in iommu_dma_alloc_iova()
482 return cookie->msi_iova - size; in iommu_dma_alloc_iova()
486 iova_len = size >> shift; in iommu_dma_alloc_iova()
488 * Freeing non-power-of-two-sized allocations back into the IOVA caches in iommu_dma_alloc_iova()
491 * order of the unadjusted size will still match upon freeing. in iommu_dma_alloc_iova()
493 if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) in iommu_dma_alloc_iova()
496 dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); in iommu_dma_alloc_iova()
498 if (domain->geometry.force_aperture) in iommu_dma_alloc_iova()
499 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); in iommu_dma_alloc_iova()
510 trace_android_vh_iommu_alloc_iova(dev, (dma_addr_t)iova << shift, size); in iommu_dma_alloc_iova()
511 trace_android_vh_iommu_iovad_alloc_iova(dev, iovad, (dma_addr_t)iova << shift, size); in iommu_dma_alloc_iova()
517 dma_addr_t iova, size_t size) in iommu_dma_free_iova() argument
519 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_free_iova()
522 if (cookie->type == IOMMU_DMA_MSI_COOKIE) in iommu_dma_free_iova()
523 cookie->msi_iova -= size; in iommu_dma_free_iova()
524 else if (cookie->fq_domain) /* non-strict mode */ in iommu_dma_free_iova()
526 size >> iova_shift(iovad), 0); in iommu_dma_free_iova()
529 size >> iova_shift(iovad)); in iommu_dma_free_iova()
531 trace_android_vh_iommu_free_iova(iova, size); in iommu_dma_free_iova()
532 trace_android_vh_iommu_iovad_free_iova(iovad, iova, size); in iommu_dma_free_iova()
536 size_t size) in __iommu_dma_unmap() argument
539 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_unmap()
540 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_unmap()
545 dma_addr -= iova_off; in __iommu_dma_unmap()
546 size = iova_align(iovad, size + iova_off); in __iommu_dma_unmap()
549 unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather); in __iommu_dma_unmap()
550 WARN_ON(unmapped != size); in __iommu_dma_unmap()
552 if (!cookie->fq_domain) in __iommu_dma_unmap()
554 iommu_dma_free_iova(cookie, dma_addr, size); in __iommu_dma_unmap()
558 size_t size, int prot, u64 dma_mask) in __iommu_dma_map() argument
561 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_map()
562 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_map()
569 size = iova_align(iovad, size + iova_off); in __iommu_dma_map()
571 iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev); in __iommu_dma_map()
575 if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) { in __iommu_dma_map()
576 iommu_dma_free_iova(cookie, iova, size); in __iommu_dma_map()
582 static void __iommu_dma_free_pages(struct page **pages, int count) in __iommu_dma_free_pages()
584 while (count--) in __iommu_dma_free_pages()
589 static struct page **__iommu_dma_alloc_pages(struct device *dev, in __iommu_dma_alloc_pages()
592 struct page **pages; in __iommu_dma_alloc_pages()
595 order_mask &= (2U << MAX_ORDER) - 1; in __iommu_dma_alloc_pages()
610 struct page *page = NULL; in __iommu_dma_alloc_pages() local
614 * Higher-order allocations are a convenience rather in __iommu_dma_alloc_pages()
616 * falling back to minimum-order allocations. in __iommu_dma_alloc_pages()
618 for (order_mask &= (2U << __fls(count)) - 1; in __iommu_dma_alloc_pages()
626 page = alloc_pages_node(nid, alloc_flags, order); in __iommu_dma_alloc_pages()
627 if (!page) in __iommu_dma_alloc_pages()
630 split_page(page, order); in __iommu_dma_alloc_pages()
633 if (!page) { in __iommu_dma_alloc_pages()
637 count -= order_size; in __iommu_dma_alloc_pages()
638 while (order_size--) in __iommu_dma_alloc_pages()
639 pages[i++] = page++; in __iommu_dma_alloc_pages()
645 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
648 * @size: Size of buffer in bytes
654 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
659 static void *iommu_dma_alloc_remap(struct device *dev, size_t size, in iommu_dma_alloc_remap() argument
664 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_alloc_remap()
665 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_alloc_remap()
668 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; in iommu_dma_alloc_remap()
669 struct page **pages; in iommu_dma_alloc_remap()
679 min_size = alloc_sizes & -alloc_sizes; in iommu_dma_alloc_remap()
684 size = ALIGN(size, min_size); in iommu_dma_alloc_remap()
689 count = PAGE_ALIGN(size) >> PAGE_SHIFT; in iommu_dma_alloc_remap()
695 size = iova_align(iovad, size); in iommu_dma_alloc_remap()
696 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); in iommu_dma_alloc_remap()
700 if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) in iommu_dma_alloc_remap()
708 arch_dma_prep_coherent(sg_page(sg), sg->length); in iommu_dma_alloc_remap()
712 < size) in iommu_dma_alloc_remap()
715 vaddr = dma_common_pages_remap(pages, size, prot, in iommu_dma_alloc_remap()
725 __iommu_dma_unmap(dev, iova, size); in iommu_dma_alloc_remap()
729 iommu_dma_free_iova(cookie, iova, size); in iommu_dma_alloc_remap()
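
Drivers never call this path directly; it is reached through the regular coherent DMA API once the device is using iommu_dma_ops. A minimal caller sketch, with an arbitrary size and variable names:

	dma_addr_t dma;
	void *cpu = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;
	/* ... hand 'dma' to the device, access the buffer through 'cpu' ... */
	dma_free_coherent(dev, SZ_64K, cpu, dma);
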
736 * __iommu_dma_mmap - Map a buffer into provided user VMA
738 * @size: Size of buffer in bytes
742 * for verifying the correct size and protection of @vma beforehand.
744 static int __iommu_dma_mmap(struct page **pages, size_t size, in __iommu_dma_mmap() argument
747 return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT); in __iommu_dma_mmap()
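
From the driver side, exporting such a buffer to userspace goes through dma_mmap_coherent(), which lands in iommu_dma_mmap() and ultimately here. A sketch with hypothetical driver state:

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_drv_buffer *buf = file->private_data;	/* hypothetical */

	return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
				 buf->dma_handle, buf->size);
}
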
751 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) in iommu_dma_sync_single_for_cpu() argument
759 arch_sync_dma_for_cpu(phys, size, dir); in iommu_dma_sync_single_for_cpu()
763 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) in iommu_dma_sync_single_for_device() argument
771 arch_sync_dma_for_device(phys, size, dir); in iommu_dma_sync_single_for_device()
785 arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_cpu()
799 arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_device()
802 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, in iommu_dma_map_page() argument
803 unsigned long offset, size_t size, enum dma_data_direction dir, in iommu_dma_map_page() argument
806 phys_addr_t phys = page_to_phys(page) + offset; in iommu_dma_map_page()
811 dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev)); in iommu_dma_map_page()
814 arch_sync_dma_for_device(phys, size, dir); in iommu_dma_map_page()
819 size_t size, enum dma_data_direction dir, unsigned long attrs) in iommu_dma_unmap_page() argument
822 iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir); in iommu_dma_unmap_page()
823 __iommu_dma_unmap(dev, dma_handle, size); in iommu_dma_unmap_page()
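
These two back the streaming single-page case of the DMA API; a typical caller looks like the following (direction and error handling per the usual DMA API rules):

	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ... device reads the page at 'dma' ... */
	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
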
827 * Prepare a successfully-mapped scatterlist to give back to the caller.
845 unsigned int s_iova_len = s->length; in __finalise_sg()
847 s->offset += s_iova_off; in __finalise_sg()
848 s->length = s_length; in __finalise_sg()
854 * - there is a valid output segment to append to in __finalise_sg()
855 * - and this segment starts on an IOVA page boundary in __finalise_sg()
856 * - but doesn't fall at a segment boundary in __finalise_sg()
857 * - and wouldn't make the resulting output segment too long in __finalise_sg()
860 (max_len - cur_len >= s_length)) { in __finalise_sg()
893 s->offset += sg_dma_address(s); in __invalidate_sg()
895 s->length = sg_dma_len(s); in __invalidate_sg()
905 * impedance-matching, to be able to hand off a suitably-aligned list,
912 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_sg()
913 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_sg()
930 * trickery we can modify the list in-place, but reversibly, by in iommu_dma_map_sg()
931 * stashing the unaligned parts in the as-yet-unused DMA fields. in iommu_dma_map_sg()
934 size_t s_iova_off = iova_offset(iovad, s->offset); in iommu_dma_map_sg()
935 size_t s_length = s->length; in iommu_dma_map_sg()
936 size_t pad_len = (mask - iova_len + 1) & mask; in iommu_dma_map_sg()
940 s->offset -= s_iova_off; in iommu_dma_map_sg()
942 s->length = s_length; in iommu_dma_map_sg()
947 * - If mask size >= IOVA size, then the IOVA range cannot in iommu_dma_map_sg()
949 * - If mask size < IOVA size, then the IOVA range must start in iommu_dma_map_sg()
953 * - The mask must be a power of 2, so pad_len == 0 if in iommu_dma_map_sg()
957 if (pad_len && pad_len < s_length - 1) { in iommu_dma_map_sg()
958 prev->length += pad_len; in iommu_dma_map_sg()
972 * implementation - it knows better than we do. in iommu_dma_map_sg()
1001 for_each_sg(sg_next(sg), tmp, nents - 1, i) { in iommu_dma_unmap_sg()
1007 __iommu_dma_unmap(dev, start, end - start); in iommu_dma_unmap_sg()
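
The scatterlist path is likewise reached through dma_map_sg()/dma_unmap_sg(); note that the returned segment count can be smaller than nents because of the coalescing described above.

	int count = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);

	if (!count)
		return -ENOMEM;
	/* program the device with 'count' (possibly merged) segments */
	dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
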
1011 size_t size, enum dma_data_direction dir, unsigned long attrs) in iommu_dma_map_resource() argument
1013 return __iommu_dma_map(dev, phys, size, in iommu_dma_map_resource()
1019 size_t size, enum dma_data_direction dir, unsigned long attrs) in iommu_dma_unmap_resource() argument
1021 __iommu_dma_unmap(dev, handle, size); in iommu_dma_unmap_resource()
1024 static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) in __iommu_dma_free() argument
1026 size_t alloc_size = PAGE_ALIGN(size); in __iommu_dma_free()
1028 struct page *page = NULL, **pages = NULL; in __iommu_dma_free() local
1030 /* Non-coherent atomic allocation? Easy */ in __iommu_dma_free()
1037 * If the address is remapped, then it's either non-coherent in __iommu_dma_free()
1042 page = vmalloc_to_page(cpu_addr); in __iommu_dma_free()
1046 page = virt_to_page(cpu_addr); in __iommu_dma_free()
1051 if (page) in __iommu_dma_free()
1052 dma_free_contiguous(dev, page, alloc_size); in __iommu_dma_free()
1055 static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, in iommu_dma_free() argument
1058 __iommu_dma_unmap(dev, handle, size); in iommu_dma_free()
1059 __iommu_dma_free(dev, size, cpu_addr); in iommu_dma_free()
1062 static void *iommu_dma_alloc_pages(struct device *dev, size_t size, in iommu_dma_alloc_pages() argument
1063 struct page **pagep, gfp_t gfp, unsigned long attrs) in iommu_dma_alloc_pages()
1066 size_t alloc_size = PAGE_ALIGN(size); in iommu_dma_alloc_pages()
1068 struct page *page = NULL; in iommu_dma_alloc_pages() local
1071 page = dma_alloc_contiguous(dev, alloc_size, gfp); in iommu_dma_alloc_pages()
1072 if (!page) in iommu_dma_alloc_pages()
1073 page = alloc_pages_node(node, gfp, get_order(alloc_size)); in iommu_dma_alloc_pages()
1074 if (!page) in iommu_dma_alloc_pages()
1077 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { in iommu_dma_alloc_pages()
1080 cpu_addr = dma_common_contiguous_remap(page, alloc_size, in iommu_dma_alloc_pages()
1086 arch_dma_prep_coherent(page, size); in iommu_dma_alloc_pages()
1088 cpu_addr = page_address(page); in iommu_dma_alloc_pages()
1091 *pagep = page; in iommu_dma_alloc_pages()
1095 dma_free_contiguous(dev, page, alloc_size); in iommu_dma_alloc_pages()
1099 static void *iommu_dma_alloc(struct device *dev, size_t size, in iommu_dma_alloc() argument
1104 struct page *page = NULL; in iommu_dma_alloc() local
1111 return iommu_dma_alloc_remap(dev, size, handle, gfp, in iommu_dma_alloc()
1117 page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, in iommu_dma_alloc()
1120 cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); in iommu_dma_alloc()
1124 *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, in iommu_dma_alloc()
1125 dev->coherent_dma_mask); in iommu_dma_alloc()
1127 __iommu_dma_free(dev, size, cpu_addr); in iommu_dma_alloc()
1135 static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size, in iommu_dma_alloc_noncoherent() argument
1139 struct page *page; in iommu_dma_alloc_noncoherent() local
1141 page = dma_common_alloc_pages(dev, size, handle, dir, gfp); in iommu_dma_alloc_noncoherent()
1142 if (!page) in iommu_dma_alloc_noncoherent()
1144 return page_address(page); in iommu_dma_alloc_noncoherent()
1147 return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO, in iommu_dma_alloc_noncoherent()
1151 static void iommu_dma_free_noncoherent(struct device *dev, size_t size, in iommu_dma_free_noncoherent() argument
1154 __iommu_dma_unmap(dev, handle, size); in iommu_dma_free_noncoherent()
1155 __iommu_dma_free(dev, size, cpu_addr); in iommu_dma_free_noncoherent()
1163 void *cpu_addr, dma_addr_t dma_addr, size_t size, in iommu_dma_mmap() argument
1166 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in iommu_dma_mmap()
1167 unsigned long pfn, off = vma->vm_pgoff; in iommu_dma_mmap()
1170 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in iommu_dma_mmap()
1172 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in iommu_dma_mmap()
1175 if (off >= nr_pages || vma_pages(vma) > nr_pages - off) in iommu_dma_mmap()
1176 return -ENXIO; in iommu_dma_mmap()
1179 struct page **pages = dma_common_find_pages(cpu_addr); in iommu_dma_mmap()
1182 return __iommu_dma_mmap(pages, size, vma); in iommu_dma_mmap()
1188 return remap_pfn_range(vma, vma->vm_start, pfn + off, in iommu_dma_mmap()
1189 vma->vm_end - vma->vm_start, in iommu_dma_mmap()
1190 vma->vm_page_prot); in iommu_dma_mmap()
1194 void *cpu_addr, dma_addr_t dma_addr, size_t size, in iommu_dma_get_sgtable() argument
1197 struct page *page; in iommu_dma_get_sgtable() local
1201 struct page **pages = dma_common_find_pages(cpu_addr); in iommu_dma_get_sgtable()
1205 PAGE_ALIGN(size) >> PAGE_SHIFT, in iommu_dma_get_sgtable()
1206 0, size, GFP_KERNEL); in iommu_dma_get_sgtable()
1209 page = vmalloc_to_page(cpu_addr); in iommu_dma_get_sgtable()
1211 page = virt_to_page(cpu_addr); in iommu_dma_get_sgtable()
1216 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in iommu_dma_get_sgtable()
1224 return (1UL << __ffs(domain->pgsize_bitmap)) - 1; in iommu_dma_get_merge_boundary()
1251 * IOMMU driver needs to support via the dma-iommu layer.
1253 void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size) in iommu_setup_dma_ops() argument
1262 * underlying IOMMU driver needs to support via the dma-iommu layer. in iommu_setup_dma_ops()
1264 if (domain->type == IOMMU_DOMAIN_DMA) { in iommu_setup_dma_ops()
1265 if (iommu_dma_init_domain(domain, dma_base, size, dev)) in iommu_setup_dma_ops()
1267 dev->dma_ops = &iommu_dma_ops; in iommu_setup_dma_ops()
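
A simplified sketch of the architecture-side caller, modelled on arm64's arch_setup_dma_ops() with the other setup trimmed:

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
}
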
1279 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_get_msi_page()
1283 size_t size = cookie_msi_granule(cookie); in iommu_dma_get_msi_page() local
1285 msi_addr &= ~(phys_addr_t)(size - 1); in iommu_dma_get_msi_page()
1286 list_for_each_entry(msi_page, &cookie->msi_page_list, list) in iommu_dma_get_msi_page()
1287 if (msi_page->phys == msi_addr) in iommu_dma_get_msi_page()
1294 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); in iommu_dma_get_msi_page()
1298 if (iommu_map(domain, iova, msi_addr, size, prot)) in iommu_dma_get_msi_page()
1301 INIT_LIST_HEAD(&msi_page->list); in iommu_dma_get_msi_page()
1302 msi_page->phys = msi_addr; in iommu_dma_get_msi_page()
1303 msi_page->iova = iova; in iommu_dma_get_msi_page()
1304 list_add(&msi_page->list, &cookie->msi_page_list); in iommu_dma_get_msi_page()
1308 iommu_dma_free_iova(cookie, iova, size); in iommu_dma_get_msi_page()
1321 if (!domain || !domain->iova_cookie) { in iommu_dma_prepare_msi()
1322 desc->iommu_cookie = NULL; in iommu_dma_prepare_msi()
1338 return -ENOMEM; in iommu_dma_prepare_msi()
1351 if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) in iommu_dma_compose_msi_msg()
1354 msg->address_hi = upper_32_bits(msi_page->iova); in iommu_dma_compose_msi_msg()
1355 msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; in iommu_dma_compose_msi_msg()
1356 msg->address_lo += lower_32_bits(msi_page->iova); in iommu_dma_compose_msi_msg()
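
A hedged sketch of how an MSI controller driver uses this pair: pre-map the doorbell once at IRQ allocation time, then rewrite each composed message so it carries the mapped IOVA when a translation domain is in use. The doorbell address and the surrounding variables are placeholders; the in-tree GICv3 ITS driver follows this pattern.

	/* At irq_domain ->alloc() time: make sure the doorbell has an IOVA.
	 * 'desc', 'doorbell_phys', 'msg' and 'hwirq' are illustrative only. */
	ret = iommu_dma_prepare_msi(desc, doorbell_phys);
	if (ret)
		return ret;

	/* At ->irq_compose_msi_msg() time */
	msg->address_lo = lower_32_bits(doorbell_phys);
	msg->address_hi = upper_32_bits(doorbell_phys);
	msg->data = hwirq;
	iommu_dma_compose_msi_msg(desc, msg);	/* substitutes the mapped IOVA */
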