Lines matching refs:domain (cross-reference hits for the identifier "domain" in the IOMMU core, drivers/iommu/iommu.c; the leading number on each line is the source line in that file)
47 struct iommu_domain *domain; member
88 static int __iommu_attach_device(struct iommu_domain *domain,
90 static int __iommu_attach_group(struct iommu_domain *domain,
92 static void __iommu_detach_group(struct iommu_domain *domain,
732 struct iommu_domain *domain = group->default_domain; in iommu_create_device_direct_mappings() local
738 if (!domain || domain->type != IOMMU_DOMAIN_DMA) in iommu_create_device_direct_mappings()
741 BUG_ON(!domain->pgsize_bitmap); in iommu_create_device_direct_mappings()
743 pg_size = 1UL << __ffs(domain->pgsize_bitmap); in iommu_create_device_direct_mappings()
752 if (domain->ops->apply_resv_region) in iommu_create_device_direct_mappings()
753 domain->ops->apply_resv_region(dev, domain, entry); in iommu_create_device_direct_mappings()
765 phys_addr = iommu_iova_to_phys(domain, addr); in iommu_create_device_direct_mappings()
769 ret = iommu_map(domain, addr, addr, pg_size, entry->prot); in iommu_create_device_direct_mappings()
776 iommu_flush_iotlb_all(domain); in iommu_create_device_direct_mappings()
784 static bool iommu_is_attach_deferred(struct iommu_domain *domain, in iommu_is_attach_deferred() argument
787 if (domain->ops->is_attach_deferred) in iommu_is_attach_deferred()
788 return domain->ops->is_attach_deferred(domain, dev); in iommu_is_attach_deferred()
845 if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) in iommu_group_add_device()
846 ret = __iommu_attach_device(group->domain, dev); in iommu_group_add_device()
1208 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); in iommu_page_response() local
1210 if (!domain || !domain->ops->page_response) in iommu_page_response()
1251 ret = domain->ops->page_response(dev, evt, msg); in iommu_page_response()
1503 if (!group->domain) in iommu_group_alloc_default_domain()
1504 group->domain = dom; in iommu_group_alloc_default_domain()
1707 struct iommu_domain *domain = data; in iommu_group_do_dma_attach() local
1710 if (!iommu_is_attach_deferred(domain, dev)) in iommu_group_do_dma_attach()
1711 ret = __iommu_attach_device(domain, dev); in iommu_group_do_dma_attach()
1724 struct iommu_domain *domain = data; in iommu_group_do_probe_finalize() local
1726 if (domain->ops->probe_finalize) in iommu_group_do_probe_finalize()
1727 domain->ops->probe_finalize(dev); in iommu_group_do_probe_finalize()
1893 void iommu_set_fault_handler(struct iommu_domain *domain, in iommu_set_fault_handler() argument
1897 BUG_ON(!domain); in iommu_set_fault_handler()
1899 domain->handler = handler; in iommu_set_fault_handler()
1900 domain->handler_token = token; in iommu_set_fault_handler()
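Lines 1893-1900 only show the handler/token assignment; the registration pattern looks like the sketch below. This is a minimal example, assuming the caller already owns an unmanaged domain; the handler name is hypothetical.

#include <linux/iommu.h>

/*
 * Must match iommu_fault_handler_t. Returning -ENOSYS tells
 * report_iommu_fault() (line 2721) to fall back to its default report.
 */
static int my_fault_handler(struct iommu_domain *dom, struct device *dev,
			    unsigned long iova, int flags, void *token)
{
	dev_err(dev, "unexpected %s fault at IOVA %#lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
	return -ENOSYS;
}

/* After allocating the domain: */
iommu_set_fault_handler(domain, my_fault_handler, NULL);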
1907 struct iommu_domain *domain; in __iommu_domain_alloc() local
1912 domain = bus->iommu_ops->domain_alloc(type); in __iommu_domain_alloc()
1913 if (!domain) in __iommu_domain_alloc()
1916 domain->ops = bus->iommu_ops; in __iommu_domain_alloc()
1917 domain->type = type; in __iommu_domain_alloc()
1919 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; in __iommu_domain_alloc()
1921 return domain; in __iommu_domain_alloc()
1930 void iommu_domain_free(struct iommu_domain *domain) in iommu_domain_free() argument
1932 domain->ops->domain_free(domain); in iommu_domain_free()
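__iommu_domain_alloc() is reached through the exported wrapper iommu_domain_alloc(), which hardcodes IOMMU_DOMAIN_UNMANAGED. A lifecycle sketch for a PCI device, with error handling abbreviated:

#include <linux/iommu.h>
#include <linux/pci.h>

struct iommu_domain *domain;

/* NULL if the bus has no iommu_ops or the driver's domain_alloc() fails. */
domain = iommu_domain_alloc(&pci_bus_type);
if (!domain)
	return -ENODEV;

/* ... attach, map, use ... */

iommu_domain_free(domain);	/* domain->ops->domain_free(), line 1932 */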
1936 static int __iommu_attach_device(struct iommu_domain *domain, in __iommu_attach_device() argument
1941 if (unlikely(domain->ops->attach_dev == NULL)) in __iommu_attach_device()
1944 ret = domain->ops->attach_dev(domain, dev); in __iommu_attach_device()
1950 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) in iommu_attach_device() argument
1970 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
2036 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, in iommu_uapi_cache_invalidate() argument
2043 if (unlikely(!domain->ops->cache_invalidate)) in iommu_uapi_cache_invalidate()
2084 return domain->ops->cache_invalidate(domain, dev, &inv_info); in iommu_uapi_cache_invalidate()
2146 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_uapi_sva_bind_gpasid() argument
2152 if (unlikely(!domain->ops->sva_bind_gpasid)) in iommu_uapi_sva_bind_gpasid()
2159 return domain->ops->sva_bind_gpasid(domain, dev, &data); in iommu_uapi_sva_bind_gpasid()
2163 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_sva_unbind_gpasid() argument
2166 if (unlikely(!domain->ops->sva_unbind_gpasid)) in iommu_sva_unbind_gpasid()
2169 return domain->ops->sva_unbind_gpasid(dev, pasid); in iommu_sva_unbind_gpasid()
2173 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, in iommu_uapi_sva_unbind_gpasid() argument
2179 if (unlikely(!domain->ops->sva_bind_gpasid)) in iommu_uapi_sva_unbind_gpasid()
2186 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); in iommu_uapi_sva_unbind_gpasid()
2190 static void __iommu_detach_device(struct iommu_domain *domain, in __iommu_detach_device() argument
2193 if (iommu_is_attach_deferred(domain, dev)) in __iommu_detach_device()
2196 if (unlikely(domain->ops->detach_dev == NULL)) in __iommu_detach_device()
2199 domain->ops->detach_dev(domain, dev); in __iommu_detach_device()
2203 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) in iommu_detach_device() argument
2218 __iommu_detach_group(domain, group); in iommu_detach_device()
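Both entry points resolve the device's group (lines 1970 and 2218) and operate on it as a whole; in this era iommu_attach_device() also refuses groups containing more than one device. The typical pairing, sketched:

int ret;

ret = iommu_attach_device(domain, dev);	/* -EBUSY for multi-device groups */
if (ret)
	return ret;

/* ... device DMA now translates through this domain ... */

iommu_detach_device(domain, dev);	/* group reverts to its default domain, line 2327 */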
2228 struct iommu_domain *domain; in iommu_get_domain_for_dev() local
2235 domain = group->domain; in iommu_get_domain_for_dev()
2239 return domain; in iommu_get_domain_for_dev()
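This helper just reads group->domain (line 2235) and takes no reference, so the result is only stable while the caller keeps the device attached. A query sketch:

struct iommu_domain *cur = iommu_get_domain_for_dev(dev);

if (cur && cur->type == IOMMU_DOMAIN_DMA)
	dev_info(dev, "still on the DMA-API default domain\n");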
2264 struct iommu_domain *domain = data; in iommu_group_do_attach_device() local
2266 return __iommu_attach_device(domain, dev); in iommu_group_do_attach_device()
2269 static int __iommu_attach_group(struct iommu_domain *domain, in __iommu_attach_group() argument
2274 if (group->default_domain && group->domain != group->default_domain) in __iommu_attach_group()
2277 ret = __iommu_group_for_each_dev(group, domain, in __iommu_attach_group()
2280 group->domain = domain; in __iommu_attach_group()
2285 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
2290 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
2299 struct iommu_domain *domain = data; in iommu_group_do_detach_device() local
2301 __iommu_detach_device(domain, dev); in iommu_group_do_detach_device()
2306 static void __iommu_detach_group(struct iommu_domain *domain, in __iommu_detach_group() argument
2312 __iommu_group_for_each_dev(group, domain, in __iommu_detach_group()
2314 group->domain = NULL; in __iommu_detach_group()
2318 if (group->domain == group->default_domain) in __iommu_detach_group()
2327 group->domain = group->default_domain; in __iommu_detach_group()
2330 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
2333 __iommu_detach_group(domain, group); in iommu_detach_group()
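The group-level calls are the VFIO-style interface; line 2274 is the guard that fails an attach while the group is already away from its default domain. A sketch, assuming the caller looks the group up itself:

struct iommu_group *group = iommu_group_get(dev);
int ret;

if (!group)
	return -ENODEV;

ret = iommu_attach_group(domain, group);	/* fails if group already re-attached */
if (ret)
	goto out_put;

/* ... */

iommu_detach_group(domain, group);
out_put:
	iommu_group_put(group);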
2338 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in iommu_iova_to_phys() argument
2340 if (unlikely(domain->ops->iova_to_phys == NULL)) in iommu_iova_to_phys()
2343 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
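This is a page-table walk, not a TLB lookup; the core returns 0 when the op is missing (line 2340), and drivers conventionally return 0 for an unmapped IOVA. Sketch:

phys_addr_t phys = iommu_iova_to_phys(domain, iova);

if (!phys)
	dev_warn(dev, "nothing mapped at IOVA %pad\n", &iova);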
2347 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, in iommu_pgsize() argument
2356 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); in iommu_pgsize()
2373 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); in iommu_pgsize()
2402 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, in __iommu_map_pages() argument
2406 const struct iommu_ops *ops = domain->ops; in __iommu_map_pages()
2410 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); in __iommu_map_pages()
2416 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, in __iommu_map_pages()
2419 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); in __iommu_map_pages()
2426 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, in __iommu_map() argument
2429 const struct iommu_ops *ops = domain->ops; in __iommu_map()
2437 domain->pgsize_bitmap == 0UL)) in __iommu_map()
2440 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_map()
2444 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_map()
2462 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, in __iommu_map()
2479 iommu_unmap(domain, orig_iova, orig_size - size); in __iommu_map()
2486 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, in _iommu_map() argument
2489 const struct iommu_ops *ops = domain->ops; in _iommu_map()
2492 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); in _iommu_map()
2494 ops->iotlb_sync_map(domain, iova, size); in _iommu_map()
2499 int iommu_map(struct iommu_domain *domain, unsigned long iova, in iommu_map() argument
2503 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); in iommu_map()
2507 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, in iommu_map_atomic() argument
2510 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); in iommu_map_atomic()
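The two exported wrappers differ only in the GFP flags they pass down (GFP_KERNEL at line 2503, GFP_ATOMIC at 2510). Mapping one page, sketched; the core does not allocate IOVAs, it only checks alignment against pgsize_bitmap, so the IOVA value below is a hypothetical caller choice:

#include <asm/io.h>	/* virt_to_phys() */

unsigned long iova = 0x100000;	/* hypothetical caller-managed IOVA */
int ret;

ret = iommu_map(domain, iova, virt_to_phys(buf), PAGE_SIZE,
		IOMMU_READ | IOMMU_WRITE);
if (ret)
	return ret;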
2514 static size_t __iommu_unmap_pages(struct iommu_domain *domain, in __iommu_unmap_pages() argument
2518 const struct iommu_ops *ops = domain->ops; in __iommu_unmap_pages()
2521 pgsize = iommu_pgsize(domain, iova, iova, size, &count); in __iommu_unmap_pages()
2523 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : in __iommu_unmap_pages()
2524 ops->unmap(domain, iova, pgsize, iotlb_gather); in __iommu_unmap_pages()
2527 static size_t __iommu_unmap(struct iommu_domain *domain, in __iommu_unmap() argument
2531 const struct iommu_ops *ops = domain->ops; in __iommu_unmap()
2537 domain->pgsize_bitmap == 0UL)) in __iommu_unmap()
2540 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_unmap()
2544 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_unmap()
2564 unmapped_page = __iommu_unmap_pages(domain, iova, in __iommu_unmap()
2581 size_t iommu_unmap(struct iommu_domain *domain, in iommu_unmap() argument
2588 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); in iommu_unmap()
2589 iommu_iotlb_sync(domain, &iotlb_gather); in iommu_unmap()
2595 size_t iommu_unmap_fast(struct iommu_domain *domain, in iommu_unmap_fast() argument
2599 return __iommu_unmap(domain, iova, size, iotlb_gather); in iommu_unmap_fast()
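iommu_unmap() flushes the IOTLB before returning (line 2589); iommu_unmap_fast() leaves the flush to the caller, which is what makes batched unmapping cheap. Batch sketch:

struct iommu_iotlb_gather gather;

iommu_iotlb_gather_init(&gather);
iommu_unmap_fast(domain, iova, PAGE_SIZE, &gather);
/* ... further iommu_unmap_fast() calls over the same domain ... */
iommu_iotlb_sync(domain, &gather);	/* one flush for the whole batch */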
2603 static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, in __iommu_map_sg() argument
2607 const struct iommu_ops *ops = domain->ops; in __iommu_map_sg()
2614 ret = ops->map_sg(domain, iova, sg, nents, prot, gfp, &mapped); in __iommu_map_sg()
2617 ops->iotlb_sync_map(domain, iova, mapped); in __iommu_map_sg()
2629 ret = __iommu_map(domain, iova + mapped, start, in __iommu_map_sg()
2651 ops->iotlb_sync_map(domain, iova, mapped); in __iommu_map_sg()
2656 iommu_unmap(domain, iova, mapped); in __iommu_map_sg()
2662 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, in iommu_map_sg() argument
2666 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); in iommu_map_sg()
2670 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, in iommu_map_sg_atomic() argument
2673 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); in iommu_map_sg_atomic()
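Unlike iommu_map(), these return the number of bytes mapped; in this era failure is signalled by 0, after the out_err path (line 2656) has already torn down any partial mappings. Sketch over a hypothetical sg_table:

size_t mapped;

mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
		      IOMMU_READ | IOMMU_WRITE);
if (!mapped)
	return -ENOMEM;	/* partial progress was rolled back internally */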
2677 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, in iommu_domain_window_enable() argument
2680 if (unlikely(domain->ops->domain_window_enable == NULL)) in iommu_domain_window_enable()
2683 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size, in iommu_domain_window_enable()
2688 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) in iommu_domain_window_disable() argument
2690 if (unlikely(domain->ops->domain_window_disable == NULL)) in iommu_domain_window_disable()
2693 return domain->ops->domain_window_disable(domain, wnd_nr); in iommu_domain_window_disable()
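The window ops are the legacy interface for windowed IOMMUs; most drivers leave them NULL, which yields -ENODEV here. A sketch with a hypothetical base address:

#include <linux/sizes.h>

/* Map a 1 GiB window at index 0 onto base_phys (hypothetical). */
ret = iommu_domain_window_enable(domain, 0, base_phys, SZ_1G,
				 IOMMU_READ | IOMMU_WRITE);
if (ret)
	return ret;

iommu_domain_window_disable(domain, 0);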
2721 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, in report_iommu_fault() argument
2730 if (domain->handler) in report_iommu_fault()
2731 ret = domain->handler(domain, dev, iova, flags, in report_iommu_fault()
2732 domain->handler_token); in report_iommu_fault()
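This is the producer side, called from an IOMMU driver's fault path; it invokes the handler installed at line 1899 and returns that handler's verdict. A call-site sketch from a hypothetical driver fault ISR (variable names are illustrative):

/* In the IOMMU driver's fault interrupt handler: */
if (report_iommu_fault(fault_domain, dev, fault_iova, IOMMU_FAULT_READ))
	dev_err_ratelimited(dev, "unhandled fault at IOVA %#lx\n", fault_iova);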
2751 int iommu_domain_get_attr(struct iommu_domain *domain, in iommu_domain_get_attr() argument
2761 *geometry = domain->geometry; in iommu_domain_get_attr()
2766 *paging = (domain->pgsize_bitmap != 0UL); in iommu_domain_get_attr()
2769 if (!domain->ops->domain_get_attr) in iommu_domain_get_attr()
2772 ret = domain->ops->domain_get_attr(domain, attr, data); in iommu_domain_get_attr()
2779 int iommu_domain_set_attr(struct iommu_domain *domain, in iommu_domain_set_attr() argument
2786 if (domain->ops->domain_set_attr == NULL) in iommu_domain_set_attr()
2789 ret = domain->ops->domain_set_attr(domain, attr, data); in iommu_domain_set_attr()
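Only the geometry and paging attributes are answered by the core itself (lines 2761 and 2766); everything else is forwarded to the driver, with -EINVAL when the op is absent. Querying the IOVA aperture, sketched:

struct iommu_domain_geometry geo;

if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo) &&
    geo.force_aperture)
	pr_info("usable IOVA range: %pad..%pad\n",
		&geo.aperture_start, &geo.aperture_end);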
3017 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) in iommu_aux_attach_device() argument
3021 if (domain->ops->aux_attach_dev) in iommu_aux_attach_device()
3022 ret = domain->ops->aux_attach_dev(domain, dev); in iommu_aux_attach_device()
3031 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) in iommu_aux_detach_device() argument
3033 if (domain->ops->aux_detach_dev) { in iommu_aux_detach_device()
3034 domain->ops->aux_detach_dev(domain, dev); in iommu_aux_detach_device()
3040 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) in iommu_aux_get_pasid() argument
3044 if (domain->ops->aux_get_pasid) in iommu_aux_get_pasid()
3045 ret = domain->ops->aux_get_pasid(domain, dev); in iommu_aux_get_pasid()
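The aux interface attaches a domain alongside the device's primary one, keyed by a PASID that the caller then programs into the device; iommu_aux_get_pasid() returns that PASID or a negative errno (-ENODEV when the driver lacks the op). Lifecycle sketch:

int pasid, ret;

ret = iommu_aux_attach_device(domain, dev);
if (ret)
	return ret;

pasid = iommu_aux_get_pasid(domain, dev);
if (pasid < 0) {
	iommu_aux_detach_device(domain, dev);
	return pasid;
}
/* tag the device's DMA with pasid; detach on teardown */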