Lines matching refs:domain (identifier cross-reference hits from the Intel VT-d IOMMU driver; each entry shows the source line number, the matching line, and the enclosing function)
299 #define for_each_domain_iommu(idx, domain) \ argument
301 if (domain->iommu_refcnt[idx])
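Because only lines containing "domain" are listed, the middle line of this macro (line 300, the loop itself) does not appear above. For context, a reconstruction of the full macro; the elided line is filled in from the surrounding matches and is not verbatim from this list:

    #define for_each_domain_iommu(idx, domain)              \
            for (idx = 0; idx < g_num_of_iommus; idx++)     \
                    if (domain->iommu_refcnt[idx])

It visits every IOMMU unit in the system and runs the body only for units that currently hold a reference on the domain.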
329 static void domain_exit(struct dmar_domain *domain);
330 static void domain_remove_dev_info(struct dmar_domain *domain);
333 static int intel_iommu_attach_device(struct iommu_domain *domain,
335 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
492 struct dmar_domain *domain) in set_iommu_domain() argument
506 domains[did & 0xff] = domain; in set_iommu_domain()
545 static inline int domain_type_is_si(struct dmar_domain *domain) in domain_type_is_si() argument
547 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY; in domain_type_is_si()
550 static inline bool domain_use_first_level(struct dmar_domain *domain) in domain_use_first_level() argument
552 return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL; in domain_use_first_level()
555 static inline int domain_pfn_supported(struct dmar_domain *domain, in domain_pfn_supported() argument
558 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
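These three inline helpers are almost fully visible in the matches; only domain_pfn_supported()'s return statement falls on an unmatched line. A reconstruction of the complete helpers (the final return line is an assumption filled in from context):

    static inline int domain_type_is_si(struct dmar_domain *domain)
    {
            return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
    }

    static inline bool domain_use_first_level(struct dmar_domain *domain)
    {
            return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
    }

    static inline int domain_pfn_supported(struct dmar_domain *domain,
                                           unsigned long pfn)
    {
            int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

            /* assumed: reject PFNs beyond the domain's address width */
            return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
    }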
619 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) in domain_get_iommu() argument
624 if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA)) in domain_get_iommu()
627 for_each_domain_iommu(iommu_id, domain) in domain_get_iommu()
642 static void domain_update_iommu_coherency(struct dmar_domain *domain) in domain_update_iommu_coherency() argument
649 domain->iommu_coherency = 1; in domain_update_iommu_coherency()
651 for_each_domain_iommu(i, domain) { in domain_update_iommu_coherency()
654 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
665 domain->iommu_coherency = 0; in domain_update_iommu_coherency()
699 static int domain_update_iommu_superpage(struct dmar_domain *domain, in domain_update_iommu_superpage() argument
714 if (domain && domain_use_first_level(domain)) { in domain_update_iommu_superpage()
730 static int domain_update_device_node(struct dmar_domain *domain) in domain_update_device_node() argument
737 if (list_empty(&domain->devices)) in domain_update_device_node()
740 list_for_each_entry(info, &domain->devices, link) { in domain_update_device_node()
759 static void domain_update_iommu_cap(struct dmar_domain *domain) in domain_update_iommu_cap() argument
761 domain_update_iommu_coherency(domain); in domain_update_iommu_cap()
762 domain->iommu_snooping = domain_update_iommu_snooping(NULL); in domain_update_iommu_cap()
763 domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL); in domain_update_iommu_cap()
769 if (domain->nid == NUMA_NO_NODE) in domain_update_iommu_cap()
770 domain->nid = domain_update_device_node(domain); in domain_update_iommu_cap()
779 if (domain_use_first_level(domain)) in domain_update_iommu_cap()
780 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); in domain_update_iommu_cap()
782 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); in domain_update_iommu_cap()
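Lines 780 and 782 differ by one bit of guest address width: first-level (scalar mode) translation requires canonical input addresses (bits 63:N must copy bit N-1), so one usable address bit is lost. A sketch of the surrounding logic with that reasoning as comments (comments paraphrased, not verbatim):

    /* If RHSA gave no NUMA hint, fall back to the node of an attached device. */
    if (domain->nid == NUMA_NO_NODE)
            domain->nid = domain_update_device_node(domain);

    /* First-level translation needs canonical addresses, costing one bit. */
    if (domain_use_first_level(domain))
            domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
    else
            domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);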
968 static void domain_flush_cache(struct dmar_domain *domain, in domain_flush_cache() argument
971 if (!domain->iommu_coherency) in domain_flush_cache()
1018 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, in pfn_to_dma_pte() argument
1022 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
1025 BUG_ON(!domain->pgd); in pfn_to_dma_pte()
1027 if (!domain_pfn_supported(domain, pfn)) in pfn_to_dma_pte()
1031 parent = domain->pgd; in pfn_to_dma_pte()
1046 tmp_page = alloc_pgtable_page(domain->nid); in pfn_to_dma_pte()
1051 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); in pfn_to_dma_pte()
1053 if (domain_use_first_level(domain)) { in pfn_to_dma_pte()
1055 if (domain->domain.type == IOMMU_DOMAIN_DMA) in pfn_to_dma_pte()
1062 domain_flush_cache(domain, pte, sizeof(*pte)); in pfn_to_dma_pte()
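The matches show the entry checks and the allocation path of pfn_to_dma_pte(); the descent itself sits on unmatched lines. A condensed sketch of the walk, reconstructed with helper names used elsewhere in this file (not verbatim; the cmpxchg64() race handling is summarized in a comment):

    parent = domain->pgd;
    while (1) {
            pte = &parent[pfn_level_offset(pfn, level)];
            if (level == *target_level)
                    break;                  /* reached the requested level */
            if (!dma_pte_present(pte)) {
                    /* allocate a lower-level table and install it */
                    void *tmp_page = alloc_pgtable_page(domain->nid);
                    if (!tmp_page)
                            return NULL;
                    domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                    /* build pteval (plus DMA_FL_PTE_* bits for first level),
                     * cmpxchg64() it into pte, free tmp_page on a lost race */
            }
            if (level == 1)
                    break;
            parent = phys_to_virt(dma_pte_addr(pte));
            level--;
    }
    return pte;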
1078 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, in dma_pfn_level_pte() argument
1083 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
1086 parent = domain->pgd; in dma_pfn_level_pte()
1110 static void dma_pte_clear_range(struct dmar_domain *domain, in dma_pte_clear_range() argument
1117 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_clear_range()
1118 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_clear_range()
1124 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); in dma_pte_clear_range()
1135 domain_flush_cache(domain, first_pte, in dma_pte_clear_range()
1141 static void dma_pte_free_level(struct dmar_domain *domain, int level, in dma_pte_free_level() argument
1160 dma_pte_free_level(domain, level - 1, retain_level, in dma_pte_free_level()
1172 domain_flush_cache(domain, pte, sizeof(*pte)); in dma_pte_free_level()
1184 static void dma_pte_free_pagetable(struct dmar_domain *domain, in dma_pte_free_pagetable() argument
1189 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_free_pagetable()
1190 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in dma_pte_free_pagetable()
1193 dma_pte_clear_range(domain, start_pfn, last_pfn); in dma_pte_free_pagetable()
1196 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, in dma_pte_free_pagetable()
1197 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
1200 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
1201 free_pgtable_page(domain->pgd); in dma_pte_free_pagetable()
1202 domain->pgd = NULL; in dma_pte_free_pagetable()
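Teardown is two passes, both visible above: dma_pte_clear_range() zaps the leaf PTEs, then dma_pte_free_level() recursively frees page-table pages that lie wholly inside [start_pfn, last_pfn], keeping levels above retain_level. In outline (rearranging the matched lines with comments, no new logic):

    dma_pte_clear_range(domain, start_pfn, last_pfn);        /* pass 1: leaves */
    dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
                       domain->pgd, 0, start_pfn, last_pfn); /* pass 2: tables */

    /* if the whole address space was freed, drop the root table too */
    if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
            free_pgtable_page(domain->pgd);
            domain->pgd = NULL;
    }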
1212 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, in dma_pte_list_pagetables() argument
1228 freelist = dma_pte_list_pagetables(domain, level - 1, in dma_pte_list_pagetables()
1236 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, in dma_pte_clear_level() argument
1261 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
1269 freelist = dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
1279 domain_flush_cache(domain, first_pte, in dma_pte_clear_level()
1288 static struct page *domain_unmap(struct dmar_domain *domain, in domain_unmap() argument
1294 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in domain_unmap()
1295 BUG_ON(!domain_pfn_supported(domain, last_pfn)); in domain_unmap()
1299 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
1300 domain->pgd, 0, start_pfn, last_pfn, NULL); in domain_unmap()
1303 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
1304 struct page *pgd_page = virt_to_page(domain->pgd); in domain_unmap()
1308 domain->pgd = NULL; in domain_unmap()
1490 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1500 list_for_each_entry(info, &domain->devices, link) in iommu_support_dev_iotlb()
1511 static void domain_update_iotlb(struct dmar_domain *domain) in domain_update_iotlb() argument
1518 list_for_each_entry(info, &domain->devices, link) { in domain_update_iotlb()
1531 domain->has_iotlb_device = has_iotlb_device; in domain_update_iotlb()
1576 domain_update_iotlb(info->domain); in iommu_enable_dev_iotlb()
1595 domain_update_iotlb(info->domain); in iommu_disable_dev_iotlb()
1609 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, in iommu_flush_dev_iotlb() argument
1616 if (!domain->has_iotlb_device) in iommu_flush_dev_iotlb()
1620 list_for_each_entry(info, &domain->devices, link) { in iommu_flush_dev_iotlb()
1633 struct dmar_domain *domain, in domain_flush_piotlb() argument
1636 u16 did = domain->iommu_did[iommu->seq_id]; in domain_flush_piotlb()
1638 if (domain->default_pasid) in domain_flush_piotlb()
1639 qi_flush_piotlb(iommu, did, domain->default_pasid, in domain_flush_piotlb()
1642 if (!list_empty(&domain->devices)) in domain_flush_piotlb()
1647 struct dmar_domain *domain, in iommu_flush_iotlb_psi() argument
1654 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1661 if (domain_use_first_level(domain)) { in iommu_flush_iotlb_psi()
1662 domain_flush_piotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1703 iommu_flush_dev_iotlb(domain, addr, mask); in iommu_flush_iotlb_psi()
1708 struct dmar_domain *domain, in __mapping_notify_one() argument
1715 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) in __mapping_notify_one()
1716 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1723 struct dmar_domain *domain; in iommu_flush_iova() local
1726 domain = container_of(iovad, struct dmar_domain, iovad); in iommu_flush_iova()
1728 for_each_domain_iommu(idx, domain) { in iommu_flush_iova()
1730 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iova()
1732 if (domain_use_first_level(domain)) in iommu_flush_iova()
1733 domain_flush_piotlb(iommu, domain, 0, -1, 0); in iommu_flush_iova()
1871 if (!info->dev || !info->domain) in disable_dmar_iommu()
1941 struct dmar_domain *domain; in alloc_domain() local
1943 domain = alloc_domain_mem(); in alloc_domain()
1944 if (!domain) in alloc_domain()
1947 memset(domain, 0, sizeof(*domain)); in alloc_domain()
1948 domain->nid = NUMA_NO_NODE; in alloc_domain()
1949 domain->flags = flags; in alloc_domain()
1951 domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL; in alloc_domain()
1952 domain->has_iotlb_device = false; in alloc_domain()
1953 INIT_LIST_HEAD(&domain->devices); in alloc_domain()
1955 return domain; in alloc_domain()
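alloc_domain() is nearly complete in the matches; missing are only the early return, the closing brace, and the condition on line 1950 that guards the first-level flag. A reconstruction (the guard is an assumption from context):

    static struct dmar_domain *alloc_domain(int flags)
    {
            struct dmar_domain *domain;

            domain = alloc_domain_mem();
            if (!domain)
                    return NULL;

            memset(domain, 0, sizeof(*domain));
            domain->nid = NUMA_NO_NODE;
            domain->flags = flags;
            if (first_level_by_default())   /* assumed: line 1950 is unmatched */
                    domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
            domain->has_iotlb_device = false;
            INIT_LIST_HEAD(&domain->devices);

            return domain;
    }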
1959 static int domain_attach_iommu(struct dmar_domain *domain, in domain_attach_iommu() argument
1968 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
1969 domain->iommu_count += 1; in domain_attach_iommu()
1970 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
1976 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
1977 domain->iommu_count -= 1; in domain_attach_iommu()
1982 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
1984 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
1985 domain->nid = iommu->node; in domain_attach_iommu()
1987 domain_update_iommu_cap(domain); in domain_attach_iommu()
1993 static int domain_detach_iommu(struct dmar_domain *domain, in domain_detach_iommu() argument
2001 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
2002 count = --domain->iommu_count; in domain_detach_iommu()
2003 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
2004 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
2008 domain_update_iommu_cap(domain); in domain_detach_iommu()
2009 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
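Both functions maintain a per-IOMMU reference count, and the interesting work happens on the 0-to-1 and 1-to-0 transitions, where a hardware domain id (did) is allocated from or returned to iommu->domain_ids. A hedged sketch of the attach transition (the bitmap handling is reconstructed around the visible refcount lines; error path simplified):

    /* first device behind this IOMMU joins the domain */
    if (domain->iommu_refcnt[iommu->seq_id] == 1) {
            ndomains = cap_ndoms(iommu->cap);
            num = find_first_zero_bit(iommu->domain_ids, ndomains);
            if (num >= ndomains) {          /* no free domain ids */
                    domain->iommu_refcnt[iommu->seq_id] -= 1;
                    domain->iommu_count -= 1;
                    return -ENOSPC;
            }
            set_bit(num, iommu->domain_ids);
            set_iommu_domain(iommu, num, domain);
            domain->iommu_did[iommu->seq_id] = num;
            domain->nid = iommu->node;
            domain_update_iommu_cap(domain);
    }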
2071 static void domain_exit(struct dmar_domain *domain) in domain_exit() argument
2075 domain_remove_dev_info(domain); in domain_exit()
2078 if (domain->domain.type == IOMMU_DOMAIN_DMA) in domain_exit()
2079 put_iova_domain(&domain->iovad); in domain_exit()
2081 if (domain->pgd) { in domain_exit()
2084 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); in domain_exit()
2088 free_domain_mem(domain); in domain_exit()
2140 static int domain_context_mapping_one(struct dmar_domain *domain, in domain_context_mapping_one() argument
2145 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
2154 if (hw_pass_through && domain_type_is_si(domain)) in domain_context_mapping_one()
2160 BUG_ON(!domain->pgd); in domain_context_mapping_one()
2215 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2221 struct dma_pte *pgd = domain->pgd; in domain_context_mapping_one()
2231 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2238 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2290 struct dmar_domain *domain; member
2300 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2306 domain_context_mapping(struct dmar_domain *domain, struct device *dev) in domain_context_mapping() argument
2320 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2323 data.domain = domain; in domain_context_mapping()
2364 static inline int hardware_largepage_caps(struct dmar_domain *domain, in hardware_largepage_caps() argument
2372 support = domain->iommu_superpage; in hardware_largepage_caps()
2391 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in __domain_mapping() argument
2402 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); in __domain_mapping()
2409 if (domain_use_first_level(domain)) { in __domain_mapping()
2412 if (domain->domain.type == IOMMU_DOMAIN_DMA) { in __domain_mapping()
2438 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); in __domain_mapping()
2440 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); in __domain_mapping()
2459 dma_pte_free_pagetable(domain, iov_pfn, end_pfn, in __domain_mapping()
2506 domain_flush_cache(domain, first_pte, in __domain_mapping()
2517 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_mapping() argument
2525 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot); in domain_mapping()
2529 for_each_domain_iommu(iommu_id, domain) { in domain_mapping()
2531 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); in domain_mapping()
2537 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_sg_mapping() argument
2541 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); in domain_sg_mapping()
2544 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in domain_pfn_mapping() argument
2548 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); in domain_pfn_mapping()
2595 static void domain_remove_dev_info(struct dmar_domain *domain) in domain_remove_dev_info() argument
2601 list_for_each_entry_safe(info, tmp, &domain->devices, link) in domain_remove_dev_info()
2619 return info->domain; in find_domain()
2626 struct iommu_domain *domain; in do_deferred_attach() local
2629 domain = iommu_get_domain_for_dev(dev); in do_deferred_attach()
2630 if (domain) in do_deferred_attach()
2631 intel_iommu_attach_device(domain, dev); in do_deferred_attach()
2648 struct dmar_domain *domain, in domain_setup_first_level() argument
2652 struct dma_pte *pgd = domain->pgd; in domain_setup_first_level()
2660 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2675 if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED) in domain_setup_first_level()
2679 domain->iommu_did[iommu->seq_id], in domain_setup_first_level()
2692 struct dmar_domain *domain) in dmar_insert_one_dev_info() argument
2719 info->domain = domain; in dmar_insert_one_dev_info()
2755 found = info2->domain; in dmar_insert_one_dev_info()
2768 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2777 list_add(&info->link, &domain->devices); in dmar_insert_one_dev_info()
2794 if (hw_pass_through && domain_type_is_si(domain)) in dmar_insert_one_dev_info()
2795 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_insert_one_dev_info()
2797 else if (domain_use_first_level(domain)) in dmar_insert_one_dev_info()
2798 ret = domain_setup_first_level(iommu, domain, dev, in dmar_insert_one_dev_info()
2801 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_insert_one_dev_info()
2811 if (dev && domain_context_mapping(domain, dev)) { in dmar_insert_one_dev_info()
2817 return domain; in dmar_insert_one_dev_info()
2820 static int iommu_domain_identity_map(struct dmar_domain *domain, in iommu_domain_identity_map() argument
2828 dma_pte_clear_range(domain, first_vpfn, last_vpfn); in iommu_domain_identity_map()
2830 return __domain_mapping(domain, first_vpfn, NULL, in iommu_domain_identity_map()
2835 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2894 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) in domain_add_dev_info() argument
2904 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2905 if (ndomain != domain) in domain_add_dev_info()
3522 struct dmar_domain *domain, in intel_alloc_iova() argument
3535 if (domain_use_first_level(domain)) in intel_alloc_iova()
3536 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1), in intel_alloc_iova()
3539 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), in intel_alloc_iova()
3551 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, in intel_alloc_iova()
3556 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, in intel_alloc_iova()
3570 struct dmar_domain *domain; in __intel_map_single() local
3583 domain = find_domain(dev); in __intel_map_single()
3584 if (!domain) in __intel_map_single()
3587 iommu = domain_get_iommu(domain); in __intel_map_single()
3590 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); in __intel_map_single()
3609 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), in __intel_map_single()
3623 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); in __intel_map_single()
3647 struct dmar_domain *domain; in intel_unmap() local
3655 domain = find_domain(dev); in intel_unmap()
3656 BUG_ON(!domain); in intel_unmap()
3658 iommu = domain_get_iommu(domain); in intel_unmap()
3669 freelist = domain_unmap(domain, start_pfn, last_pfn); in intel_unmap()
3671 !has_iova_flush_queue(&domain->iovad)) { in intel_unmap()
3672 iommu_flush_iotlb_psi(iommu, domain, start_pfn, in intel_unmap()
3675 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); in intel_unmap()
3678 queue_iova(&domain->iovad, iova_pfn, nrpages, in intel_unmap()
3775 struct dmar_domain *domain; in intel_map_sg() local
3789 domain = find_domain(dev); in intel_map_sg()
3790 if (!domain) in intel_map_sg()
3793 iommu = domain_get_iommu(domain); in intel_map_sg()
3798 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), in intel_map_sg()
3817 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); in intel_map_sg()
3819 dma_pte_free_pagetable(domain, start_vpfn, in intel_map_sg()
3821 agaw_to_level(domain->agaw) + 1); in intel_map_sg()
3822 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); in intel_map_sg()
3858 struct dmar_domain *domain; in bounce_sync_single() local
3861 domain = find_domain(dev); in bounce_sync_single()
3862 if (WARN_ON(!domain)) in bounce_sync_single()
3865 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr); in bounce_sync_single()
3876 struct dmar_domain *domain; in bounce_map_single() local
3887 domain = find_domain(dev); in bounce_map_single()
3889 if (WARN_ON(dir == DMA_NONE || !domain)) in bounce_map_single()
3892 iommu = domain_get_iommu(domain); in bounce_map_single()
3897 iova_pfn = intel_alloc_iova(dev, domain, in bounce_map_single()
3939 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), in bounce_map_single()
3953 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); in bounce_map_single()
3965 struct dmar_domain *domain; in bounce_unmap_single() local
3968 domain = find_domain(dev); in bounce_unmap_single()
3969 if (WARN_ON(!domain)) in bounce_unmap_single()
3972 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr); in bounce_unmap_single()
4755 struct dmar_domain *domain; in free_all_cpu_cached_iovas() local
4762 domain = get_iommu_domain(iommu, (u16)did); in free_all_cpu_cached_iovas()
4764 if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA) in free_all_cpu_cached_iovas()
4767 free_cpu_cached_iovas(cpu, &domain->iovad); in free_all_cpu_cached_iovas()
5132 struct dmar_domain *domain; in __dmar_remove_one_dev_info() local
5142 domain = info->domain; in __dmar_remove_one_dev_info()
5158 domain_detach_iommu(domain, iommu); in __dmar_remove_one_dev_info()
5176 static int md_domain_init(struct dmar_domain *domain, int guest_width) in md_domain_init() argument
5181 domain->gaw = guest_width; in md_domain_init()
5183 domain->agaw = width_to_agaw(adjust_width); in md_domain_init()
5185 domain->iommu_coherency = 0; in md_domain_init()
5186 domain->iommu_snooping = 0; in md_domain_init()
5187 domain->iommu_superpage = 0; in md_domain_init()
5188 domain->max_addr = 0; in md_domain_init()
5191 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); in md_domain_init()
5192 if (!domain->pgd) in md_domain_init()
5194 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in md_domain_init()
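md_domain_init() sizes a freshly allocated (unmanaged or identity) domain from the requested guest address width and allocates its top-level table. The unmatched lines compute the adjusted width; a reconstruction (the adjust_width computation is an assumption):

    static int md_domain_init(struct dmar_domain *domain, int guest_width)
    {
            int adjust_width;

            /* calculate the adjusted guest address width (AGAW) */
            domain->gaw = guest_width;
            adjust_width = guestwidth_to_adjustwidth(guest_width); /* assumed */
            domain->agaw = width_to_agaw(adjust_width);

            domain->iommu_coherency = 0;
            domain->iommu_snooping = 0;
            domain->iommu_superpage = 0;
            domain->max_addr = 0;

            /* always allocate the top pgd */
            domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
            if (!domain->pgd)
                    return -ENOMEM;
            domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
            return 0;
    }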
5212 struct iommu_domain *domain; in intel_iommu_domain_alloc() local
5231 domain = &dmar_domain->domain; in intel_iommu_domain_alloc()
5232 domain->geometry.aperture_start = 0; in intel_iommu_domain_alloc()
5233 domain->geometry.aperture_end = in intel_iommu_domain_alloc()
5235 domain->geometry.force_aperture = true; in intel_iommu_domain_alloc()
5237 return domain; in intel_iommu_domain_alloc()
5239 return &si_domain->domain; in intel_iommu_domain_alloc()
5247 static void intel_iommu_domain_free(struct iommu_domain *domain) in intel_iommu_domain_free() argument
5249 if (domain != &si_domain->domain) in intel_iommu_domain_free()
5250 domain_exit(to_dmar_domain(domain)); in intel_iommu_domain_free()
5258 is_aux_domain(struct device *dev, struct iommu_domain *domain) in is_aux_domain() argument
5263 domain->type == IOMMU_DOMAIN_UNMANAGED; in is_aux_domain()
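An auxiliary domain is an unmanaged domain attached via a PASID to a device whose aux mode is enabled; the match shows only the tail of the test. Reconstructed (the device-info lookup and the auxd_enabled check are assumptions from context):

    static bool
    is_aux_domain(struct device *dev, struct iommu_domain *domain)
    {
            struct device_domain_info *info = get_domain_info(dev); /* assumed */

            return info && info->auxd_enabled &&
                            domain->type == IOMMU_DOMAIN_UNMANAGED;
    }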
5266 static void auxiliary_link_device(struct dmar_domain *domain, in auxiliary_link_device() argument
5275 domain->auxd_refcnt++; in auxiliary_link_device()
5276 list_add(&domain->auxd, &info->auxiliary_domains); in auxiliary_link_device()
5279 static void auxiliary_unlink_device(struct dmar_domain *domain, in auxiliary_unlink_device() argument
5288 list_del(&domain->auxd); in auxiliary_unlink_device()
5289 domain->auxd_refcnt--; in auxiliary_unlink_device()
5291 if (!domain->auxd_refcnt && domain->default_pasid > 0) in auxiliary_unlink_device()
5292 ioasid_free(domain->default_pasid); in auxiliary_unlink_device()
5295 static int aux_domain_add_dev(struct dmar_domain *domain, in aux_domain_add_dev() argument
5306 if (domain->default_pasid <= 0) { in aux_domain_add_dev()
5317 domain->default_pasid = pasid; in aux_domain_add_dev()
5326 ret = domain_attach_iommu(domain, iommu); in aux_domain_add_dev()
5331 if (domain_use_first_level(domain)) in aux_domain_add_dev()
5332 ret = domain_setup_first_level(iommu, domain, dev, in aux_domain_add_dev()
5333 domain->default_pasid); in aux_domain_add_dev()
5335 ret = intel_pasid_setup_second_level(iommu, domain, dev, in aux_domain_add_dev()
5336 domain->default_pasid); in aux_domain_add_dev()
5341 auxiliary_link_device(domain, dev); in aux_domain_add_dev()
5348 domain_detach_iommu(domain, iommu); in aux_domain_add_dev()
5352 if (!domain->auxd_refcnt && domain->default_pasid > 0) in aux_domain_add_dev()
5353 ioasid_free(domain->default_pasid); in aux_domain_add_dev()
5358 static void aux_domain_remove_dev(struct dmar_domain *domain, in aux_domain_remove_dev() argument
5365 if (!is_aux_domain(dev, &domain->domain)) in aux_domain_remove_dev()
5372 auxiliary_unlink_device(domain, dev); in aux_domain_remove_dev()
5375 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false); in aux_domain_remove_dev()
5376 domain_detach_iommu(domain, iommu); in aux_domain_remove_dev()
5382 static int prepare_domain_attach_device(struct iommu_domain *domain, in prepare_domain_attach_device() argument
5385 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in prepare_domain_attach_device()
5424 static int intel_iommu_attach_device(struct iommu_domain *domain, in intel_iommu_attach_device() argument
5429 if (domain->type == IOMMU_DOMAIN_UNMANAGED && in intel_iommu_attach_device()
5435 if (is_aux_domain(dev, domain)) in intel_iommu_attach_device()
5447 ret = prepare_domain_attach_device(domain, dev); in intel_iommu_attach_device()
5451 return domain_add_dev_info(to_dmar_domain(domain), dev); in intel_iommu_attach_device()
5454 static int intel_iommu_aux_attach_device(struct iommu_domain *domain, in intel_iommu_aux_attach_device() argument
5459 if (!is_aux_domain(dev, domain)) in intel_iommu_aux_attach_device()
5462 ret = prepare_domain_attach_device(domain, dev); in intel_iommu_aux_attach_device()
5466 return aux_domain_add_dev(to_dmar_domain(domain), dev); in intel_iommu_aux_attach_device()
5469 static void intel_iommu_detach_device(struct iommu_domain *domain, in intel_iommu_detach_device() argument
5475 static void intel_iommu_aux_detach_device(struct iommu_domain *domain, in intel_iommu_aux_detach_device() argument
5478 aux_domain_remove_dev(to_dmar_domain(domain), dev); in intel_iommu_aux_detach_device()
5529 intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, in intel_iommu_sva_invalidate() argument
5532 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_sva_invalidate()
5660 static int intel_iommu_map(struct iommu_domain *domain, in intel_iommu_map() argument
5664 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_map()
5698 static size_t intel_iommu_unmap(struct iommu_domain *domain, in intel_iommu_unmap() argument
5702 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_unmap()
5734 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, in intel_iommu_iova_to_phys() argument
5737 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_iova_to_phys()
5844 struct iommu_domain *domain; in intel_iommu_probe_finalize() local
5846 domain = iommu_get_domain_for_dev(dev); in intel_iommu_probe_finalize()
5849 else if (domain && domain->type == IOMMU_DOMAIN_DMA) in intel_iommu_probe_finalize()
5916 struct dmar_domain *domain; in intel_iommu_enable_pasid() local
5921 domain = find_domain(dev); in intel_iommu_enable_pasid()
5922 if (!domain) in intel_iommu_enable_pasid()
5944 domain->iommu_did[iommu->seq_id], in intel_iommu_enable_pasid()
5964 struct iommu_domain *domain, in intel_iommu_apply_resv_region() argument
5967 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_apply_resv_region()
6119 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) in intel_iommu_aux_get_pasid() argument
6121 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_aux_get_pasid()
6127 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, in intel_iommu_is_attach_deferred() argument
6134 intel_iommu_domain_set_attr(struct iommu_domain *domain, in intel_iommu_domain_set_attr() argument
6137 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_domain_set_attr()
6141 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in intel_iommu_domain_set_attr()