Lines matching refs: domain
Each entry gives the source line number, the matching code, and the enclosing function; 'argument' and 'local' note whether the identifier is a function argument or a local variable on that line.
100 static void update_domain(struct protection_domain *domain);
102 static void update_and_flush_device_table(struct protection_domain *domain,
152 return container_of(dom, struct protection_domain, domain); in to_pdomain()
155 static void amd_iommu_domain_get_pgtable(struct protection_domain *domain, in amd_iommu_domain_get_pgtable() argument
158 u64 pt_root = atomic64_read(&domain->pt_root); in amd_iommu_domain_get_pgtable()
164 static void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root) in amd_iommu_domain_set_pt_root() argument
166 atomic64_set(&domain->pt_root, root); in amd_iommu_domain_set_pt_root()
169 static void amd_iommu_domain_clr_pt_root(struct protection_domain *domain) in amd_iommu_domain_clr_pt_root() argument
171 amd_iommu_domain_set_pt_root(domain, 0); in amd_iommu_domain_clr_pt_root()
174 static void amd_iommu_domain_set_pgtable(struct protection_domain *domain, in amd_iommu_domain_set_pgtable() argument
183 amd_iommu_domain_set_pt_root(domain, pt_root); in amd_iommu_domain_set_pgtable()
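The four accessors above exist because domain->pt_root packs two values into one atomic64_t, so readers can snapshot the page-table root and the paging mode together without holding the domain lock. A minimal sketch of the packing (a reconstruction, not copied from this listing; it assumes the root page is page-aligned so its low bits are free):

	static void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
						 u64 *root, int mode)
	{
		u64 pt_root = mode & 7;	/* lowest 3 bits encode the pgtable mode */

		pt_root |= (u64)root;	/* root is page-aligned, low bits are free */

		amd_iommu_domain_set_pt_root(domain, pt_root);
	}

A reader then reverses the packing: root = pt_root & PAGE_MASK, mode = pt_root & 7. This is why alloc_pte() and fetch_pte() go through amd_iommu_domain_get_pgtable() instead of touching pt_root directly.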
430 if (dev_data->domain) in amd_iommu_uninit_device()
1308 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
1315 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1318 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1328 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1339 static void domain_flush_pages(struct protection_domain *domain, in domain_flush_pages() argument
1342 __domain_flush_pages(domain, address, size, 0); in domain_flush_pages()
1346 static void domain_flush_tlb_pde(struct protection_domain *domain) in domain_flush_tlb_pde() argument
1348 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); in domain_flush_tlb_pde()
1351 static void domain_flush_complete(struct protection_domain *domain) in domain_flush_complete() argument
1356 if (domain && !domain->dev_iommu[i]) in domain_flush_complete()
1368 static void domain_flush_np_cache(struct protection_domain *domain, in domain_flush_np_cache() argument
1374 spin_lock_irqsave(&domain->lock, flags); in domain_flush_np_cache()
1375 domain_flush_pages(domain, iova, size); in domain_flush_np_cache()
1376 domain_flush_complete(domain); in domain_flush_np_cache()
1377 spin_unlock_irqrestore(&domain->lock, flags); in domain_flush_np_cache()
1385 static void domain_flush_devices(struct protection_domain *domain) in domain_flush_devices() argument
1389 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1504 static bool increase_address_space(struct protection_domain *domain, in increase_address_space() argument
1517 spin_lock_irqsave(&domain->lock, flags); in increase_address_space()
1519 amd_iommu_domain_get_pgtable(domain, &pgtable); in increase_address_space()
1532 update_and_flush_device_table(domain, &pgtable); in increase_address_space()
1533 domain_flush_complete(domain); in increase_address_space()
1539 amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode); in increase_address_space()
1545 spin_unlock_irqrestore(&domain->lock, flags); in increase_address_space()
1551 static u64 *alloc_pte(struct protection_domain *domain, in alloc_pte() argument
1564 amd_iommu_domain_get_pgtable(domain, &pgtable); in alloc_pte()
1571 if (!increase_address_space(domain, address, gfp)) in alloc_pte()
1575 amd_iommu_domain_get_pgtable(domain, &pgtable); in alloc_pte()
1652 static u64 *fetch_pte(struct protection_domain *domain, in fetch_pte() argument
1662 amd_iommu_domain_get_pgtable(domain, &pgtable); in fetch_pte()
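The alloc_pte() hits above (lines 1564-1575) show the snapshot-and-retry idiom this packing enables. A sketch of that loop, reconstructed under the assumption that PM_LEVEL_SIZE(mode) yields the largest address the current number of page-table levels can translate:

	/* Snapshot the page table; re-read after every grow attempt,
	 * because increase_address_space() publishes a new pt_root
	 * atomically and may have raced with another CPU.
	 */
	amd_iommu_domain_get_pgtable(domain, &pgtable);

	while (address > PM_LEVEL_SIZE(pgtable.mode)) {
		if (!increase_address_space(domain, address, gfp))
			return NULL;

		amd_iommu_domain_get_pgtable(domain, &pgtable);
	}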
1896 static void free_gcr3_table(struct protection_domain *domain) in free_gcr3_table() argument
1898 if (domain->glx == 2) in free_gcr3_table()
1899 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1900 else if (domain->glx == 1) in free_gcr3_table()
1901 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1903 BUG_ON(domain->glx != 0); in free_gcr3_table()
1905 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
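free_gcr3_table() (lines 1896-1905) tears down a small radix tree of GCR3 pages whose depth is recorded in domain->glx: glx == 2 means three levels, glx == 1 two, and glx == 0 a single page freed directly. A hedged sketch of what a level-1 walker plausibly looks like (free_gcr3_tbl_level1() is only called here, not shown; GCR3_VALID and the 512-entry page layout are assumptions taken from the AMD IOMMU specification):

	static void free_gcr3_tbl_level1(u64 *tbl)
	{
		u64 *ptr;
		int i;

		for (i = 0; i < 512; ++i) {
			if (!(tbl[i] & GCR3_VALID))
				continue;

			/* entries hold physical addresses of lower pages */
			ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
			free_page((unsigned long)ptr);
		}
	}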
1908 static void set_dte_entry(u16 devid, struct protection_domain *domain, in set_dte_entry() argument
1935 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1936 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); in set_dte_entry()
1937 u64 glx = domain->glx; in set_dte_entry()
1962 flags |= domain->id; in set_dte_entry()
1990 struct protection_domain *domain) in do_attach() argument
2000 dev_data->domain = domain; in do_attach()
2001 list_add(&dev_data->list, &domain->dev_list); in do_attach()
2004 domain->dev_iommu[iommu->index] += 1; in do_attach()
2005 domain->dev_cnt += 1; in do_attach()
2008 amd_iommu_domain_get_pgtable(domain, &pgtable); in do_attach()
2009 set_dte_entry(dev_data->devid, domain, &pgtable, in do_attach()
2018 struct protection_domain *domain = dev_data->domain; in do_detach() local
2024 dev_data->domain = NULL; in do_detach()
2033 domain_flush_tlb_pde(domain); in do_detach()
2036 domain_flush_complete(domain); in do_detach()
2039 domain->dev_iommu[iommu->index] -= 1; in do_detach()
2040 domain->dev_cnt -= 1; in do_detach()
2117 struct protection_domain *domain) in attach_device() argument
2124 spin_lock_irqsave(&domain->lock, flags); in attach_device()
2131 if (dev_data->domain != NULL) in attach_device()
2138 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
2162 do_attach(dev_data, domain); in attach_device()
2169 domain_flush_tlb_pde(domain); in attach_device()
2171 domain_flush_complete(domain); in attach_device()
2176 spin_unlock_irqrestore(&domain->lock, flags); in attach_device()
2186 struct protection_domain *domain; in detach_device() local
2191 domain = dev_data->domain; in detach_device()
2193 spin_lock_irqsave(&domain->lock, flags); in detach_device()
2203 if (WARN_ON(!dev_data->domain)) in detach_device()
2211 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
2221 spin_unlock_irqrestore(&domain->lock, flags); in detach_device()
2260 struct iommu_domain *domain; in amd_iommu_probe_finalize() local
2263 domain = iommu_get_domain_for_dev(dev); in amd_iommu_probe_finalize()
2264 if (domain->type == IOMMU_DOMAIN_DMA) in amd_iommu_probe_finalize()
2290 static int amd_iommu_domain_get_attr(struct iommu_domain *domain, in amd_iommu_domain_get_attr() argument
2293 switch (domain->type) { in amd_iommu_domain_get_attr()
2316 static void update_device_table(struct protection_domain *domain, in update_device_table() argument
2321 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
2322 set_dte_entry(dev_data->devid, domain, pgtable, in update_device_table()
2328 static void update_and_flush_device_table(struct protection_domain *domain, in update_and_flush_device_table() argument
2331 update_device_table(domain, pgtable); in update_and_flush_device_table()
2332 domain_flush_devices(domain); in update_and_flush_device_table()
2335 static void update_domain(struct protection_domain *domain) in update_domain() argument
2340 amd_iommu_domain_get_pgtable(domain, &pgtable); in update_domain()
2341 update_and_flush_device_table(domain, &pgtable); in update_domain()
2344 domain_flush_tlb_pde(domain); in update_domain()
2345 domain_flush_complete(domain); in update_domain()
2394 static void cleanup_domain(struct protection_domain *domain) in cleanup_domain() argument
2399 spin_lock_irqsave(&domain->lock, flags); in cleanup_domain()
2401 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
2402 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
2404 BUG_ON(!entry->domain); in cleanup_domain()
2408 spin_unlock_irqrestore(&domain->lock, flags); in cleanup_domain()
2411 static void protection_domain_free(struct protection_domain *domain) in protection_domain_free() argument
2415 if (!domain) in protection_domain_free()
2418 if (domain->id) in protection_domain_free()
2419 domain_id_free(domain->id); in protection_domain_free()
2421 amd_iommu_domain_get_pgtable(domain, &pgtable); in protection_domain_free()
2422 amd_iommu_domain_clr_pt_root(domain); in protection_domain_free()
2425 kfree(domain); in protection_domain_free()
2428 static int protection_domain_init(struct protection_domain *domain, int mode) in protection_domain_init() argument
2434 spin_lock_init(&domain->lock); in protection_domain_init()
2435 domain->id = domain_id_alloc(); in protection_domain_init()
2436 if (!domain->id) in protection_domain_init()
2438 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_init()
2446 amd_iommu_domain_set_pgtable(domain, pt_root, mode); in protection_domain_init()
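protection_domain_init() (lines 2428-2446) allocates the domain ID, initializes the lock and device list, then publishes the initial page table via amd_iommu_domain_set_pgtable(). The listing elides the root allocation between those hits; a sketch of the missing middle, assuming PAGE_MODE_NONE means "no page table at all" (as the direct-map and v2 paths further down suggest):

	u64 *pt_root = NULL;

	...

	if (mode != PAGE_MODE_NONE) {
		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pt_root)
			return -ENOMEM;
	}

	amd_iommu_domain_set_pgtable(domain, pt_root, mode);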
2453 struct protection_domain *domain; in protection_domain_alloc() local
2455 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in protection_domain_alloc()
2456 if (!domain) in protection_domain_alloc()
2459 if (protection_domain_init(domain, mode)) in protection_domain_alloc()
2462 return domain; in protection_domain_alloc()
2465 kfree(domain); in protection_domain_alloc()
2472 struct protection_domain *domain; in amd_iommu_domain_alloc() local
2478 domain = protection_domain_alloc(mode); in amd_iommu_domain_alloc()
2479 if (!domain) in amd_iommu_domain_alloc()
2482 domain->domain.geometry.aperture_start = 0; in amd_iommu_domain_alloc()
2483 domain->domain.geometry.aperture_end = ~0ULL; in amd_iommu_domain_alloc()
2484 domain->domain.geometry.force_aperture = true; in amd_iommu_domain_alloc()
2487 iommu_get_dma_cookie(&domain->domain) == -ENOMEM) in amd_iommu_domain_alloc()
2490 return &domain->domain; in amd_iommu_domain_alloc()
2493 protection_domain_free(domain); in amd_iommu_domain_alloc()
2500 struct protection_domain *domain; in amd_iommu_domain_free() local
2502 domain = to_pdomain(dom); in amd_iommu_domain_free()
2504 if (domain->dev_cnt > 0) in amd_iommu_domain_free()
2505 cleanup_domain(domain); in amd_iommu_domain_free()
2507 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_free()
2513 iommu_put_dma_cookie(&domain->domain); in amd_iommu_domain_free()
2515 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_free()
2516 free_gcr3_table(domain); in amd_iommu_domain_free()
2518 protection_domain_free(domain); in amd_iommu_domain_free()
2535 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2554 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_attach_device() local
2569 if (dev_data->domain) in amd_iommu_attach_device()
2572 ret = attach_device(dev, domain); in amd_iommu_attach_device()
2592 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_map() local
2597 amd_iommu_domain_get_pgtable(domain, &pgtable); in amd_iommu_map()
2606 ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp); in amd_iommu_map()
2608 domain_flush_np_cache(domain, iova, page_size); in amd_iommu_map()
2617 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_unmap() local
2620 amd_iommu_domain_get_pgtable(domain, &pgtable); in amd_iommu_unmap()
2624 return iommu_unmap_page(domain, iova, page_size); in amd_iommu_unmap()
2630 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_iova_to_phys() local
2635 amd_iommu_domain_get_pgtable(domain, &pgtable); in amd_iommu_iova_to_phys()
2639 pte = fetch_pte(domain, iova, &pte_pgsize); in amd_iommu_iova_to_phys()
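amd_iommu_iova_to_phys() (lines 2630-2639) resolves an IOVA by walking the snapshot with fetch_pte(), which also reports the page size the final PTE covers. The elided tail composes the result roughly like this (a sketch; IOMMU_PTE_PRESENT and PM_ADDR_MASK are assumed from the driver's headers):

	pte = fetch_pte(domain, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	/* keep the PTE's frame address, re-apply the in-page offset */
	offset_mask = pte_pgsize - 1;
	return (*pte & PM_ADDR_MASK & ~offset_mask) | (iova & offset_mask);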
2718 bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, in amd_iommu_is_attach_deferred() argument
2727 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) in amd_iommu_flush_iotlb_all() argument
2729 struct protection_domain *dom = to_pdomain(domain); in amd_iommu_flush_iotlb_all()
2738 static void amd_iommu_iotlb_sync(struct iommu_domain *domain, in amd_iommu_iotlb_sync() argument
2741 amd_iommu_flush_iotlb_all(domain); in amd_iommu_iotlb_sync()
2811 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_direct_map() local
2815 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
2818 amd_iommu_domain_get_pgtable(domain, &pgtable); in amd_iommu_domain_direct_map()
2821 amd_iommu_domain_clr_pt_root(domain); in amd_iommu_domain_direct_map()
2824 update_domain(domain); in amd_iommu_domain_direct_map()
2829 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
2835 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_enable_v2() local
2849 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2857 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
2861 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
2862 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
2865 domain->glx = levels; in amd_iommu_domain_enable_v2()
2866 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
2868 update_domain(domain); in amd_iommu_domain_enable_v2()
2873 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2879 static int __flush_pasid(struct protection_domain *domain, u32 pasid, in __flush_pasid() argument
2886 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
2889 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
2896 if (domain->dev_iommu[i] == 0) in __flush_pasid()
2905 domain_flush_complete(domain); in __flush_pasid()
2908 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2931 domain_flush_complete(domain); in __flush_pasid()
2940 static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid, in __amd_iommu_flush_page() argument
2943 return __flush_pasid(domain, pasid, address, false); in __amd_iommu_flush_page()
2949 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_page() local
2953 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
2954 ret = __amd_iommu_flush_page(domain, pasid, address); in amd_iommu_flush_page()
2955 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
2961 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid) in __amd_iommu_flush_tlb() argument
2963 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in __amd_iommu_flush_tlb()
2969 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_tlb() local
2973 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
2974 ret = __amd_iommu_flush_tlb(domain, pasid); in amd_iommu_flush_tlb()
2975 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
3013 static int __set_gcr3(struct protection_domain *domain, u32 pasid, in __set_gcr3() argument
3019 amd_iommu_domain_get_pgtable(domain, &pgtable); in __set_gcr3()
3023 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
3029 return __amd_iommu_flush_tlb(domain, pasid); in __set_gcr3()
3032 static int __clear_gcr3(struct protection_domain *domain, u32 pasid) in __clear_gcr3() argument
3037 amd_iommu_domain_get_pgtable(domain, &pgtable); in __clear_gcr3()
3041 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
3047 return __amd_iommu_flush_tlb(domain, pasid); in __clear_gcr3()
3053 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_set_gcr3() local
3057 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3058 ret = __set_gcr3(domain, pasid, cr3); in amd_iommu_domain_set_gcr3()
3059 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3067 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_clear_gcr3() local
3071 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3072 ret = __clear_gcr3(domain, pasid); in amd_iommu_domain_clear_gcr3()
3073 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3107 pdomain = dev_data->domain; in amd_iommu_get_v2_domain()
3126 return &pdomain->domain; in amd_iommu_get_v2_domain()
3736 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, in irq_remapping_alloc() argument
3763 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in irq_remapping_alloc()
3805 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
3838 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
3845 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_alloc()
3849 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, in irq_remapping_free() argument
3858 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_free()
3867 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_free()
3875 static int irq_remapping_activate(struct irq_domain *domain, in irq_remapping_activate() argument
3892 static void irq_remapping_deactivate(struct irq_domain *domain, in irq_remapping_deactivate() argument
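Taken together, the hits above cover the whole life cycle of a protection domain: allocation, device attach, map/unmap with the flush helpers, and teardown. A hedged usage sketch showing how these callbacks are reached through the generic IOMMU API (the example function is hypothetical; the amd_iommu_* entries named in the comments are the ones indexed above):

	#include <linux/iommu.h>

	static int example_map_one_page(struct device *dev, unsigned long iova,
					phys_addr_t paddr)
	{
		struct iommu_domain *dom;
		int ret;

		dom = iommu_domain_alloc(dev->bus);	/* amd_iommu_domain_alloc() */
		if (!dom)
			return -ENOMEM;

		ret = iommu_attach_device(dom, dev);	/* attach_device() */
		if (ret)
			goto out_free;

		ret = iommu_map(dom, iova, paddr, PAGE_SIZE,
				IOMMU_READ | IOMMU_WRITE);	/* amd_iommu_map() */
		if (!ret)
			iommu_unmap(dom, iova, PAGE_SIZE);	/* amd_iommu_unmap() */

		iommu_detach_device(dom, dev);		/* detach_device() */
	out_free:
		iommu_domain_free(dom);			/* amd_iommu_domain_free() */
		return ret;
	}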