Lines matching refs: s390_domain

24 struct s390_domain {  struct
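The matches above and below come from the s390 PCI IOMMU driver (drivers/iommu/s390-iommu.c in the Linux kernel). Judging from the fields the matched lines touch, the struct at line 24 plausibly looks like the sketch below; embedding a struct iommu_domain lets the driver recover its private state with container_of(). The companion struct s390_domain_device (a list node pairing the domain with one zpci_dev) is inferred from the attach/detach lines and is an assumption:

        struct s390_domain {
                struct iommu_domain     domain;         /* generic IOMMU domain */
                struct list_head        devices;        /* attached s390_domain_device entries */
                unsigned long           *dma_table;     /* root of the DMA translation table */
                spinlock_t              dma_table_lock;
                spinlock_t              list_lock;
        };

        struct s390_domain_device {
                struct list_head        list;           /* link in s390_domain->devices */
                struct zpci_dev         *zdev;
        };
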
37 static struct s390_domain *to_s390_domain(struct iommu_domain *dom) in to_s390_domain() argument
39 return container_of(dom, struct s390_domain, domain); in to_s390_domain()
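Reassembled, the helper at lines 37-39 is the usual container_of() downcast from the core's iommu_domain to the driver-private wrapper:

        static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
        {
                return container_of(dom, struct s390_domain, domain);
        }
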
56 struct s390_domain *s390_domain; in s390_domain_alloc() local
61 s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL); in s390_domain_alloc()
62 if (!s390_domain) in s390_domain_alloc()
65 s390_domain->dma_table = dma_alloc_cpu_table(); in s390_domain_alloc()
66 if (!s390_domain->dma_table) { in s390_domain_alloc()
67 kfree(s390_domain); in s390_domain_alloc()
71 spin_lock_init(&s390_domain->dma_table_lock); in s390_domain_alloc()
72 spin_lock_init(&s390_domain->list_lock); in s390_domain_alloc()
73 INIT_LIST_HEAD(&s390_domain->devices); in s390_domain_alloc()
75 return &s390_domain->domain; in s390_domain_alloc()
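Filling the gaps between the matched lines 56-75, s390_domain_alloc() roughly reads as below; the IOMMU_DOMAIN_UNMANAGED check and the bare NULL returns are assumptions based on common kernel conventions, the rest comes from the matches:

        static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
        {
                struct s390_domain *s390_domain;

                /* assumed: the driver only supports unmanaged domains */
                if (domain_type != IOMMU_DOMAIN_UNMANAGED)
                        return NULL;

                s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
                if (!s390_domain)
                        return NULL;

                /* allocate the root of the CPU-side DMA translation table */
                s390_domain->dma_table = dma_alloc_cpu_table();
                if (!s390_domain->dma_table) {
                        kfree(s390_domain);
                        return NULL;
                }

                spin_lock_init(&s390_domain->dma_table_lock);
                spin_lock_init(&s390_domain->list_lock);
                INIT_LIST_HEAD(&s390_domain->devices);

                return &s390_domain->domain;
        }

Note that no aperture is set here; the list_empty() branch in s390_iommu_attach_device() below suggests the first attached device defines it.
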
80 struct s390_domain *s390_domain = to_s390_domain(domain); in s390_domain_free() local
82 dma_cleanup_tables(s390_domain->dma_table); in s390_domain_free()
83 kfree(s390_domain); in s390_domain_free()
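The destructor mirrors the constructor, reassembled directly from lines 80-83:

        static void s390_domain_free(struct iommu_domain *domain)
        {
                struct s390_domain *s390_domain = to_s390_domain(domain);

                dma_cleanup_tables(s390_domain->dma_table);
                kfree(s390_domain);
        }
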
89 struct s390_domain *s390_domain = to_s390_domain(domain); in s390_iommu_attach_device() local
105 zdev->dma_table = s390_domain->dma_table; in s390_iommu_attach_device()
111 spin_lock_irqsave(&s390_domain->list_lock, flags); in s390_iommu_attach_device()
113 if (list_empty(&s390_domain->devices)) { in s390_iommu_attach_device()
121 spin_unlock_irqrestore(&s390_domain->list_lock, flags); in s390_iommu_attach_device()
125 zdev->s390_domain = s390_domain; in s390_iommu_attach_device()
126 list_add(&domain_device->list, &s390_domain->devices); in s390_iommu_attach_device()
127 spin_unlock_irqrestore(&s390_domain->list_lock, flags); in s390_iommu_attach_device()
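A condensed sketch of the attach path. The zpci helpers (to_zpci_dev(), zpci_dma_exit_device(), zpci_register_ioat(), zpci_dma_init_device()) and the error label are assumptions modeled on the ~v5.x driver; the matched lines pin down the locking and the list_empty() aperture logic:

        static int s390_iommu_attach_device(struct iommu_domain *domain,
                                            struct device *dev)
        {
                struct s390_domain *s390_domain = to_s390_domain(domain);
                struct zpci_dev *zdev = to_zpci_dev(dev);
                struct s390_domain_device *domain_device;
                unsigned long flags;
                int rc;

                if (!zdev)
                        return -ENODEV;

                domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
                if (!domain_device)
                        return -ENOMEM;

                /* leave the default DMA API setup before taking over the device */
                if (zdev->dma_table)
                        zpci_dma_exit_device(zdev);

                zdev->dma_table = s390_domain->dma_table;
                rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                        (u64) zdev->dma_table);
                if (rc)
                        goto out_restore;

                spin_lock_irqsave(&s390_domain->list_lock, flags);
                /* the first device defines the domain's aperture ... */
                if (list_empty(&s390_domain->devices)) {
                        domain->geometry.aperture_start = zdev->start_dma;
                        domain->geometry.aperture_end = zdev->end_dma;
                        domain->geometry.force_aperture = true;
                /* ... later devices must have identical DMA range limits */
                } else if (domain->geometry.aperture_start != zdev->start_dma ||
                           domain->geometry.aperture_end != zdev->end_dma) {
                        rc = -EINVAL;
                        spin_unlock_irqrestore(&s390_domain->list_lock, flags);
                        goto out_restore;
                }
                domain_device->zdev = zdev;
                zdev->s390_domain = s390_domain;
                list_add(&domain_device->list, &s390_domain->devices);
                spin_unlock_irqrestore(&s390_domain->list_lock, flags);

                return 0;

        out_restore:
                zpci_dma_init_device(zdev);
                kfree(domain_device);
                return rc;
        }
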
141 struct s390_domain *s390_domain = to_s390_domain(domain); in s390_iommu_detach_device() local
150 spin_lock_irqsave(&s390_domain->list_lock, flags); in s390_iommu_detach_device()
151 list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices, in s390_iommu_detach_device()
160 spin_unlock_irqrestore(&s390_domain->list_lock, flags); in s390_iommu_detach_device()
163 zdev->s390_domain = NULL; in s390_iommu_detach_device()
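Detach walks the device list under list_lock with the _safe iterator (the entry is freed inside the loop) and, once the entry is found, clears zdev->s390_domain and hands the device back to the default DMA setup. Everything outside the matched lines 141-163 is an assumption in the same ~v5.x style:

        static void s390_iommu_detach_device(struct iommu_domain *domain,
                                             struct device *dev)
        {
                struct s390_domain *s390_domain = to_s390_domain(domain);
                struct zpci_dev *zdev = to_zpci_dev(dev);
                struct s390_domain_device *domain_device, *tmp;
                unsigned long flags;
                int found = 0;

                if (!zdev)
                        return;

                spin_lock_irqsave(&s390_domain->list_lock, flags);
                list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
                                         list) {
                        if (domain_device->zdev == zdev) {
                                list_del(&domain_device->list);
                                kfree(domain_device);
                                found = 1;
                                break;
                        }
                }
                spin_unlock_irqrestore(&s390_domain->list_lock, flags);

                if (found) {
                        zdev->s390_domain = NULL;
                        zpci_unregister_ioat(zdev, 0);
                        zpci_dma_init_device(zdev);
                }
        }
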
192 if (zdev && zdev->s390_domain) { in s390_iommu_release_device()
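Only the guard at line 192 is matched; the rest of s390_iommu_release_device() below is an assumption modeled on the upstream driver, which detaches a still-attached device so the domain's list holds no stale entry:

        static void s390_iommu_release_device(struct device *dev)
        {
                struct zpci_dev *zdev = to_zpci_dev(dev);
                struct iommu_domain *domain;

                /* detach explicitly if the core did not do it before release */
                if (zdev && zdev->s390_domain) {
                        domain = iommu_get_domain_for_dev(dev);
                        if (domain)
                                s390_iommu_detach_device(domain, dev);
                }
        }
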
199 static int s390_iommu_update_trans(struct s390_domain *s390_domain, in s390_iommu_update_trans() argument
210 if (dma_addr < s390_domain->domain.geometry.aperture_start || in s390_iommu_update_trans()
211 dma_addr + size > s390_domain->domain.geometry.aperture_end) in s390_iommu_update_trans()
218 spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags); in s390_iommu_update_trans()
220 entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr); in s390_iommu_update_trans()
230 spin_lock(&s390_domain->list_lock); in s390_iommu_update_trans()
231 list_for_each_entry(domain_device, &s390_domain->devices, list) { in s390_iommu_update_trans()
237 spin_unlock(&s390_domain->list_lock); in s390_iommu_update_trans()
245 entry = dma_walk_cpu_trans(s390_domain->dma_table, in s390_iommu_update_trans()
252 spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags); in s390_iommu_update_trans()
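s390_iommu_update_trans() is the workhorse shared by map and unmap: under dma_table_lock it updates one page-table entry per page, then, under list_lock, flushes the translation on every attached device via zpci_refresh_trans(). A sketch; the helper names (dma_walk_cpu_trans(), dma_update_cpu_trans(), the ZPCI_PTE_* flags) are taken from asm/pci_dma.h of the same era and the undo path is inferred from the second dma_walk_cpu_trans() match at line 245:

        static int s390_iommu_update_trans(struct s390_domain *s390_domain,
                                           unsigned long pa, dma_addr_t dma_addr,
                                           size_t size, int flags)
        {
                struct s390_domain_device *domain_device;
                u8 *page_addr = (u8 *) (pa & PAGE_MASK);
                dma_addr_t start_dma_addr = dma_addr;
                unsigned long irq_flags, nr_pages, i;
                unsigned long *entry;
                int rc = 0;

                /* reject I/O addresses outside the aperture set at attach time */
                if (dma_addr < s390_domain->domain.geometry.aperture_start ||
                    dma_addr + size > s390_domain->domain.geometry.aperture_end)
                        return -EINVAL;

                nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
                if (!nr_pages)
                        return 0;

                spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
                for (i = 0; i < nr_pages; i++) {
                        entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
                        if (!entry) {
                                rc = -ENOMEM;
                                goto undo_cpu_trans;
                        }
                        dma_update_cpu_trans(entry, page_addr, flags);
                        page_addr += PAGE_SIZE;
                        dma_addr += PAGE_SIZE;
                }

                /* tell every attached device to refresh its cached translations */
                spin_lock(&s390_domain->list_lock);
                list_for_each_entry(domain_device, &s390_domain->devices, list) {
                        rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
                                                start_dma_addr, nr_pages * PAGE_SIZE);
                        if (rc)
                                break;
                }
                spin_unlock(&s390_domain->list_lock);

        undo_cpu_trans:
                /* on a failed map, walk back and invalidate what was written */
                if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
                        flags = ZPCI_PTE_INVALID;
                        while (i-- > 0) {
                                page_addr -= PAGE_SIZE;
                                dma_addr -= PAGE_SIZE;
                                entry = dma_walk_cpu_trans(s390_domain->dma_table,
                                                           dma_addr);
                                if (!entry)
                                        break;
                                dma_update_cpu_trans(entry, page_addr, flags);
                        }
                }
                spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);

                return rc;
        }
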
260 struct s390_domain *s390_domain = to_s390_domain(domain); in s390_iommu_map() local
269 rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova, in s390_iommu_map()
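s390_iommu_map() is a thin wrapper that translates IOMMU_READ/IOMMU_WRITE into the ZPCI PTE flags before delegating to s390_iommu_update_trans(). A sketch; the gfp parameter exists only in newer kernels, so treat the exact prototype as an assumption:

        static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
                                  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
        {
                struct s390_domain *s390_domain = to_s390_domain(domain);
                int flags = ZPCI_PTE_VALID, rc = 0;

                if (!(prot & IOMMU_READ))
                        return -EINVAL;

                /* a read-only mapping becomes a protected (write-disabled) entry */
                if (!(prot & IOMMU_WRITE))
                        flags |= ZPCI_TABLE_PROTECTED;

                rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
                                             size, flags);

                return rc;
        }
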
278 struct s390_domain *s390_domain = to_s390_domain(domain); in s390_iommu_iova_to_phys() local
290 rto = s390_domain->dma_table; in s390_iommu_iova_to_phys()
292 spin_lock_irqsave(&s390_domain->dma_table_lock, flags); in s390_iommu_iova_to_phys()
301 spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags); in s390_iommu_iova_to_phys()
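The lookup walks the three-level table (region table, then segment table, then page table) by hand under dma_table_lock; calc_rtx()/calc_sx()/calc_px() extract the index fields from the IOVA. The helper names are assumptions taken from the matching asm/pci_dma.h:

        static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
                                                   dma_addr_t iova)
        {
                struct s390_domain *s390_domain = to_s390_domain(domain);
                unsigned long *sto, *pto, *rto, flags;
                unsigned int rtx, sx, px;
                phys_addr_t phys = 0;

                if (iova < domain->geometry.aperture_start ||
                    iova > domain->geometry.aperture_end)
                        return 0;

                rtx = calc_rtx(iova);           /* region table index */
                sx = calc_sx(iova);             /* segment table index */
                px = calc_px(iova);             /* page table index */
                rto = s390_domain->dma_table;

                spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
                if (rto && reg_entry_isvalid(rto[rtx])) {
                        sto = get_rt_sto(rto[rtx]);
                        if (sto && reg_entry_isvalid(sto[sx])) {
                                pto = get_st_pto(sto[sx]);
                                if (pto && pt_entry_isvalid(pto[px]))
                                        phys = pto[px] & ZPCI_PTE_ADDR_MASK;
                        }
                }
                spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);

                return phys;
        }
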
310 struct s390_domain *s390_domain = to_s390_domain(domain); in s390_iommu_unmap() local
319 rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova, in s390_iommu_unmap()
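Unmap first resolves the IOVA to its physical address (which explains why line 319 passes paddr even on the unmap path), then reuses s390_iommu_update_trans() with ZPCI_PTE_INVALID; returning 0 signals that nothing was unmapped. The gather parameter is an assumption for newer kernels:

        static size_t s390_iommu_unmap(struct iommu_domain *domain,
                                       unsigned long iova, size_t size,
                                       struct iommu_iotlb_gather *gather)
        {
                struct s390_domain *s390_domain = to_s390_domain(domain);
                int flags = ZPCI_PTE_INVALID;
                phys_addr_t paddr;
                int rc;

                paddr = s390_iommu_iova_to_phys(domain, iova);
                if (!paddr)
                        return 0;

                rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
                                             size, flags);
                if (rc)
                        return 0;

                return size;
        }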