Lines matching refs: rk_domain
(cross-reference of the rk_iommu_domain pointer through the Rockchip IOMMU driver; the left-hand number is the line in the source file, and the trailing tag marks whether rk_domain is a function argument or a local variable at that site)

803 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_iova_to_phys() local
809 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_iova_to_phys()
811 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_iova_to_phys()
823 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_iova_to_phys()
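rk_iommu_iova_to_phys() does the whole walk under dt_lock: the dte index selects an entry in rk_domain->dt (line 811), and the pte index plus page offset finish the translation. The following is a minimal userspace sketch of the index math only, assuming the base Rockchip format's 10/10/12 split of a 32-bit iova (the driver's rk_iova_dte_index()/rk_iova_pte_index() helpers); newer hardware variants may differ.

/*
 * Sketch of the iova split used by rk_iommu_iova_to_phys(), assuming
 * the base layout: bits 31:22 index the directory (dt), bits 21:12
 * index the page table, bits 11:0 are the in-page offset.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t dte_index(uint32_t iova)   { return iova >> 22; }
static uint32_t pte_index(uint32_t iova)   { return (iova >> 12) & 0x3ff; }
static uint32_t page_offset(uint32_t iova) { return iova & 0xfff; }

int main(void)
{
	uint32_t iova = 0x12345678;

	printf("dt[%u] -> pt[%u] + 0x%03x\n",
	       (unsigned)dte_index(iova), (unsigned)pte_index(iova),
	       (unsigned)page_offset(iova));
	return 0;
}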
828 static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, in rk_iommu_zap_iova() argument
835 if (rk_domain->shootdown_entire) in rk_iommu_zap_iova()
839 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_zap_iova()
840 list_for_each(pos, &rk_domain->iommus) { in rk_iommu_zap_iova()
858 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_zap_iova()
861 static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, in rk_iommu_zap_iova_first_last() argument
864 rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE); in rk_iommu_zap_iova_first_last()
866 rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE, in rk_iommu_zap_iova_first_last()
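rk_iommu_zap_iova_first_last() invalidates only the first and last small page of a range, after rk_iommu_zap_iova() has checked the domain's shootdown_entire flag (line 835, propagated from the attached instance at line 1302). The boundary pages are the only ones whose dte/pte can share an iotlb cacheline with a pre-existing mapping, so zapping the two ends suffices after a map. A small model, assuming SPAGE_SIZE is 4 KiB as in the driver:

#include <stdint.h>
#include <stdio.h>

#define SPAGE_SIZE 0x1000u

/* Stand-in for rk_iommu_zap_iova(); the driver walks the attached
 * instances and writes per-iova zap commands to each mmu base. */
static void zap_iova(uint32_t iova, uint32_t size)
{
	printf("zap [0x%08x, 0x%08x)\n", (unsigned)iova,
	       (unsigned)(iova + size));
}

static void zap_iova_first_last(uint32_t iova, uint32_t size)
{
	zap_iova(iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		zap_iova(iova + size - SPAGE_SIZE, SPAGE_SIZE);
}

int main(void)
{
	zap_iova_first_last(0x10000000, 8 * SPAGE_SIZE);
	return 0;
}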
870 static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, in rk_dte_get_page_table() argument
878 assert_spin_locked(&rk_domain->dt_lock); in rk_dte_get_page_table()
881 dte_addr = &rk_domain->dt[dte_index]; in rk_dte_get_page_table()
900 rk_table_flush(rk_domain, in rk_dte_get_page_table()
901 rk_domain->dt_dma + dte_index * sizeof(u32), 1); in rk_dte_get_page_table()
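rk_dte_get_page_table() runs under dt_lock (the assert at line 878): on a directory miss it allocates a zeroed page table, DMA-maps it, installs the dte, and flushes exactly one 32-bit entry back to the device (dt_dma + dte_index * sizeof(u32), length 1, lines 900-901). Below is a hedged userspace model in which the dte is simplified to a plain pointer (the real entry packs a 4 KiB-aligned bus address plus a valid bit) and the DMA mapping and flush are stubbed out:

#include <stdint.h>
#include <stdlib.h>

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

/*
 * dt[] modelled as an array of page-table pointers; NULL stands in
 * for an invalid dte. In the driver the entry is a u32 built by the
 * per-variant dte constructor.
 */
static uint32_t *dte_get_page_table(uint32_t **dt, uint32_t dte_index)
{
	if (!dt[dte_index]) {
		/* get_zeroed_page() + dma_map_single() in the driver */
		dt[dte_index] = calloc(NUM_PT_ENTRIES, sizeof(uint32_t));
		/* driver then flushes this single dte:
		 * rk_table_flush(rk_domain,
		 *     rk_domain->dt_dma + dte_index * sizeof(u32), 1); */
	}
	return dt[dte_index];
}

int main(void)
{
	uint32_t **dt = calloc(NUM_DT_ENTRIES, sizeof(*dt));

	if (!dt)
		return 1;
	(void)dte_get_page_table(dt, 72);
	free(dt[72]);
	free(dt);
	return 0;
}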
907 static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, in rk_iommu_unmap_iova() argument
915 assert_spin_locked(&rk_domain->dt_lock); in rk_iommu_unmap_iova()
929 rk_table_flush(rk_domain, pte_dma, pte_count); in rk_iommu_unmap_iova()
934 static struct rk_iommu *rk_iommu_get(struct rk_iommu_domain *rk_domain) in rk_iommu_get() argument
940 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_get()
941 list_for_each(pos, &rk_domain->iommus) { in rk_iommu_get()
946 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_get()
951 static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, in rk_iommu_map_iova() argument
959 assert_spin_locked(&rk_domain->dt_lock); in rk_iommu_map_iova()
976 rk_table_flush(rk_domain, pte_dma, pte_total); in rk_iommu_map_iova()
984 rk_iommu_zap_iova_first_last(rk_domain, iova, size); in rk_iommu_map_iova()
989 rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, in rk_iommu_map_iova()
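rk_iommu_map_iova() fills the target ptes, flushes them in one rk_table_flush() call (line 976), then zaps only the range's boundary pages (line 984). If any slot is already valid it unwinds by unmapping what it had written so far (line 989) and fails with -EADDRINUSE, so overlapping map requests cannot silently clobber each other. A rough model of the fill/rollback loop; the valid-bit position and helper names here are illustrative:

#include <stdint.h>
#include <stdio.h>

#define SPAGE_SIZE   0x1000u
#define RK_PTE_VALID 0x1u	/* bit layout varies by hardware variant */

static int map_iova(uint32_t *pte_addr, uint32_t paddr, unsigned int pte_total)
{
	unsigned int pte_count;

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		if (pte_addr[pte_count] & RK_PTE_VALID)
			goto unwind;	/* slot already mapped */
		pte_addr[pte_count] =
			(paddr + pte_count * SPAGE_SIZE) | RK_PTE_VALID;
	}
	/* driver: rk_table_flush() over pte_total entries, then
	 * rk_iommu_zap_iova_first_last() */
	return 0;

unwind:
	/* driver: rk_iommu_unmap_iova() over the entries written so far */
	while (pte_count--)
		pte_addr[pte_count] = 0;
	return -1;	/* -EADDRINUSE in the driver */
}

int main(void)
{
	uint32_t pt[8] = { 0 };

	printf("fresh range: %d\n", map_iova(pt, 0x40000000, 4));
	printf("overlapping: %d\n", map_iova(pt + 2, 0x50000000, 4));
	return 0;
}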
1003 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_map() local
1010 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_map()
1019 page_table = rk_dte_get_page_table(rk_domain, iova); in rk_iommu_map()
1021 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_map()
1025 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_map()
1029 ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, in rk_iommu_map()
1032 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_map()
1040 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_unmap() local
1047 struct rk_iommu *iommu = rk_iommu_get(rk_domain); in rk_iommu_unmap()
1049 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
1058 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_unmap()
1061 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
1068 unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size, in rk_iommu_unmap()
1071 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
1074 rk_iommu_zap_iova(rk_domain, iova, unmap_size); in rk_iommu_unmap()
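rk_iommu_unmap() reads the dte and clears ptes while holding dt_lock, then drops the lock before issuing the iotlb zap (line 1074), using the instance obtained via rk_iommu_get() at line 1047. In the mainline version of the rk_iommu_unmap_iova() helper the clearing loop stops at the first invalid pte, so the caller learns how many bytes were actually unmapped. A sketch of that loop:

#include <stdint.h>
#include <stdio.h>

#define SPAGE_SIZE   0x1000u
#define RK_PTE_VALID 0x1u

/* Clear ptes until the first invalid one; report the bytes unmapped.
 * The driver then flushes the cleared entries with rk_table_flush()
 * and, after releasing dt_lock, zaps the same range from the iotlb. */
static size_t unmap_iova(uint32_t *pte_addr, size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		if (!(pte_addr[pte_count] & RK_PTE_VALID))
			break;
		pte_addr[pte_count] = 0;	/* invalid-pte constructor
						 * in the driver */
	}
	return (size_t)pte_count * SPAGE_SIZE;
}

int main(void)
{
	uint32_t pt[4] = { 0x40000001, 0x40001001, 0, 0x40003001 };

	printf("unmapped 0x%zx bytes\n", unmap_iova(pt, 4 * SPAGE_SIZE));
	return 0;
}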
1081 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_flush_tlb_all() local
1086 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_flush_tlb_all()
1087 list_for_each(pos, &rk_domain->iommus) { in rk_iommu_flush_tlb_all()
1105 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_flush_tlb_all()
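rk_iommu_flush_tlb_all() walks every attached instance under iommus_lock and issues a whole-TLB invalidation on each; attach and detach (lines 1259-1261 and 1298-1300) modify the same list under the same lock, so the walk cannot race with an instance disappearing. A minimal model of the intrusive-list walk, with the locking and the driver's pm_runtime/clock handling reduced to comments:

#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *next; };

struct iommu_model {
	const char *name;
	struct list_node node;	/* mirrors struct rk_iommu's node member */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void flush_tlb_all(struct list_node *head)
{
	struct list_node *pos;

	/* driver: spin_lock_irqsave(&rk_domain->iommus_lock, flags) */
	for (pos = head->next; pos != head; pos = pos->next) {
		struct iommu_model *iommu =
			container_of(pos, struct iommu_model, node);

		/* driver: zap-cache command on each mmu base, with
		 * pm_runtime and clock references held around it */
		printf("zap entire tlb on %s\n", iommu->name);
	}
	/* driver: spin_unlock_irqrestore(&rk_domain->iommus_lock, flags) */
}

int main(void)
{
	struct list_node head = { &head };
	struct iommu_model a = { .name = "iommu0" }, b = { .name = "iommu1" };

	/* list_add_tail() as in rk_iommu_attach_device(), line 1299 */
	head.next = &a.node;
	a.node.next = &b.node;
	b.node.next = &head;

	flush_tlb_all(&head);
	return 0;
}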
1152 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_enable() local
1170 rk_ops->dma_addr_dte(rk_domain->dt_dma)); in rk_iommu_enable()
1243 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_detach_device() local
1259 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_detach_device()
1261 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_detach_device()
1275 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_attach_device() local
1298 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_attach_device()
1299 list_add_tail(&iommu->node, &rk_domain->iommus); in rk_iommu_attach_device()
1300 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_attach_device()
1302 rk_domain->shootdown_entire = iommu->shootdown_entire; in rk_iommu_attach_device()
1318 struct rk_iommu_domain *rk_domain; in rk_iommu_domain_alloc() local
1326 rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); in rk_iommu_domain_alloc()
1327 if (!rk_domain) in rk_iommu_domain_alloc()
1331 iommu_get_dma_cookie(&rk_domain->domain)) in rk_iommu_domain_alloc()
1339 rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); in rk_iommu_domain_alloc()
1340 if (!rk_domain->dt) in rk_iommu_domain_alloc()
1343 rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt, in rk_iommu_domain_alloc()
1345 if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) { in rk_iommu_domain_alloc()
1350 spin_lock_init(&rk_domain->iommus_lock); in rk_iommu_domain_alloc()
1351 spin_lock_init(&rk_domain->dt_lock); in rk_iommu_domain_alloc()
1352 INIT_LIST_HEAD(&rk_domain->iommus); in rk_iommu_domain_alloc()
1354 rk_domain->domain.geometry.aperture_start = 0; in rk_iommu_domain_alloc()
1355 rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); in rk_iommu_domain_alloc()
1356 rk_domain->domain.geometry.force_aperture = true; in rk_iommu_domain_alloc()
1358 return &rk_domain->domain; in rk_iommu_domain_alloc()
1361 free_page((unsigned long)rk_domain->dt); in rk_iommu_domain_alloc()
1364 iommu_put_dma_cookie(&rk_domain->domain); in rk_iommu_domain_alloc()
1366 kfree(rk_domain); in rk_iommu_domain_alloc()
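rk_iommu_domain_alloc() builds the domain in stages: kzalloc, an optional DMA cookie, a zeroed GFP_DMA32 directory page (so the 32-bit dte base fits the hardware register written at line 1170), and a dma_map_single() of that page. Every failure exits through a label that undoes only the steps already completed (lines 1361-1366). A stripped-down model of that goto-unwind shape, with the DMA mapping stubbed:

#include <stdint.h>
#include <stdlib.h>

struct domain_model {
	uint32_t *dt;		/* 1024 dtes, one zeroed 4 KiB page */
	uintptr_t dt_dma;	/* bus address from dma_map_single() */
};

/* Stub standing in for dma_map_single() plus dma_mapping_error(). */
static uintptr_t fake_dma_map(void *cpu_addr)
{
	return (uintptr_t)cpu_addr;
}

static struct domain_model *domain_alloc_model(void)
{
	struct domain_model *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;

	/* get_zeroed_page(GFP_KERNEL | GFP_DMA32) in the driver */
	d->dt = calloc(1024, sizeof(*d->dt));
	if (!d->dt)
		goto err_free_domain;

	d->dt_dma = fake_dma_map(d->dt);
	if (!d->dt_dma)
		goto err_free_dt;

	/* driver also inits iommus_lock, dt_lock, the iommus list and
	 * the 0..DMA_BIT_MASK(32) aperture here */
	return d;

err_free_dt:
	free(d->dt);
err_free_domain:
	free(d);
	return NULL;
}

int main(void)
{
	struct domain_model *d = domain_alloc_model();

	if (d) {
		free(d->dt);
		free(d);
	}
	return 0;
}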
1373 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_domain_free() local
1376 WARN_ON(!list_empty(&rk_domain->iommus)); in rk_iommu_domain_free()
1379 u32 dte = rk_domain->dt[i]; in rk_iommu_domain_free()
1389 dma_unmap_single(dma_dev, rk_domain->dt_dma, in rk_iommu_domain_free()
1391 free_page((unsigned long)rk_domain->dt); in rk_iommu_domain_free()
1393 kfree(rk_domain); in rk_iommu_domain_free()
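rk_iommu_domain_free() first asserts that no instance is still attached (the WARN_ON at line 1376), then releases in reverse order of allocation: each valid dte's page table, then the directory's DMA mapping, the directory page, and finally the domain itself. A model matching the pointer-based dt used in the sketches above:

#include <stdint.h>
#include <stdlib.h>

#define NUM_DT_ENTRIES 1024

/* dt[] modelled as page-table pointers, NULL = invalid dte. In the
 * driver each valid entry is dma_unmap_single()'d before free_page(),
 * and the directory gets the same treatment afterwards. */
static void domain_free_model(uint32_t **dt)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++)
		free(dt[i]);	/* free(NULL) is a no-op, matching the
				 * driver's skip of invalid dtes */
	free(dt);
}

int main(void)
{
	uint32_t **dt = calloc(NUM_DT_ENTRIES, sizeof(*dt));

	if (!dt)
		return 1;
	dt[3] = calloc(1024, sizeof(uint32_t));	/* one mapped dte */
	domain_free_model(dt);
	return 0;
}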