Lines matching full:domain — identifier cross-reference hits for "domain" in the Renesas IPMMU-VMSA driver (drivers/iommu/ipmmu-vmsa.c in the Linux kernel). Each hit shows the driver's source line number, the matching line, and the enclosing function; the trailing "argument" or "local" marks lines where the match is a parameter or a local-variable declaration.
210 static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain, in ipmmu_ctx_read_root() argument
213 return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg); in ipmmu_ctx_read_root()
216 static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, in ipmmu_ctx_write_root() argument
219 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); in ipmmu_ctx_write_root()
222 static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, in ipmmu_ctx_write_all() argument
225 if (domain->mmu != domain->mmu->root) in ipmmu_ctx_write_all()
226 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data); in ipmmu_ctx_write_all()
228 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); in ipmmu_ctx_write_all()
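The three helpers above capture the driver's split between a root IPMMU and cache (leaf) IPMMUs: context registers are read from the root only, while writes from a domain attached through a leaf are mirrored to both instances. Below is a minimal userspace model of that mirroring; the struct layout, register storage, and names are illustrative stand-ins, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define NUM_CTX  8
#define NUM_REGS 16

struct mmu {
	struct mmu *root;                  /* points to itself on the root instance */
	uint32_t regs[NUM_CTX][NUM_REGS];  /* modelled per-context register file */
};

struct vmsa_domain {
	struct mmu *mmu;                   /* instance the domain is attached to */
	unsigned int context_id;
};

static uint32_t ctx_read_root(struct vmsa_domain *d, unsigned int reg)
{
	/* reads always target the root instance, as in the driver */
	return d->mmu->root->regs[d->context_id][reg];
}

static void ctx_write_all(struct vmsa_domain *d, unsigned int reg, uint32_t v)
{
	if (d->mmu != d->mmu->root)        /* attached via a leaf: mirror locally */
		d->mmu->regs[d->context_id][reg] = v;
	d->mmu->root->regs[d->context_id][reg] = v;   /* always hit the root */
}

int main(void)
{
	static struct mmu root = { .root = &root };
	static struct mmu leaf = { .root = &root };
	struct vmsa_domain d = { .mmu = &leaf, .context_id = 0 };

	ctx_write_all(&d, 0, 0x1);
	printf("root sees %#x, leaf sees %#x\n",
	       (unsigned)ctx_read_root(&d, 0), (unsigned)leaf.regs[0][0]);
	return 0;
}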
253 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_sync() argument
257 while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) { in ipmmu_tlb_sync()
260 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync()
268 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_invalidate() argument
272 reg = ipmmu_ctx_read_root(domain, IMCTR); in ipmmu_tlb_invalidate()
274 ipmmu_ctx_write_all(domain, IMCTR, reg); in ipmmu_tlb_invalidate()
276 ipmmu_tlb_sync(domain); in ipmmu_tlb_invalidate()
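ipmmu_tlb_invalidate() sets the flush bit in IMCTR and ipmmu_tlb_sync() busy-waits until the hardware clears it, giving up after a bounded number of iterations with a rate-limited error. A compact model of that request-then-poll pattern, with the register and the auto-clearing "hardware" simulated:

#include <stdint.h>
#include <stdio.h>

#define CTL_FLUSH    (1u << 1)   /* illustrative flush bit */
#define LOOP_TIMEOUT 1000

static uint32_t imctr;           /* simulated context control register */
static int hw_countdown;         /* fake hardware: clears FLUSH after N reads */

static uint32_t ctx_read(void)
{
	if (hw_countdown && --hw_countdown == 0)
		imctr &= ~CTL_FLUSH; /* "hardware" finished the flush */
	return imctr;
}

static int tlb_sync(void)
{
	unsigned int count = 0;

	while (ctx_read() & CTL_FLUSH) {
		if (++count == LOOP_TIMEOUT) {
			fprintf(stderr, "TLB sync timed out\n");
			return -1;   /* driver logs and gives up the same way */
		}
	}
	return 0;
}

static void tlb_invalidate(void)
{
	imctr |= CTL_FLUSH;          /* request the flush... */
	hw_countdown = 5;            /* ...fake hardware completes in 5 reads */
	(void)tlb_sync();            /* ...and wait for completion */
}

int main(void)
{
	tlb_invalidate();
	printf("flush done, IMCTR=%#x\n", (unsigned)imctr);
	return 0;
}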
282 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, in ipmmu_utlb_enable() argument
285 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable()
295 ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) | in ipmmu_utlb_enable()
297 mmu->utlb_ctx[utlb] = domain->context_id; in ipmmu_utlb_enable()
303 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, in ipmmu_utlb_disable() argument
306 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_disable()
314 struct ipmmu_vmsa_domain *domain = cookie; in ipmmu_tlb_flush_all() local
316 ipmmu_tlb_invalidate(domain); in ipmmu_tlb_flush_all()
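ipmmu_utlb_enable() programs a micro-TLB with a single IMUCTR write that selects the translation context, flushes stale entries, and enables translation (line 295). The sketch below composes such a control word; the bit positions follow my reading of the driver's IMUCTR_* macros and should be treated as illustrative rather than authoritative.

#include <stdint.h>
#include <stdio.h>

#define UCTR_TTSEL(n)  ((uint32_t)(n) << 4) /* context (TT) selector */
#define UCTR_FLUSH     (1u << 1)            /* flush the micro-TLB   */
#define UCTR_MMUEN     (1u << 0)            /* enable translation    */

static uint32_t utlb_enable_word(unsigned int context_id)
{
	/* One write points the micro-TLB at its context, flushes any
	 * stale entries, and turns translation on. */
	return UCTR_TTSEL(context_id) | UCTR_FLUSH | UCTR_MMUEN;
}

int main(void)
{
	printf("IMUCTR for context 3: %#x\n", (unsigned)utlb_enable_word(3));
	return 0;
}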
331 * Domain/Context Management
335 struct ipmmu_vmsa_domain *domain) in ipmmu_domain_allocate_context() argument
344 mmu->domains[ret] = domain; in ipmmu_domain_allocate_context()
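ipmmu_domain_allocate_context() hands out one of a fixed number of hardware contexts, recording the owning domain per slot; in the driver this is a find_first_zero_bit()/set_bit() pair under a spinlock. A lock-free userspace model of the same slot allocator:

#include <errno.h>
#include <stdio.h>

#define NUM_CTX 8

static unsigned int ctx_bitmap;    /* bit n set => context n in use */
static void *domains[NUM_CTX];     /* owner of each context slot    */

/* Find the first free context, mark it used, record its owner.
 * The driver's spinlock around this sequence is omitted here. */
static int allocate_context(void *domain)
{
	for (int i = 0; i < NUM_CTX; i++) {
		if (!(ctx_bitmap & (1u << i))) {
			ctx_bitmap |= 1u << i;
			domains[i] = domain;
			return i;
		}
	}
	return -EBUSY;                 /* all hardware contexts in use */
}

static void free_context(int id)
{
	domains[id] = NULL;
	ctx_bitmap &= ~(1u << id);
}

int main(void)
{
	int a = allocate_context(&ctx_bitmap); /* any pointer works as owner */

	printf("got context %d\n", a);
	free_context(a);
	return 0;
}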
367 static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_setup_context() argument
373 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr; in ipmmu_domain_setup_context()
374 ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr); in ipmmu_domain_setup_context()
375 ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); in ipmmu_domain_setup_context()
382 if (domain->mmu->features->twobit_imttbcr_sl0) in ipmmu_domain_setup_context()
387 if (domain->mmu->features->cache_snoop) in ipmmu_domain_setup_context()
391 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp); in ipmmu_domain_setup_context()
394 ipmmu_ctx_write_root(domain, IMMAIR0, in ipmmu_domain_setup_context()
395 domain->cfg.arm_lpae_s1_cfg.mair); in ipmmu_domain_setup_context()
398 if (domain->mmu->features->setup_imbuscr) in ipmmu_domain_setup_context()
399 ipmmu_ctx_write_root(domain, IMBUSCR, in ipmmu_domain_setup_context()
400 ipmmu_ctx_read_root(domain, IMBUSCR) & in ipmmu_domain_setup_context()
407 ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); in ipmmu_domain_setup_context()
416 ipmmu_ctx_write_all(domain, IMCTR, in ipmmu_domain_setup_context()
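ipmmu_domain_setup_context() programs the 64-bit page-table base through two 32-bit registers, IMTTLBR0 for the low half and IMTTUBR0 for the high half (lines 374-375). The trivial split looks like this; the register variables here are my own stand-ins:

#include <stdint.h>
#include <stdio.h>

static uint32_t imttlbr0, imttubr0;    /* stand-ins for the two registers */

static void write_ttbr(uint64_t ttbr)
{
	imttlbr0 = (uint32_t)ttbr;         /* low 32 bits  -> IMTTLBR0 */
	imttubr0 = (uint32_t)(ttbr >> 32); /* high 32 bits -> IMTTUBR0 */
}

int main(void)
{
	write_ttbr(0x450001000ULL);        /* example 40-bit physical address */
	printf("lo=%#x hi=%#x\n", (unsigned)imttlbr0, (unsigned)imttubr0);
	return 0;
}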
420 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_init_context() argument
435 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; in ipmmu_domain_init_context()
436 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; in ipmmu_domain_init_context()
437 domain->cfg.ias = 32; in ipmmu_domain_init_context()
438 domain->cfg.oas = 40; in ipmmu_domain_init_context()
439 domain->cfg.tlb = &ipmmu_flush_ops; in ipmmu_domain_init_context()
440 domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); in ipmmu_domain_init_context()
441 domain->io_domain.geometry.force_aperture = true; in ipmmu_domain_init_context()
446 domain->cfg.coherent_walk = false; in ipmmu_domain_init_context()
447 domain->cfg.iommu_dev = domain->mmu->root->dev; in ipmmu_domain_init_context()
452 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); in ipmmu_domain_init_context()
456 domain->context_id = ret; in ipmmu_domain_init_context()
458 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, in ipmmu_domain_init_context()
459 domain); in ipmmu_domain_init_context()
460 if (!domain->iop) { in ipmmu_domain_init_context()
461 ipmmu_domain_free_context(domain->mmu->root, in ipmmu_domain_init_context()
462 domain->context_id); in ipmmu_domain_init_context()
466 ipmmu_domain_setup_context(domain); in ipmmu_domain_init_context()
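ipmmu_domain_init_context() reserves a hardware context first and only then allocates the io-pgtable state; if alloc_io_pgtable_ops() fails, the context is returned before erroring out (lines 460-462). A stand-in model of that reserve/allocate/unwind ordering (alloc_pgtable() is a placeholder, not a kernel API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int context_busy;               /* one fake hardware context */

static int allocate_context(void)
{
	if (context_busy)
		return -EBUSY;
	context_busy = 1;
	return 0;                          /* context id 0 */
}

static void free_context(int id) { (void)id; context_busy = 0; }

/* Placeholder for alloc_io_pgtable_ops(): NULL on failure. */
static void *alloc_pgtable(void) { return malloc(64); }

static int domain_init_context(void **iop, int *context_id)
{
	int ret = allocate_context();      /* 1. reserve the context slot */

	if (ret < 0)
		return ret;
	*context_id = ret;

	*iop = alloc_pgtable();            /* 2. allocate page-table state */
	if (!*iop) {
		free_context(*context_id); /* 3. unwind on failure */
		return -ENOMEM;
	}
	/* ...followed by programming the context registers. */
	return 0;
}

int main(void)
{
	void *iop = NULL;
	int id = -1;

	if (domain_init_context(&iop, &id) == 0) {
		free(iop);
		free_context(id);
	}
	return 0;
}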
470 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_destroy_context() argument
472 if (!domain->mmu) in ipmmu_domain_destroy_context()
481 ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH); in ipmmu_domain_destroy_context()
482 ipmmu_tlb_sync(domain); in ipmmu_domain_destroy_context()
483 ipmmu_domain_free_context(domain->mmu->root, domain->context_id); in ipmmu_domain_destroy_context()
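Teardown runs the same steps in reverse: quiesce the context with a single IMCTR write that disables translation while flushing the TLB, wait for the flush to finish, and only then release the context slot so no stale entry can outlive the slot's reuse. A compact model, with the register behavior faked:

#include <stdio.h>

static unsigned int imctr = 0x1;   /* fake: translation enabled, flush idle */
static int context_busy = 1;

static void ctx_write(unsigned int v) { imctr = v; }
static void tlb_sync(void) { imctr &= ~0x2u; /* fake flush completion */ }
static void free_context(void) { context_busy = 0; }

static void domain_destroy_context(void)
{
	ctx_write(0x2);    /* IMCTR := FLUSH only: disables translation and
	                      flushes the TLB in one write, as at line 481 */
	tlb_sync();        /* wait until the flush bit clears */
	free_context();    /* only now is the slot safe to reuse */
}

int main(void)
{
	domain_destroy_context();
	printf("IMCTR=%#x busy=%d\n", imctr, context_busy);
	return 0;
}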
490 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_irq() argument
493 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_domain_irq()
497 status = ipmmu_ctx_read_root(domain, IMSTR); in ipmmu_domain_irq()
501 iova = ipmmu_ctx_read_root(domain, IMELAR); in ipmmu_domain_irq()
503 iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32; in ipmmu_domain_irq()
511 ipmmu_ctx_write_root(domain, IMSTR, 0); in ipmmu_domain_irq()
530 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) in ipmmu_domain_irq()
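The fault handler reads IMSTR, rebuilds the 64-bit faulting IOVA from the IMELAR/IMEUAR pair (lines 501-503), acknowledges by writing IMSTR back to zero, and gives report_iommu_fault() the first chance to resolve the fault. A self-contained model of that flow; the register variables and the handler hook are stand-ins:

#include <stdint.h>
#include <stdio.h>

static uint32_t imstr = 0x2;            /* fake: one fault bit pending */
static uint32_t imelar = 0x1000, imeuar = 0x1;

static int report_fault(uint64_t iova)
{
	printf("handled fault at iova %#llx\n", (unsigned long long)iova);
	return 0;                            /* 0 = handled, mirroring
	                                        report_iommu_fault()'s contract */
}

static int domain_irq(void)
{
	uint32_t status = imstr;
	uint64_t iova;

	if (!status)
		return 0;                    /* IRQ_NONE: not our fault */

	iova = imelar | ((uint64_t)imeuar << 32); /* low | high halves */
	imstr = 0;                           /* ack: clear status, as at line 511 */

	if (!report_fault(iova))
		return 1;                    /* IRQ_HANDLED */
	return 1;                            /* driver's log-and-continue path elided */
}

int main(void)
{
	return domain_irq() == 1 ? 0 : 1;
}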
570 struct ipmmu_vmsa_domain *domain; in __ipmmu_domain_alloc() local
572 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in __ipmmu_domain_alloc()
573 if (!domain) in __ipmmu_domain_alloc()
576 mutex_init(&domain->mutex); in __ipmmu_domain_alloc()
578 return &domain->io_domain; in __ipmmu_domain_alloc()
604 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_domain_free() local
607 * Free the domain resources. We assume that all devices have already in ipmmu_domain_free()
611 ipmmu_domain_destroy_context(domain); in ipmmu_domain_free()
612 free_io_pgtable_ops(domain->iop); in ipmmu_domain_free()
613 kfree(domain); in ipmmu_domain_free()
621 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_attach_device() local
630 mutex_lock(&domain->mutex); in ipmmu_attach_device()
632 if (!domain->mmu) { in ipmmu_attach_device()
633 /* The domain hasn't been used yet, initialize it. */ in ipmmu_attach_device()
634 domain->mmu = mmu; in ipmmu_attach_device()
635 ret = ipmmu_domain_init_context(domain); in ipmmu_attach_device()
638 domain->mmu = NULL; in ipmmu_attach_device()
641 domain->context_id); in ipmmu_attach_device()
643 } else if (domain->mmu != mmu) { in ipmmu_attach_device()
646 * different IOMMUs to the same domain. in ipmmu_attach_device()
648 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", in ipmmu_attach_device()
649 dev_name(mmu->dev), dev_name(domain->mmu->dev)); in ipmmu_attach_device()
652 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); in ipmmu_attach_device()
654 mutex_unlock(&domain->mutex); in ipmmu_attach_device()
660 ipmmu_utlb_enable(domain, fwspec->ids[i]); in ipmmu_attach_device()
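Attach is where the lazy lifecycle pays off: the first device to attach binds the domain to its IPMMU and initializes the hardware context (under domain->mutex); later devices may share the domain only if they sit behind the same IPMMU instance, and each successful attach finishes by enabling one micro-TLB per fwspec ID. A minimal model of the bind-or-reject logic, with locking omitted:

#include <errno.h>
#include <stdio.h>

struct mmu { const char *name; };

struct domain {
	struct mmu *mmu;    /* NULL until the first device attaches */
};

static int attach(struct domain *d, struct mmu *m)
{
	if (!d->mmu) {
		d->mmu = m;  /* lazy init on first use;
		                ipmmu_domain_init_context() would run here */
	} else if (d->mmu != m) {
		fprintf(stderr, "can't attach %s: domain lives on %s\n",
			m->name, d->mmu->name);
		return -EINVAL;
	}
	/* then enable one micro-TLB per fwspec ID for this device */
	return 0;
}

int main(void)
{
	struct mmu a = { "ipmmu0" }, b = { "ipmmu1" };
	struct domain d = { 0 };

	printf("%d %d\n", attach(&d, &a), attach(&d, &b));
	return 0;
}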
669 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_detach_device() local
673 ipmmu_utlb_disable(domain, fwspec->ids[i]); in ipmmu_detach_device()
683 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_map() local
685 if (!domain) in ipmmu_map()
688 return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp); in ipmmu_map()
694 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_unmap() local
696 return domain->iop->unmap(domain->iop, iova, size, gather); in ipmmu_unmap()
701 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_flush_iotlb_all() local
703 if (domain->mmu) in ipmmu_flush_iotlb_all()
704 ipmmu_tlb_flush_all(domain); in ipmmu_flush_iotlb_all()
716 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_iova_to_phys() local
720 return domain->iop->iova_to_phys(domain->iop, iova); in ipmmu_iova_to_phys()
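The map, unmap, and iova_to_phys callbacks above are thin wrappers that forward to the operations table alloc_io_pgtable_ops() returned, so the ARM LPAE page-table code stays driver-agnostic. A toy model of that delegation through a function-pointer table (the ops and the identity "page table" are illustrative):

#include <stdint.h>
#include <stdio.h>

struct pgtable_ops {
	int      (*map)(uint64_t iova, uint64_t paddr);
	uint64_t (*iova_to_phys)(uint64_t iova);
};

static int toy_map(uint64_t iova, uint64_t paddr)
{
	printf("map %#llx -> %#llx\n",
	       (unsigned long long)iova, (unsigned long long)paddr);
	return 0;
}

static uint64_t toy_iova_to_phys(uint64_t iova) { return iova; }

static const struct pgtable_ops toy_ops = {
	.map = toy_map,
	.iova_to_phys = toy_iova_to_phys,
};

struct domain { const struct pgtable_ops *iop; };

int main(void)
{
	struct domain d = { &toy_ops };

	d.iop->map(0x1000, 0x2000);            /* shape of ipmmu_map()          */
	printf("phys %#llx\n", (unsigned long long)
	       d.iop->iova_to_phys(0x1000));   /* shape of ipmmu_iova_to_phys() */
	return 0;
}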
814 * VAs. This will allocate a corresponding IOMMU domain. in ipmmu_init_arm_mapping()