Lines matching full:iommu (Intel VT-d SVM: page request queue setup/teardown, PASID bind/unbind and page-fault handling paths)

8 #include <linux/intel-iommu.h>
31 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
38 pr_warn("IOMMU: %s: Failed to allocate page request queue\n", in intel_svm_enable_prq()
39 iommu->name); in intel_svm_enable_prq()
42 iommu->prq = page_address(pages); in intel_svm_enable_prq()
44 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
46 pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n", in intel_svm_enable_prq()
47 iommu->name); in intel_svm_enable_prq()
50 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
51 iommu->prq = NULL; in intel_svm_enable_prq()
54 iommu->pr_irq = irq; in intel_svm_enable_prq()
56 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
59 iommu->prq_name, iommu); in intel_svm_enable_prq()
61 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", in intel_svm_enable_prq()
62 iommu->name); in intel_svm_enable_prq()
64 iommu->pr_irq = 0; in intel_svm_enable_prq()
67 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
68 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
69 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
71 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
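
The intel_svm_enable_prq() matches above cover the whole bring-up of the page request queue (PRQ): allocate the ring, wire up a dedicated interrupt, then point the DMAR_PQH/PQT/PQA registers at it. Below is a minimal sketch of how those lines fit together, assuming the declarations from <linux/intel-iommu.h>; the alloc_pages_node() call, the IRQF_ONESHOT wiring of prq_event_thread() and the error codes are assumptions, the rest is taken from the matched lines.

/*
 * Sketch only: the page allocation call, the handler wiring and the error
 * codes are assumptions; register programming and cleanup follow the matches.
 */
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
        struct page *pages;
        int irq, ret;

        /* Back the page request queue with 2^PRQ_ORDER zeroed pages */
        pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
        if (!pages) {
                pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
                        iommu->name);
                return -ENOMEM;
        }
        iommu->prq = page_address(pages);

        /* Reserve a dedicated hardware IRQ for page request events */
        irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
        if (irq <= 0) {
                pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
                       iommu->name);
                free_pages((unsigned long)iommu->prq, PRQ_ORDER);
                iommu->prq = NULL;
                return -EINVAL;
        }
        iommu->pr_irq = irq;

        snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

        /* Handler wiring is assumed; prq_event_thread() is matched further below */
        ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                                   iommu->prq_name, iommu);
        if (ret) {
                pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
                       iommu->name);
                dmar_free_hwirq(irq);
                iommu->pr_irq = 0;
                free_pages((unsigned long)iommu->prq, PRQ_ORDER);
                iommu->prq = NULL;
                return ret;
        }

        /* Head and tail start at zero; PQA carries the base address OR'ed with the size order */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

        init_completion(&iommu->prq_complete);

        return 0;
}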
76 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
78 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
79 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
80 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
82 if (iommu->pr_irq) { in intel_svm_finish_prq()
83 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
84 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
85 iommu->pr_irq = 0; in intel_svm_finish_prq()
88 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
89 iommu->prq = NULL; in intel_svm_finish_prq()
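
The intel_svm_finish_prq() lines are the exact mirror of the bring-up: quiesce the hardware queue pointers first, then release the interrupt (if one was set up) and the ring pages. The sketch below only rearranges the matched lines into one function.

int intel_svm_finish_prq(struct intel_iommu *iommu)
{
        /* Stop the hardware from using the queue before tearing it down */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

        if (iommu->pr_irq) {
                free_irq(iommu->pr_irq, iommu);
                dmar_free_hwirq(iommu->pr_irq);
                iommu->pr_irq = 0;
        }

        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;

        return 0;
}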
94 static inline bool intel_svm_capable(struct intel_iommu *iommu) in intel_svm_capable() argument
96 return iommu->flags & VTD_FLAG_SVM_CAPABLE; in intel_svm_capable()
99 void intel_svm_check(struct intel_iommu *iommu) in intel_svm_check() argument
101 if (!pasid_supported(iommu)) in intel_svm_check()
105 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
107 iommu->name); in intel_svm_check()
112 !cap_5lp_support(iommu->cap)) { in intel_svm_check()
114 iommu->name); in intel_svm_check()
118 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
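
intel_svm_check() is what sets VTD_FLAG_SVM_CAPABLE, the flag intel_svm_capable() tests, and it only does so when the IOMMU's first-level capabilities match what the CPU is actually using. A sketch of that gate follows; the two cpu_feature_enabled() guards and the exact message wording are assumptions, since the listing only shows the capability-register halves of the conditions.

void intel_svm_check(struct intel_iommu *iommu)
{
        if (!pasid_supported(iommu))
                return;

        /* Assumed guard: CPU 1GB pages require first-level 1GB support in the IOMMU */
        if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
            !cap_fl1gp_support(iommu->cap)) {
                pr_err("%s SVM disabled, incompatible 1GB page capability\n",
                       iommu->name);
                return;
        }

        /* Assumed guard: 5-level paging on the CPU requires 5-level first-level support */
        if (cpu_feature_enabled(X86_FEATURE_LA57) &&
            !cap_5lp_support(iommu->cap)) {
                pr_err("%s SVM disabled, incompatible paging mode\n",
                       iommu->name);
                return;
        }

        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}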
131 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
133 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
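
__flush_svm_range_dev() pairs a PASID-granular IOTLB flush on the IOMMU with a device-TLB flush over ATS; the second call is truncated in the listing, so the sketch below fills in assumed arguments (the ats_enabled guard, queue depth and size order) purely for illustration.

static void flush_svm_range_dev_sketch(struct intel_svm *svm,
                                       struct intel_svm_dev *sdev,
                                       unsigned long address,
                                       unsigned long pages, int ih)
{
        struct device_domain_info *info = get_domain_info(sdev->dev);

        /* PASID-granular IOTLB flush on the IOMMU itself (from the listing) */
        qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);

        /* Assumed: mirror the flush to the device TLB only when ATS is enabled */
        if (info && info->ats_enabled)
                qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
                                         svm->pasid, sdev->qdep, address,
                                         order_base_2(pages));
}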
195 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
260 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_bind_gpasid() local
268 if (WARN_ON(!iommu) || !data) in intel_svm_bind_gpasid()
274 /* IOMMU core ensures argsz is more than the start of the union */ in intel_svm_bind_gpasid()
348 sdev->iommu = iommu; in intel_svm_bind_gpasid()
355 ret = intel_iommu_enable_pasid(iommu, sdev->dev); in intel_svm_bind_gpasid()
367 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_gpasid()
368 ret = intel_pasid_setup_nested(iommu, dev, in intel_svm_bind_gpasid()
372 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_gpasid()
401 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_unbind_gpasid() local
406 if (WARN_ON(!iommu)) in intel_svm_unbind_gpasid()
419 intel_pasid_tear_down_entry(iommu, dev, in intel_svm_unbind_gpasid()
427 * IOMMU driver did not allocate it. in intel_svm_unbind_gpasid()
431 * the unbind, IOMMU driver will get notified in intel_svm_unbind_gpasid()
468 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_bind_mm() local
476 if (!iommu || dmar_disabled) in intel_svm_bind_mm()
479 if (!intel_svm_capable(iommu)) in intel_svm_bind_mm()
491 if (!ecap_srs(iommu->ecap) || mm) { in intel_svm_bind_mm()
533 sdev->iommu = iommu; in intel_svm_bind_mm()
535 ret = intel_iommu_enable_pasid(iommu, dev); in intel_svm_bind_mm()
592 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_mm()
593 ret = intel_pasid_setup_first_level(iommu, dev, in intel_svm_bind_mm()
599 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_mm()
619 spin_lock_irqsave(&iommu->lock, iflags); in intel_svm_bind_mm()
620 ret = intel_pasid_setup_first_level(iommu, dev, in intel_svm_bind_mm()
626 spin_unlock_irqrestore(&iommu->lock, iflags); in intel_svm_bind_mm()
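
Both intel_svm_bind_mm() call sites follow the same locked pattern: take iommu->lock with interrupts off, install a first-level (process) page table for the PASID, drop the lock. The argument list is truncated in the listing, so in the sketch below the pgd choice, FLPT_DEFAULT_DID and the PASID_FLAG_FL5LP flag are assumptions.

static int svm_setup_first_level_sketch(struct intel_iommu *iommu,
                                        struct device *dev,
                                        struct mm_struct *mm, u32 pasid)
{
        unsigned long iflags;
        int ret;

        spin_lock_irqsave(&iommu->lock, iflags);
        /* Assumed arguments: the user mm's pgd (or init_mm for supervisor PASIDs),
         * the default first-level domain ID, and the 5-level flag when the CPU
         * runs with LA57 enabled. */
        ret = intel_pasid_setup_first_level(iommu, dev,
                                            mm ? mm->pgd : init_mm.pgd,
                                            pasid, FLPT_DEFAULT_DID,
                                            cpu_feature_enabled(X86_FEATURE_LA57) ?
                                            PASID_FLAG_FL5LP : 0);
        spin_unlock_irqrestore(&iommu->lock, iflags);

        return ret;
}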
647 struct intel_iommu *iommu; in intel_svm_unbind_mm() local
651 iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_unbind_mm()
652 if (!iommu) in intel_svm_unbind_mm()
670 intel_pasid_tear_down_entry(iommu, dev, in intel_svm_unbind_mm()
770 struct intel_iommu *iommu; in intel_svm_drain_prq() local
784 iommu = info->iommu; in intel_svm_drain_prq()
788 did = domain->iommu_did[iommu->seq_id]; in intel_svm_drain_prq()
796 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
797 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
798 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_svm_drain_prq()
802 req = &iommu->prq[head / sizeof(*req)]; in intel_svm_drain_prq()
808 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
830 reinit_completion(&iommu->prq_complete); in intel_svm_drain_prq()
831 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_svm_drain_prq()
832 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_svm_drain_prq()
833 wait_for_completion(&iommu->prq_complete); in intel_svm_drain_prq()
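
intel_svm_drain_prq() waits in two phases: first until prq_event_thread() has consumed whatever still sits between the hardware head and tail, then, after pushing three drain descriptors with QI_OPT_WAIT_DRAIN, until a pending-request overflow (DMA_PRS_PRO) is cleared. The sketch below simplifies the first phase to a head == tail test; the real function inspects each outstanding descriptor for the device and PASID being drained.

static void drain_prq_sketch(struct intel_iommu *iommu, struct qi_desc *desc)
{
        u64 head, tail;

        /* Phase 1: wait until the PRQ thread has emptied the hardware ring */
        for (;;) {
                reinit_completion(&iommu->prq_complete);
                tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
                head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
                if (head == tail)
                        break;
                /* Requests are still queued; prq_event_thread() will complete us */
                wait_for_completion(&iommu->prq_complete);
        }

        /* Phase 2: submit the three drain descriptors, then wait again if the
         * hardware still reports a pending overflow condition. */
        reinit_completion(&iommu->prq_complete);
        qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
                wait_for_completion(&iommu->prq_complete);
}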
895 struct intel_iommu *iommu = d; in prq_event_thread() local
901 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
903 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
904 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
915 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
921 iommu->name, ((unsigned long long *)req)[0], in prq_event_thread()
945 iommu->name, req->pasid, ((unsigned long long *)req)[0], in prq_event_thread()
976 * If prq is to be handled outside iommu driver via receiver of in prq_event_thread()
1043 qi_submit_sync(iommu, &resp, 1, 0); in prq_event_thread()
1049 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
1055 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
1056 pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n", in prq_event_thread()
1057 iommu->name); in prq_event_thread()
1058 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
1059 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
1061 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
1062 pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared", in prq_event_thread()
1063 iommu->name); in prq_event_thread()
1067 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
1068 complete(&iommu->prq_complete); in prq_event_thread()
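
The prq_event_thread() matches outline the interrupt thread itself: acknowledge DMA_PRS_PPR, walk the ring from head to tail, respond to each request, advance the hardware head, deal with an overflow, and wake any drain waiter. The skeleton below is assembled from those lines; the per-request handling is elided and the head == tail test before clearing the overflow bit is an assumption.

static irqreturn_t prq_event_thread_sketch(int irq, void *d)
{
        struct intel_iommu *iommu = d;
        u64 head, tail;

        /* Acknowledge the pending-page-request condition before reading the ring */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;

        while (head != tail) {
                struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];

                /* ... validate the request, fault the page in or build a
                 * response, and push it with qi_submit_sync(iommu, &resp, 1, 0) ... */

                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        /* Tell the hardware everything up to the old tail has been consumed */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        /* Clear a queue overflow once the ring is empty again (assumed condition) */
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
                pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
                                    iommu->name);
                head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
                tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
                if (head == tail) {
                        writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
                        pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
                                            iommu->name);
                }
        }

        /* Wake anyone in intel_svm_drain_prq() waiting for the ring to drain */
        if (!completion_done(&iommu->prq_complete))
                complete(&iommu->prq_complete);

        return IRQ_HANDLED;
}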
1083 * TODO: Consolidate with generic iommu-sva bind after it is merged. in intel_svm_bind()
1133 struct intel_iommu *iommu; in intel_svm_page_response() local
1144 iommu = device_to_iommu(dev, &bus, &devfn); in intel_svm_page_response()
1145 if (!iommu) in intel_svm_page_response()
1218 qi_submit_sync(iommu, &desc, 1, 0); in intel_svm_page_response()
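
intel_svm_page_response() ends by pushing a single page-group response descriptor through the invalidation queue. A minimal sketch of that last step follows; the QI_PGRP_* field encoding and the PCI_DEVID() source-id are assumptions, only the device_to_iommu() lookup and the qi_submit_sync() call appear in the listing.

static int svm_page_response_sketch(struct device *dev, u32 pasid,
                                    u16 grpid, u16 resp_code)
{
        struct qi_desc desc = {};
        u8 bus, devfn;
        struct intel_iommu *iommu = device_to_iommu(dev, &bus, &devfn);

        if (!iommu)
                return -ENODEV;

        /* Assumed layout: a page-group response carrying the PASID, the
         * requester's source-id and the response code (success/invalid). */
        desc.qw0 = QI_PGRP_PASID(pasid) | QI_PGRP_DID(PCI_DEVID(bus, devfn)) |
                   QI_PGRP_PASID_P(1) | QI_PGRP_RESP_CODE(resp_code) |
                   QI_PGRP_RESP_TYPE;
        desc.qw1 = QI_PGRP_IDX(grpid) | QI_PGRP_LPIG(1);

        qi_submit_sync(iommu, &desc, 1, 0);
        return 0;
}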