Lines matching "smmu" in arm-smmu.c
3 * IOMMU API for ARM architected SMMU implementations.
13 * - Non-secure access to the SMMU
18 #define pr_fmt(fmt) "arm-smmu: " fmt
44 #include "arm-smmu.h"
47 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
61 …"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' f…
66 …domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
74 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) in arm_smmu_rpm_get() argument
76 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_get()
77 return pm_runtime_resume_and_get(smmu->dev); in arm_smmu_rpm_get()
82 static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu) in arm_smmu_rpm_put() argument
84 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_put()
85 pm_runtime_put_autosuspend(smmu->dev); in arm_smmu_rpm_put()
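
The two rpm helpers above bracket every hardware access with a runtime-PM reference, so the SMMU's clocks and power are up before registers are touched, and only when runtime PM is actually enabled for the device. A minimal standalone sketch of that guard pattern, with the pm_runtime_* calls mocked out (every name in it is illustrative, not the driver's):

    #include <stdio.h>

    /* Mocked runtime-PM state: just an enable flag and a usage refcount. */
    struct mock_dev { int rpm_enabled; int usage; };

    static int  mock_resume_and_get(struct mock_dev *d) { d->usage++; return 0; }
    static void mock_put(struct mock_dev *d)            { d->usage--; }

    /* Take a PM reference only when runtime PM is enabled, so the same
     * call sites work on platforms with no power management at all. */
    static int rpm_get(struct mock_dev *d)
    {
        return d->rpm_enabled ? mock_resume_and_get(d) : 0;
    }

    static void rpm_put(struct mock_dev *d)
    {
        if (d->rpm_enabled)
            mock_put(d);
    }

    int main(void)
    {
        struct mock_dev d = { .rpm_enabled = 1 };
        if (rpm_get(&d))
            return 1;
        /* ... MMIO accesses would go here ... */
        rpm_put(&d);
        printf("usage back to %d\n", d.usage);   /* 0 */
        return 0;
    }
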
135 struct arm_smmu_device **smmu) in arm_smmu_register_legacy_master() argument
178 *smmu = dev_get_drvdata(smmu_dev); in arm_smmu_register_legacy_master()
188 * delay setting bus ops until we're sure every possible SMMU is ready,
200 struct arm_smmu_device **smmu) in arm_smmu_register_legacy_master() argument
212 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page, in __arm_smmu_tlb_sync() argument
218 if (smmu->impl && unlikely(smmu->impl->tlb_sync)) in __arm_smmu_tlb_sync()
219 return smmu->impl->tlb_sync(smmu, page, sync, status); in __arm_smmu_tlb_sync()
221 arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL); in __arm_smmu_tlb_sync()
224 reg = arm_smmu_readl(smmu, page, status); in __arm_smmu_tlb_sync()
231 dev_err_ratelimited(smmu->dev, in __arm_smmu_tlb_sync()
232 "TLB sync timed out -- SMMU may be deadlocked\n"); in __arm_smmu_tlb_sync()
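
A sync is issued by writing a dummy value to the sync register, then spinning on the status register until the hardware clears its active bit, giving up after a bounded number of polls, hence the warning about a possible deadlock. A rough standalone model of that poll loop, with the register simulated and the bit name and retry count invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define TLBSTATUS_ACTIVE (1u << 0)     /* invented "sync in progress" bit */

    static uint32_t fake_status = TLBSTATUS_ACTIVE;

    static uint32_t read_status(void)
    {
        static int polls;
        if (++polls > 3)                   /* simulate HW finishing after a few reads */
            fake_status &= ~TLBSTATUS_ACTIVE;
        return fake_status;
    }

    int main(void)
    {
        /* the driver first writes QCOM_DUMMY_VAL to the sync register, then: */
        for (int spin = 0; spin < 1000; spin++) {
            if (!(read_status() & TLBSTATUS_ACTIVE)) {
                puts("TLB sync complete");
                return 0;
            }
            /* the real loop interleaves cpu_relax() and delay backoff here */
        }
        puts("TLB sync timed out -- SMMU may be deadlocked");
        return 1;
    }
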
235 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) in arm_smmu_tlb_sync_global() argument
239 spin_lock_irqsave(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
240 __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC, in arm_smmu_tlb_sync_global()
242 spin_unlock_irqrestore(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
247 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_sync_context() local
251 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx), in arm_smmu_tlb_sync_context()
264 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, in arm_smmu_tlb_inv_context_s1()
272 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context_s2() local
276 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_inv_context_s2()
277 arm_smmu_tlb_sync_global(smmu); in arm_smmu_tlb_inv_context_s2()
284 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s1() local
288 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s1()
295 arm_smmu_cb_write(smmu, idx, reg, iova); in arm_smmu_tlb_inv_range_s1()
302 arm_smmu_cb_writeq(smmu, idx, reg, iova); in arm_smmu_tlb_inv_range_s1()
312 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s2() local
315 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s2()
321 arm_smmu_cb_writeq(smmu, idx, reg, iova); in arm_smmu_tlb_inv_range_s2()
323 arm_smmu_cb_write(smmu, idx, reg, iova); in arm_smmu_tlb_inv_range_s2()
377 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_add_page_s2_v1() local
379 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_add_page_s2_v1()
382 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_add_page_s2_v1()
409 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_context_fault() local
412 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); in arm_smmu_context_fault()
416 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); in arm_smmu_context_fault()
417 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); in arm_smmu_context_fault()
418 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx)); in arm_smmu_context_fault()
420 dev_err_ratelimited(smmu->dev, in arm_smmu_context_fault()
424 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); in arm_smmu_context_fault()
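
The context-fault handler follows the usual syndrome-register sequence: read the fault status (FSR), bail out if no fault bit is latched, read the syndrome (FSYNR0) and faulting address (FAR), log them rate-limited, then write the FSR value back to acknowledge, since its fault bits are write-one-to-clear. A compact sketch of that sequence against fake registers (the values and the fault mask are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define FSR_FAULT 0x3fffu            /* invented "any fault bit latched" mask */

    static uint32_t fake_fsr   = 0x2;    /* pretend a translation fault is pending */
    static uint32_t fake_fsynr = 0x11;
    static uint64_t fake_far   = 0xdead0000;

    static int context_fault(void)
    {
        uint32_t fsr = fake_fsr;                      /* 1: read the status */

        if (!(fsr & FSR_FAULT))
            return 0;                                 /* IRQ_NONE: not our fault */

        fprintf(stderr,                               /* 2: report the syndrome */
                "Unhandled context fault: fsr=0x%x, iova=0x%llx, fsynr=0x%x\n",
                fsr, (unsigned long long)fake_far, fake_fsynr);

        fake_fsr &= ~fsr;                             /* 3: write-one-to-clear ack */
        return 1;                                     /* IRQ_HANDLED */
    }

    int main(void) { return context_fault() ? 0 : 1; }
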
431 struct arm_smmu_device *smmu = dev; in arm_smmu_global_fault() local
435 gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); in arm_smmu_global_fault()
436 gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0); in arm_smmu_global_fault()
437 gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1); in arm_smmu_global_fault()
438 gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2); in arm_smmu_global_fault()
446 dev_err(smmu->dev, in arm_smmu_global_fault()
447 …"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may h… in arm_smmu_global_fault()
450 dev_err(smmu->dev, in arm_smmu_global_fault()
452 dev_err(smmu->dev, in arm_smmu_global_fault()
457 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr); in arm_smmu_global_fault()
465 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; in arm_smmu_init_context_bank()
518 void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_context_bank() argument
522 struct arm_smmu_cb *cb = &smmu->cbs[idx]; in arm_smmu_write_context_bank()
527 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0); in arm_smmu_write_context_bank()
534 if (smmu->version > ARM_SMMU_V1) { in arm_smmu_write_context_bank()
540 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_write_context_bank()
543 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg); in arm_smmu_write_context_bank()
548 if (smmu->version < ARM_SMMU_V2) in arm_smmu_write_context_bank()
560 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { in arm_smmu_write_context_bank()
564 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg); in arm_smmu_write_context_bank()
571 if (stage1 && smmu->version > ARM_SMMU_V1) in arm_smmu_write_context_bank()
572 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]); in arm_smmu_write_context_bank()
573 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]); in arm_smmu_write_context_bank()
577 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid); in arm_smmu_write_context_bank()
578 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
579 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]); in arm_smmu_write_context_bank()
581 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
583 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1, in arm_smmu_write_context_bank()
589 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]); in arm_smmu_write_context_bank()
590 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]); in arm_smmu_write_context_bank()
601 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg); in arm_smmu_write_context_bank()
605 struct arm_smmu_device *smmu, in arm_smmu_alloc_context_bank() argument
608 if (smmu->impl && smmu->impl->alloc_context_bank) in arm_smmu_alloc_context_bank()
609 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_alloc_context_bank()
611 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks); in arm_smmu_alloc_context_bank()
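
When no implementation hook overrides it, context banks come from a plain bitmap allocator: find the first clear bit at or above start and claim it. A simplified, non-atomic standalone version (the kernel helper operates on smmu->context_map and must be safe against concurrent allocators):

    #include <stdint.h>
    #include <stdio.h>

    /* Claim the first free slot in [start, nbits); -1 if none. The kernel
     * version uses test_and_set_bit() so concurrent callers cannot race. */
    static int alloc_bitmap(uint64_t *map, int start, int nbits)
    {
        for (int i = start; i < nbits; i++) {
            if (!(*map & (1ull << i))) {
                *map |= 1ull << i;
                return i;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint64_t context_map = 0;
        /* stage-1 domains pass start = num_s2_context_banks, because the
         * first banks are stage-2 only; stage-2 domains may start at 0 */
        printf("bank %d\n", alloc_bitmap(&context_map, 2, 8));   /* 2 */
        printf("bank %d\n", alloc_bitmap(&context_map, 2, 8));   /* 3 */
        return 0;
    }
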
615 struct arm_smmu_device *smmu, in arm_smmu_init_domain_context() argument
628 if (smmu_domain->smmu) in arm_smmu_init_domain_context()
633 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
655 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_init_domain_context()
657 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_init_domain_context()
668 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) in arm_smmu_init_domain_context()
672 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) && in arm_smmu_init_domain_context()
676 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | in arm_smmu_init_domain_context()
689 start = smmu->num_s2_context_banks; in arm_smmu_init_domain_context()
690 ias = smmu->va_size; in arm_smmu_init_domain_context()
691 oas = smmu->ipa_size; in arm_smmu_init_domain_context()
713 ias = smmu->ipa_size; in arm_smmu_init_domain_context()
714 oas = smmu->pa_size; in arm_smmu_init_domain_context()
722 if (smmu->version == ARM_SMMU_V2) in arm_smmu_init_domain_context()
732 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_init_domain_context()
737 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
740 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_init_domain_context()
741 cfg->irptndx = atomic_inc_return(&smmu->irptndx); in arm_smmu_init_domain_context()
742 cfg->irptndx %= smmu->num_context_irqs; in arm_smmu_init_domain_context()
753 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_init_domain_context()
756 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, in arm_smmu_init_domain_context()
758 .iommu_dev = smmu->dev, in arm_smmu_init_domain_context()
761 if (smmu->impl && smmu->impl->init_context) { in arm_smmu_init_domain_context()
762 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev); in arm_smmu_init_domain_context()
790 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_init_domain_context()
796 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; in arm_smmu_init_domain_context()
798 if (smmu->impl && smmu->impl->context_fault) in arm_smmu_init_domain_context()
799 context_fault = smmu->impl->context_fault; in arm_smmu_init_domain_context()
803 ret = devm_request_irq(smmu->dev, irq, context_fault, in arm_smmu_init_domain_context()
804 IRQF_SHARED, "arm-smmu-context-fault", domain); in arm_smmu_init_domain_context()
806 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", in arm_smmu_init_domain_context()
818 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_init_domain_context()
819 smmu_domain->smmu = NULL; in arm_smmu_init_domain_context()
828 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_destroy_domain_context() local
832 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_destroy_domain_context()
835 ret = arm_smmu_rpm_get(smmu); in arm_smmu_destroy_domain_context()
843 smmu->cbs[cfg->cbndx].cfg = NULL; in arm_smmu_destroy_domain_context()
844 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_destroy_domain_context()
847 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; in arm_smmu_destroy_domain_context()
848 devm_free_irq(smmu->dev, irq, domain); in arm_smmu_destroy_domain_context()
852 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_destroy_domain_context()
854 arm_smmu_rpm_put(smmu); in arm_smmu_destroy_domain_context()
899 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_smr() argument
901 struct arm_smmu_smr *smr = smmu->smrs + idx; in arm_smmu_write_smr()
905 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) in arm_smmu_write_smr()
907 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg); in arm_smmu_write_smr()
910 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_s2cr() argument
912 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; in arm_smmu_write_s2cr()
915 if (smmu->impl && smmu->impl->write_s2cr) { in arm_smmu_write_s2cr()
916 smmu->impl->write_s2cr(smmu, idx); in arm_smmu_write_s2cr()
924 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && in arm_smmu_write_s2cr()
925 smmu->smrs[idx].valid) in arm_smmu_write_s2cr()
927 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg); in arm_smmu_write_s2cr()
930 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_sme() argument
932 arm_smmu_write_s2cr(smmu, idx); in arm_smmu_write_sme()
933 if (smmu->smrs) in arm_smmu_write_sme()
934 arm_smmu_write_smr(smmu, idx); in arm_smmu_write_sme()
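
write_smr and write_s2cr each compose a register image from bitfields before a single MMIO write; for an SMR (without EXIDS) that is the stream ID in the low half, the match mask above it, and a valid bit at bit 31. A standalone illustration of the packing and of what the mask buys you (the macros are hand-rolled stand-ins for the driver's FIELD_PREP usage):

    #include <stdint.h>
    #include <stdio.h>

    #define SMR_ID(x)    ((uint32_t)(x) & 0x7fff)           /* stream ID, bits [14:0] */
    #define SMR_MASK(x)  (((uint32_t)(x) & 0x7fff) << 16)   /* match mask, bits [30:16] */
    #define SMR_VALID    (1u << 31)

    int main(void)
    {
        uint32_t smr = SMR_ID(0x42) | SMR_MASK(0x3) | SMR_VALID;
        printf("SMR image: 0x%08x\n", smr);

        /* Matching rule: StreamID s hits this entry when (s ^ id) & ~mask == 0.
         * Masked bits are "don't care", so one entry covers 2^popcount(mask) IDs. */
        return 0;
    }
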
941 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) in arm_smmu_test_smr_masks() argument
946 if (!smmu->smrs) in arm_smmu_test_smr_masks()
956 for (i = 0; i < smmu->num_mapping_groups; i++) in arm_smmu_test_smr_masks()
957 if (!smmu->smrs[i].valid) in arm_smmu_test_smr_masks()
966 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask); in arm_smmu_test_smr_masks()
967 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr); in arm_smmu_test_smr_masks()
968 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i)); in arm_smmu_test_smr_masks()
969 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr); in arm_smmu_test_smr_masks()
971 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask); in arm_smmu_test_smr_masks()
972 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr); in arm_smmu_test_smr_masks()
973 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i)); in arm_smmu_test_smr_masks()
974 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr); in arm_smmu_test_smr_masks()
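
arm_smmu_test_smr_masks discovers how many ID and mask bits the hardware really implements by writing an all-ones pattern into a spare SMR and reading back what sticks, since unimplemented register bits read as zero. The same trick modelled with a fake register that implements only 10 bits per field:

    #include <stdint.h>
    #include <stdio.h>

    #define IMPLEMENTED 0x03ff03ffu   /* pretend HW: 10 ID bits, 10 mask bits */

    static uint32_t fake_smr;
    static void smr_write(uint32_t v) { fake_smr = v & IMPLEMENTED; }
    static uint32_t smr_read(void)    { return fake_smr; }

    int main(void)
    {
        /* all-ones into the ID field (bits [14:0]); unimplemented bits drop out */
        smr_write(0x7fff);
        uint32_t streamid_mask = smr_read() & 0x7fff;

        /* repeat for the mask field (bits [30:16]), as the driver does */
        smr_write(streamid_mask << 16);
        uint32_t smr_mask_mask = (smr_read() >> 16) & 0x7fff;

        printf("streamid_mask=0x%x, smr_mask_mask=0x%x\n",
               streamid_mask, smr_mask_mask);        /* both 0x3ff here */
        return 0;
    }
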
977 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) in arm_smmu_find_sme() argument
979 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_find_sme()
987 for (i = 0; i < smmu->num_mapping_groups; ++i) { in arm_smmu_find_sme()
1019 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx) in arm_smmu_free_sme() argument
1021 bool pinned = smmu->s2crs[idx].pinned; in arm_smmu_free_sme()
1022 u8 cbndx = smmu->s2crs[idx].cbndx; in arm_smmu_free_sme()
1024 if (--smmu->s2crs[idx].count) in arm_smmu_free_sme()
1027 smmu->s2crs[idx] = s2cr_init_val; in arm_smmu_free_sme()
1029 smmu->s2crs[idx].pinned = true; in arm_smmu_free_sme()
1030 smmu->s2crs[idx].cbndx = cbndx; in arm_smmu_free_sme()
1031 } else if (smmu->smrs) { in arm_smmu_free_sme()
1032 smmu->smrs[idx].valid = false; in arm_smmu_free_sme()
1042 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_alloc_smes() local
1043 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_master_alloc_smes()
1046 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1057 ret = arm_smmu_find_sme(smmu, sid, mask); in arm_smmu_master_alloc_smes()
1062 if (smrs && smmu->s2crs[idx].count == 0) { in arm_smmu_master_alloc_smes()
1067 smmu->s2crs[idx].count++; in arm_smmu_master_alloc_smes()
1073 arm_smmu_write_sme(smmu, idx); in arm_smmu_master_alloc_smes()
1075 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1080 arm_smmu_free_sme(smmu, cfg->smendx[i]); in arm_smmu_master_alloc_smes()
1083 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1090 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_free_smes() local
1093 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1095 if (arm_smmu_free_sme(smmu, idx)) in arm_smmu_master_free_smes()
1096 arm_smmu_write_sme(smmu, idx); in arm_smmu_master_free_smes()
1099 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
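
Stream-map entries are reference-counted under stream_map_mutex: arm_smmu_find_sme returns either an existing compatible entry or a free index, allocation bumps the count, and arm_smmu_free_sme tears the entry down only when the last reference drops (pinned entries keep their S2CR instead). A stripped-down model of that lifecycle, with the table size and field set reduced for brevity:

    #include <stdbool.h>
    #include <stdio.h>

    struct sme { int count; bool valid; unsigned short id; };

    static struct sme table[8];

    static int sme_get(unsigned short id)
    {
        int free_idx = -1;
        for (int i = 0; i < 8; i++) {
            if (table[i].valid && table[i].id == id) {
                table[i].count++;           /* share the existing entry */
                return i;
            }
            if (!table[i].valid && free_idx < 0)
                free_idx = i;
        }
        if (free_idx >= 0)                  /* claim a fresh entry */
            table[free_idx] = (struct sme){ .count = 1, .valid = true, .id = id };
        return free_idx;
    }

    static bool sme_put(int idx)            /* true if the entry was torn down */
    {
        if (--table[idx].count)
            return false;
        table[idx].valid = false;
        return true;
    }

    int main(void)
    {
        int a = sme_get(0x42), b = sme_get(0x42);   /* same stream: shared entry */
        printf("a=%d b=%d count=%d\n", a, b, table[a].count);
        sme_put(a);
        printf("torn down: %d\n", sme_put(b));      /* 1: last ref frees it */
        return 0;
    }
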
1106 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_add_master() local
1107 struct arm_smmu_s2cr *s2cr = smmu->s2crs; in arm_smmu_domain_add_master()
1128 arm_smmu_write_s2cr(smmu, idx); in arm_smmu_domain_add_master()
1138 struct arm_smmu_device *smmu; in arm_smmu_attach_dev() local
1142 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); in arm_smmu_attach_dev()
1157 smmu = cfg->smmu; in arm_smmu_attach_dev()
1159 ret = arm_smmu_rpm_get(smmu); in arm_smmu_attach_dev()
1164 ret = arm_smmu_init_domain_context(domain, smmu, dev); in arm_smmu_attach_dev()
1172 if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
1174 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", in arm_smmu_attach_dev()
1175 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); in arm_smmu_attach_dev()
1194 pm_runtime_set_autosuspend_delay(smmu->dev, 20); in arm_smmu_attach_dev()
1195 pm_runtime_use_autosuspend(smmu->dev); in arm_smmu_attach_dev()
1198 arm_smmu_rpm_put(smmu); in arm_smmu_attach_dev()
1207 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_map_pages() local
1213 arm_smmu_rpm_get(smmu); in arm_smmu_map_pages()
1215 arm_smmu_rpm_put(smmu); in arm_smmu_map_pages()
1225 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_unmap_pages() local
1231 arm_smmu_rpm_get(smmu); in arm_smmu_unmap_pages()
1233 arm_smmu_rpm_put(smmu); in arm_smmu_unmap_pages()
1241 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_flush_iotlb_all() local
1244 arm_smmu_rpm_get(smmu); in arm_smmu_flush_iotlb_all()
1246 arm_smmu_rpm_put(smmu); in arm_smmu_flush_iotlb_all()
1254 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iotlb_sync() local
1256 if (!smmu) in arm_smmu_iotlb_sync()
1259 arm_smmu_rpm_get(smmu); in arm_smmu_iotlb_sync()
1260 if (smmu->version == ARM_SMMU_V2 || in arm_smmu_iotlb_sync()
1264 arm_smmu_tlb_sync_global(smmu); in arm_smmu_iotlb_sync()
1265 arm_smmu_rpm_put(smmu); in arm_smmu_iotlb_sync()
1272 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iova_to_phys_hard() local
1275 struct device *dev = smmu->dev; in arm_smmu_iova_to_phys_hard()
1283 ret = arm_smmu_rpm_get(smmu); in arm_smmu_iova_to_phys_hard()
1290 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va); in arm_smmu_iova_to_phys_hard()
1292 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va); in arm_smmu_iova_to_phys_hard()
1294 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR; in arm_smmu_iova_to_phys_hard()
1301 arm_smmu_rpm_put(smmu); in arm_smmu_iova_to_phys_hard()
1305 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR); in arm_smmu_iova_to_phys_hard()
1315 arm_smmu_rpm_put(smmu); in arm_smmu_iova_to_phys_hard()
1332 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && in arm_smmu_iova_to_phys()
1344 * Return true here as the SMMU can always send out coherent in arm_smmu_capable()
1366 struct arm_smmu_device *smmu = NULL; in arm_smmu_probe_device() local
1372 ret = arm_smmu_register_legacy_master(dev, &smmu); in arm_smmu_probe_device()
1383 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
1393 if (sid & ~smmu->streamid_mask) { in arm_smmu_probe_device()
1394 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", in arm_smmu_probe_device()
1395 sid, smmu->streamid_mask); in arm_smmu_probe_device()
1398 if (mask & ~smmu->smr_mask_mask) { in arm_smmu_probe_device()
1399 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", in arm_smmu_probe_device()
1400 mask, smmu->smr_mask_mask); in arm_smmu_probe_device()
1411 cfg->smmu = smmu; in arm_smmu_probe_device()
1416 ret = arm_smmu_rpm_get(smmu); in arm_smmu_probe_device()
1421 arm_smmu_rpm_put(smmu); in arm_smmu_probe_device()
1426 device_link_add(dev, smmu->dev, in arm_smmu_probe_device()
1429 return &smmu->iommu; in arm_smmu_probe_device()
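
The stream-ID and SMR-mask range checks a few lines up are a complement-and test: a firmware-supplied value is out of range exactly when it has a bit set outside the implemented mask discovered during probe. A two-line worked example with assumed values:

    #include <stdio.h>

    int main(void)
    {
        unsigned streamid_mask = 0x3ff;   /* 10 implemented ID bits, say */
        unsigned ok  = 0x081;             /* 0x081 & ~0x3ff == 0     -> accepted */
        unsigned bad = 0x481;             /* bit 10 not implemented  -> rejected */

        printf("0x%x out of range? %d\n", ok,  !!(ok  & ~streamid_mask));  /* 0 */
        printf("0x%x out of range? %d\n", bad, !!(bad & ~streamid_mask));  /* 1 */
        return 0;
    }
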
1442 struct arm_smmu_device *smmu; in arm_smmu_release_device() local
1449 smmu = cfg->smmu; in arm_smmu_release_device()
1451 ret = arm_smmu_rpm_get(smmu); in arm_smmu_release_device()
1457 arm_smmu_rpm_put(smmu); in arm_smmu_release_device()
1468 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_device_group() local
1473 if (group && smmu->s2crs[idx].group && in arm_smmu_device_group()
1474 group != smmu->s2crs[idx].group) in arm_smmu_device_group()
1477 group = smmu->s2crs[idx].group; in arm_smmu_device_group()
1493 smmu->s2crs[idx].group = group; in arm_smmu_device_group()
1539 if (smmu_domain->smmu) { in arm_smmu_domain_set_attr()
1604 const struct arm_smmu_impl *impl = cfg->smmu->impl; in arm_smmu_def_domain_type()
1634 static void arm_smmu_device_reset(struct arm_smmu_device *smmu) in arm_smmu_device_reset() argument
1640 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); in arm_smmu_device_reset()
1641 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg); in arm_smmu_device_reset()
1647 for (i = 0; i < smmu->num_mapping_groups; ++i) in arm_smmu_device_reset()
1648 arm_smmu_write_sme(smmu, i); in arm_smmu_device_reset()
1651 for (i = 0; i < smmu->num_context_banks; ++i) { in arm_smmu_device_reset()
1652 arm_smmu_write_context_bank(smmu, i); in arm_smmu_device_reset()
1653 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); in arm_smmu_device_reset()
1657 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL); in arm_smmu_device_reset()
1658 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL); in arm_smmu_device_reset()
1660 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0); in arm_smmu_device_reset()
1682 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_device_reset()
1685 if (smmu->features & ARM_SMMU_FEAT_EXIDS) in arm_smmu_device_reset()
1688 if (smmu->impl && smmu->impl->reset) in arm_smmu_device_reset()
1689 smmu->impl->reset(smmu); in arm_smmu_device_reset()
1692 arm_smmu_tlb_sync_global(smmu); in arm_smmu_device_reset()
1693 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg); in arm_smmu_device_reset()
1715 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) in arm_smmu_device_cfg_probe() argument
1719 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_cfg_probe()
1722 dev_notice(smmu->dev, "probing hardware configuration...\n"); in arm_smmu_device_cfg_probe()
1723 dev_notice(smmu->dev, "SMMUv%d with:\n", in arm_smmu_device_cfg_probe()
1724 smmu->version == ARM_SMMU_V2 ? 2 : 1); in arm_smmu_device_cfg_probe()
1727 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0); in arm_smmu_device_cfg_probe()
1736 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_cfg_probe()
1737 dev_notice(smmu->dev, "\tstage 1 translation\n"); in arm_smmu_device_cfg_probe()
1741 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_cfg_probe()
1742 dev_notice(smmu->dev, "\tstage 2 translation\n"); in arm_smmu_device_cfg_probe()
1746 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; in arm_smmu_device_cfg_probe()
1747 dev_notice(smmu->dev, "\tnested translation\n"); in arm_smmu_device_cfg_probe()
1750 if (!(smmu->features & in arm_smmu_device_cfg_probe()
1752 dev_err(smmu->dev, "\tno translation support!\n"); in arm_smmu_device_cfg_probe()
1757 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) { in arm_smmu_device_cfg_probe()
1758 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; in arm_smmu_device_cfg_probe()
1759 dev_notice(smmu->dev, "\taddress translation ops\n"); in arm_smmu_device_cfg_probe()
1770 dev_notice(smmu->dev, "\t%scoherent table walk\n", in arm_smmu_device_cfg_probe()
1773 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1777 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) { in arm_smmu_device_cfg_probe()
1778 smmu->features |= ARM_SMMU_FEAT_EXIDS; in arm_smmu_device_cfg_probe()
1783 smmu->streamid_mask = size - 1; in arm_smmu_device_cfg_probe()
1785 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; in arm_smmu_device_cfg_probe()
1788 dev_err(smmu->dev, in arm_smmu_device_cfg_probe()
1794 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), in arm_smmu_device_cfg_probe()
1796 if (!smmu->smrs) in arm_smmu_device_cfg_probe()
1799 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1803 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), in arm_smmu_device_cfg_probe()
1805 if (!smmu->s2crs) in arm_smmu_device_cfg_probe()
1808 smmu->s2crs[i] = s2cr_init_val; in arm_smmu_device_cfg_probe()
1810 smmu->num_mapping_groups = size; in arm_smmu_device_cfg_probe()
1811 mutex_init(&smmu->stream_map_mutex); in arm_smmu_device_cfg_probe()
1812 spin_lock_init(&smmu->global_sync_lock); in arm_smmu_device_cfg_probe()
1814 if (smmu->version < ARM_SMMU_V2 || in arm_smmu_device_cfg_probe()
1816 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; in arm_smmu_device_cfg_probe()
1818 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S; in arm_smmu_device_cfg_probe()
1822 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1); in arm_smmu_device_cfg_probe()
1823 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12; in arm_smmu_device_cfg_probe()
1825 /* Check for size mismatch of SMMU address space from mapped region */ in arm_smmu_device_cfg_probe()
1827 if (smmu->numpage != 2 * size << smmu->pgshift) in arm_smmu_device_cfg_probe()
1828 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1829 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n", in arm_smmu_device_cfg_probe()
1830 2 * size << smmu->pgshift, smmu->numpage); in arm_smmu_device_cfg_probe()
1832 smmu->numpage = size; in arm_smmu_device_cfg_probe()
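
The size-mismatch warning is plain arithmetic: ID1 encodes NUMPAGENDXB, the translation address space spans 2^(NUMPAGENDXB+1) pages of 4 KiB or 64 KiB (per ID1.PAGESIZE), and the whole register region is twice that many pages, because the second half holds the context banks. A worked example, assuming NUMPAGENDXB = 3 and 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
        unsigned numpagendxb = 3;                    /* decoded from ID1, say */
        unsigned pgshift = 12;                       /* ID1.PAGESIZE clear: 4 KiB pages */
        unsigned size = 1u << (numpagendxb + 1);     /* 16 translation pages */

        /* global register space + context banks: 2 * size pages in total */
        unsigned long expected = 2ul * size << pgshift;
        printf("expected mapped region: %lu KiB\n", expected / 1024);   /* 128 */
        return 0;
    }
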
1834 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id); in arm_smmu_device_cfg_probe()
1835 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id); in arm_smmu_device_cfg_probe()
1836 if (smmu->num_s2_context_banks > smmu->num_context_banks) { in arm_smmu_device_cfg_probe()
1837 dev_err(smmu->dev, "impossible number of S2 context banks!\n"); in arm_smmu_device_cfg_probe()
1840 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", in arm_smmu_device_cfg_probe()
1841 smmu->num_context_banks, smmu->num_s2_context_banks); in arm_smmu_device_cfg_probe()
1842 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, in arm_smmu_device_cfg_probe()
1843 sizeof(*smmu->cbs), GFP_KERNEL); in arm_smmu_device_cfg_probe()
1844 if (!smmu->cbs) in arm_smmu_device_cfg_probe()
1848 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2); in arm_smmu_device_cfg_probe()
1850 smmu->ipa_size = size; in arm_smmu_device_cfg_probe()
1854 smmu->pa_size = size; in arm_smmu_device_cfg_probe()
1857 smmu->features |= ARM_SMMU_FEAT_VMID16; in arm_smmu_device_cfg_probe()
1864 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) in arm_smmu_device_cfg_probe()
1865 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1868 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_device_cfg_probe()
1869 smmu->va_size = smmu->ipa_size; in arm_smmu_device_cfg_probe()
1870 if (smmu->version == ARM_SMMU_V1_64K) in arm_smmu_device_cfg_probe()
1871 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1874 smmu->va_size = arm_smmu_id_size_to_bits(size); in arm_smmu_device_cfg_probe()
1876 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; in arm_smmu_device_cfg_probe()
1878 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K; in arm_smmu_device_cfg_probe()
1880 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1883 if (smmu->impl && smmu->impl->cfg_probe) { in arm_smmu_device_cfg_probe()
1884 ret = smmu->impl->cfg_probe(smmu); in arm_smmu_device_cfg_probe()
1890 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) in arm_smmu_device_cfg_probe()
1891 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M; in arm_smmu_device_cfg_probe()
1892 if (smmu->features & in arm_smmu_device_cfg_probe()
1894 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_cfg_probe()
1895 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K) in arm_smmu_device_cfg_probe()
1896 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_cfg_probe()
1897 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) in arm_smmu_device_cfg_probe()
1898 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_cfg_probe()
1901 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1903 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1904 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", in arm_smmu_device_cfg_probe()
1905 smmu->pgsize_bitmap); in arm_smmu_device_cfg_probe()
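
The supported-page-size bitmap is accumulated one bit per size: each detected page-table format ORs in its leaf and block sizes (4K/2M/1G for the AArch64 4K granule, 16K/32M and 64K/512M for the larger granules, plus the short-descriptor sizes), and the result is what the map path is later allowed to use. A compact illustration with a couple of the SZ_* constants written out:

    #include <stdio.h>

    #define SZ_4K   0x00001000ul
    #define SZ_64K  0x00010000ul
    #define SZ_2M   0x00200000ul
    #define SZ_512M 0x20000000ul
    #define SZ_1G   0x40000000ul

    int main(void)
    {
        unsigned long pgsize_bitmap = 0;

        pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;   /* AArch64 4K granule */
        pgsize_bitmap |= SZ_64K | SZ_512M;        /* AArch64 64K granule */

        printf("pgsize_bitmap = 0x%08lx\n", pgsize_bitmap);
        return 0;
    }
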
1908 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) in arm_smmu_device_cfg_probe()
1909 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", in arm_smmu_device_cfg_probe()
1910 smmu->va_size, smmu->ipa_size); in arm_smmu_device_cfg_probe()
1912 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) in arm_smmu_device_cfg_probe()
1913 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", in arm_smmu_device_cfg_probe()
1914 smmu->ipa_size, smmu->pa_size); in arm_smmu_device_cfg_probe()
1935 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1936 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1940 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1941 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1942 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1948 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) in acpi_smmu_get_data() argument
1955 smmu->version = ARM_SMMU_V1; in acpi_smmu_get_data()
1956 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1959 smmu->version = ARM_SMMU_V1_64K; in acpi_smmu_get_data()
1960 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1963 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1964 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1967 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1968 smmu->model = ARM_MMU500; in acpi_smmu_get_data()
1971 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1972 smmu->model = CAVIUM_SMMUV2; in acpi_smmu_get_data()
1982 struct arm_smmu_device *smmu) in arm_smmu_device_acpi_probe() argument
1984 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
1993 ret = acpi_smmu_get_data(iort_smmu->model, smmu); in arm_smmu_device_acpi_probe()
1998 smmu->num_global_irqs = 1; in arm_smmu_device_acpi_probe()
2001 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_acpi_probe()
2007 struct arm_smmu_device *smmu) in arm_smmu_device_acpi_probe() argument
2014 struct arm_smmu_device *smmu) in arm_smmu_device_dt_probe() argument
2021 &smmu->num_global_irqs)) { in arm_smmu_device_dt_probe()
2027 smmu->version = data->version; in arm_smmu_device_dt_probe()
2028 smmu->model = data->model; in arm_smmu_device_dt_probe()
2034 IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU"); in arm_smmu_device_dt_probe()
2045 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_dt_probe()
2100 struct arm_smmu_device *smmu; in arm_smmu_device_probe() local
2105 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); in arm_smmu_device_probe()
2106 if (!smmu) { in arm_smmu_device_probe()
2110 smmu->dev = dev; in arm_smmu_device_probe()
2113 err = arm_smmu_device_dt_probe(pdev, smmu); in arm_smmu_device_probe()
2115 err = arm_smmu_device_acpi_probe(pdev, smmu); in arm_smmu_device_probe()
2120 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in arm_smmu_device_probe()
2121 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
2122 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
2128 smmu->numpage = resource_size(res); in arm_smmu_device_probe()
2130 smmu = arm_smmu_impl_init(smmu); in arm_smmu_device_probe()
2131 if (IS_ERR(smmu)) in arm_smmu_device_probe()
2132 return PTR_ERR(smmu); in arm_smmu_device_probe()
2137 if (num_irqs > smmu->num_global_irqs) in arm_smmu_device_probe()
2138 smmu->num_context_irqs++; in arm_smmu_device_probe()
2141 if (!smmu->num_context_irqs) { in arm_smmu_device_probe()
2143 num_irqs, smmu->num_global_irqs + 1); in arm_smmu_device_probe()
2147 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs), in arm_smmu_device_probe()
2149 if (!smmu->irqs) { in arm_smmu_device_probe()
2159 smmu->irqs[i] = irq; in arm_smmu_device_probe()
2162 err = devm_clk_bulk_get_all(dev, &smmu->clks); in arm_smmu_device_probe()
2167 smmu->num_clks = err; in arm_smmu_device_probe()
2169 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks); in arm_smmu_device_probe()
2173 err = arm_smmu_device_cfg_probe(smmu); in arm_smmu_device_probe()
2177 if (smmu->version == ARM_SMMU_V2) { in arm_smmu_device_probe()
2178 if (smmu->num_context_banks > smmu->num_context_irqs) { in arm_smmu_device_probe()
2181 smmu->num_context_irqs, smmu->num_context_banks); in arm_smmu_device_probe()
2186 smmu->num_context_irqs = smmu->num_context_banks; in arm_smmu_device_probe()
2189 if (smmu->impl && smmu->impl->global_fault) in arm_smmu_device_probe()
2190 global_fault = smmu->impl->global_fault; in arm_smmu_device_probe()
2194 for (i = 0; i < smmu->num_global_irqs; ++i) { in arm_smmu_device_probe()
2195 err = devm_request_irq(smmu->dev, smmu->irqs[i], in arm_smmu_device_probe()
2198 "arm-smmu global fault", in arm_smmu_device_probe()
2199 smmu); in arm_smmu_device_probe()
2202 i, smmu->irqs[i]); in arm_smmu_device_probe()
2207 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, in arm_smmu_device_probe()
2208 "smmu.%pa", &ioaddr); in arm_smmu_device_probe()
2214 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); in arm_smmu_device_probe()
2215 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); in arm_smmu_device_probe()
2217 err = iommu_device_register(&smmu->iommu); in arm_smmu_device_probe()
2223 platform_set_drvdata(pdev, smmu); in arm_smmu_device_probe()
2224 arm_smmu_device_reset(smmu); in arm_smmu_device_probe()
2225 arm_smmu_test_smr_masks(smmu); in arm_smmu_device_probe()
2239 * For ACPI and generic DT bindings, an SMMU will be probed before in arm_smmu_device_probe()
2241 * ready to handle default domain setup as soon as any SMMU exists. in arm_smmu_device_probe()
2251 struct arm_smmu_device *smmu = platform_get_drvdata(pdev); in arm_smmu_device_remove() local
2253 if (!smmu) in arm_smmu_device_remove()
2256 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) in arm_smmu_device_remove()
2260 iommu_device_unregister(&smmu->iommu); in arm_smmu_device_remove()
2261 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_remove()
2263 arm_smmu_rpm_get(smmu); in arm_smmu_device_remove()
2265 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD); in arm_smmu_device_remove()
2266 arm_smmu_rpm_put(smmu); in arm_smmu_device_remove()
2268 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_device_remove()
2269 pm_runtime_force_suspend(smmu->dev); in arm_smmu_device_remove()
2271 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_device_remove()
2273 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_device_remove()
2284 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_runtime_resume() local
2287 ret = clk_bulk_enable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_resume()
2291 arm_smmu_device_reset(smmu); in arm_smmu_runtime_resume()
2298 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_runtime_suspend() local
2300 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_suspend()
2329 .name = "arm-smmu",
2340 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2342 MODULE_ALIAS("platform:arm-smmu");