Lines Matching full:iommu

3  * IOMMU API for Rockchip
13 #include <linux/dma-iommu.h>
18 #include <linux/iommu.h>
118 bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
119 bool dlr_disable; /* avoid accessing the iommu when runtime ops are called */
122 struct iommu_device iommu; member
124 struct iommu_domain *domain; /* domain to which iommu is attached */
132 struct device_link *link; /* runtime PM link from IOMMU to master */
133 struct rk_iommu *iommu; member
157 * The Rockchip rk3288 iommu uses a 2-level page table.
166 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
322 * rk3288 iova (IOMMU Virtual Address) format
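The comments above describe the two-level page table and the rk3288 IOVA layout. As a rough illustration of that split (a minimal sketch assuming the usual 4 KiB pages; the macro and helper names below are made up for this example and are not the driver's own), an IOVA decomposes into iova[31:22] = DTE index, iova[21:12] = PTE index, iova[11:0] = page offset:

	/* Illustrative helpers for the rk3288 IOVA split (assumes <linux/types.h>). */
	#define EX_IOVA_DTE_SHIFT	22
	#define EX_IOVA_PTE_SHIFT	12
	#define EX_IOVA_PTE_MASK	0x3ffU
	#define EX_IOVA_PAGE_MASK	0xfffU

	static inline u32 ex_iova_dte_index(u32 iova)
	{
		return iova >> EX_IOVA_DTE_SHIFT;	/* selects the directory entry */
	}

	static inline u32 ex_iova_pte_index(u32 iova)
	{
		return (iova >> EX_IOVA_PTE_SHIFT) & EX_IOVA_PTE_MASK;	/* selects the page table entry */
	}

	static inline u32 ex_iova_page_offset(u32 iova)
	{
		return iova & EX_IOVA_PAGE_MASK;	/* offset within the 4 KiB page */
	}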
363 static void rk_iommu_command(struct rk_iommu *iommu, u32 command) in rk_iommu_command() argument
367 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_command()
368 writel(command, iommu->bases[i] + RK_MMU_COMMAND); in rk_iommu_command()
375 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, in rk_iommu_zap_lines() argument
384 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_zap_lines()
388 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
392 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) in rk_iommu_is_stall_active() argument
397 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_stall_active()
398 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & in rk_iommu_is_stall_active()
404 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) in rk_iommu_is_paging_enabled() argument
409 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_paging_enabled()
410 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & in rk_iommu_is_paging_enabled()
416 static bool rk_iommu_is_reset_done(struct rk_iommu *iommu) in rk_iommu_is_reset_done() argument
421 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_reset_done()
422 done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0; in rk_iommu_is_reset_done()
427 static int rk_iommu_enable_stall(struct rk_iommu *iommu) in rk_iommu_enable_stall() argument
433 if (iommu->skip_read) in rk_iommu_enable_stall()
436 if (rk_iommu_is_stall_active(iommu)) in rk_iommu_enable_stall()
440 if (!rk_iommu_is_paging_enabled(iommu)) in rk_iommu_enable_stall()
444 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); in rk_iommu_enable_stall()
445 if (iommu->skip_read) in rk_iommu_enable_stall()
448 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, in rk_iommu_enable_stall()
452 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_enable_stall()
453 dev_err(iommu->dev, "Enable stall request timed out, retry_count = %d, status: %#08x\n", in rk_iommu_enable_stall()
455 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_enable_stall()
456 if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) in rk_iommu_enable_stall()
463 static int rk_iommu_disable_stall(struct rk_iommu *iommu) in rk_iommu_disable_stall() argument
469 if (iommu->skip_read) in rk_iommu_disable_stall()
472 if (!rk_iommu_is_stall_active(iommu)) in rk_iommu_disable_stall()
476 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); in rk_iommu_disable_stall()
477 if (iommu->skip_read) in rk_iommu_disable_stall()
480 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, in rk_iommu_disable_stall()
484 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_disable_stall()
485 dev_err(iommu->dev, "Disable stall request timed out, retry_count = %d, status: %#08x\n", in rk_iommu_disable_stall()
487 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_disable_stall()
488 if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) in rk_iommu_disable_stall()
495 static int rk_iommu_enable_paging(struct rk_iommu *iommu) in rk_iommu_enable_paging() argument
501 if (iommu->skip_read) in rk_iommu_enable_paging()
504 if (rk_iommu_is_paging_enabled(iommu)) in rk_iommu_enable_paging()
508 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); in rk_iommu_enable_paging()
509 if (iommu->skip_read) in rk_iommu_enable_paging()
512 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, in rk_iommu_enable_paging()
516 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_enable_paging()
517 dev_err(iommu->dev, "Enable paging request timed out, retry_count = %d, status: %#08x\n", in rk_iommu_enable_paging()
519 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_enable_paging()
520 if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) in rk_iommu_enable_paging()
527 static int rk_iommu_disable_paging(struct rk_iommu *iommu) in rk_iommu_disable_paging() argument
533 if (iommu->skip_read) in rk_iommu_disable_paging()
536 if (!rk_iommu_is_paging_enabled(iommu)) in rk_iommu_disable_paging()
540 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); in rk_iommu_disable_paging()
541 if (iommu->skip_read) in rk_iommu_disable_paging()
544 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, in rk_iommu_disable_paging()
548 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_disable_paging()
549 dev_err(iommu->dev, "Disable paging request timed out, retry_count = %d, status: %#08x\n", in rk_iommu_disable_paging()
551 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_disable_paging()
552 if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) in rk_iommu_disable_paging()
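The four stall/paging helpers above all follow the same shape: broadcast a command to every MMU instance, poll the result with readx_poll_timeout(), and, when iommu->cmd_retry is set, reissue the command up to CMD_RETRY_COUNT times. A condensed sketch of that command/poll/retry pattern (the helper name, poll constants, and retry count below are illustrative; the real functions also honour skip_read and log per-MMU status on timeout):

	/* Illustrative poll constants; the driver defines its own values. */
	#define EX_POLL_PERIOD_US	100
	#define EX_POLL_TIMEOUT_US	1000
	#define EX_CMD_RETRY_COUNT	10

	static int ex_cmd_and_poll(struct rk_iommu *iommu, u32 cmd,
				   bool (*done)(struct rk_iommu *))
	{
		int retry = 0;
		bool val;
		int ret;

		do {
			/* Broadcast the command to every MMU instance. */
			rk_iommu_command(iommu, cmd);
			/* Wait until every instance reports the expected state. */
			ret = readx_poll_timeout(done, iommu, val, val,
						 EX_POLL_PERIOD_US,
						 EX_POLL_TIMEOUT_US);
		} while (ret && iommu->cmd_retry && retry++ < EX_CMD_RETRY_COUNT);

		return ret;
	}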
564 static int rk_iommu_force_reset(struct rk_iommu *iommu) in rk_iommu_force_reset() argument
571 if (iommu->reset_disabled) in rk_iommu_force_reset()
574 if (iommu->skip_read) in rk_iommu_force_reset()
585 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_force_reset()
587 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_address_mask); in rk_iommu_force_reset()
589 ret = readx_poll_timeout(rk_iommu_read_dte_addr, iommu->bases[i], dte_addr, in rk_iommu_force_reset()
593 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); in rk_iommu_force_reset()
599 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); in rk_iommu_force_reset()
600 if (iommu->skip_read) in rk_iommu_force_reset()
603 ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val, in rk_iommu_force_reset()
607 dev_err(iommu->dev, "FORCE_RESET command timed out\n"); in rk_iommu_force_reset()
641 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova) in log_iova() argument
643 void __iomem *base = iommu->bases[index]; in log_iova()
680 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", in log_iova()
682 …dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa … in log_iova()
688 static int rk_pagefault_done(struct rk_iommu *iommu) in rk_pagefault_done() argument
697 for (i = 0; i < iommu->num_mmu; i++) { in rk_pagefault_done()
698 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS); in rk_pagefault_done()
703 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR); in rk_pagefault_done()
708 status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS); in rk_pagefault_done()
712 dev_err(iommu->dev, "Page fault at %pad of type %s\n", in rk_pagefault_done()
716 log_iova(iommu, i, iova); in rk_pagefault_done()
718 if (!iommu->master_handle_irq) { in rk_pagefault_done()
724 if (iommu->domain) in rk_pagefault_done()
725 report_iommu_fault(iommu->domain, iommu->dev, iova, in rk_pagefault_done()
728 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); in rk_pagefault_done()
731 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); in rk_pagefault_done()
734 * Master may clear the int_mask to prevent iommu in rk_pagefault_done()
738 int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK); in rk_pagefault_done()
740 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE); in rk_pagefault_done()
744 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); in rk_pagefault_done()
747 dev_err(iommu->dev, "unexpected int_status: %#08x\n", in rk_pagefault_done()
750 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status); in rk_pagefault_done()
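The fault path above hands the faulting IOVA to the master through report_iommu_fault(), so a master driver (in particular one that sets master_handle_irq) can install its own handler on the domain. A minimal sketch of such a handler using the generic IOMMU fault-handler API; the handler name and return-value policy are illustrative:

	static int ex_master_fault_handler(struct iommu_domain *domain,
					   struct device *dev, unsigned long iova,
					   int flags, void *token)
	{
		dev_err(dev, "iommu fault at iova %#lx (flags %#x)\n", iova, flags);

		/* Non-zero tells the IOMMU driver the fault was not handled here. */
		return -ENOSYS;
	}

	/* Typically registered by the master driver, e.g. at probe time: */
	/* iommu_set_fault_handler(domain, ex_master_fault_handler, NULL); */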
758 struct rk_iommu *iommu = rk_iommu_from_dev(master_dev); in rockchip_pagefault_done() local
760 return rk_pagefault_done(iommu); in rockchip_pagefault_done()
766 struct rk_iommu *iommu = rk_iommu_from_dev(master_dev); in rockchip_get_iommu_base() local
768 return iommu->bases[idx]; in rockchip_get_iommu_base()
774 struct rk_iommu *iommu = dev_id; in rk_iommu_irq() local
778 err = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_irq()
782 if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) in rk_iommu_irq()
786 if (iommu->master_handle_irq) { in rk_iommu_irq()
787 if (iommu->domain) in rk_iommu_irq()
788 ret = report_iommu_fault(iommu->domain, iommu->dev, -1, 0x0); in rk_iommu_irq()
790 ret = rk_pagefault_done(iommu); in rk_iommu_irq()
793 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_irq()
796 pm_runtime_put(iommu->dev); in rk_iommu_irq()
841 struct rk_iommu *iommu; in rk_iommu_zap_iova() local
844 iommu = list_entry(pos, struct rk_iommu, node); in rk_iommu_zap_iova()
847 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_zap_iova()
851 WARN_ON(clk_bulk_enable(iommu->num_clocks, in rk_iommu_zap_iova()
852 iommu->clocks)); in rk_iommu_zap_iova()
853 rk_iommu_zap_lines(iommu, iova, size); in rk_iommu_zap_iova()
854 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_zap_iova()
855 pm_runtime_put(iommu->dev); in rk_iommu_zap_iova()
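The zap path above and the TLB flush further down wrap every register access in the same guard: only touch the MMU if pm_runtime_get_if_in_use() says it is currently powered, and keep the bulk clocks enabled for the duration of the access. A condensed sketch of that guard (the helper name is illustrative and error handling is trimmed):

	static void ex_with_powered_mmu(struct rk_iommu *iommu,
					void (*access)(struct rk_iommu *))
	{
		int ret = pm_runtime_get_if_in_use(iommu->dev);

		/* 0 means the IOMMU is suspended, <0 is an error; skip either way. */
		if (ret <= 0)
			return;

		WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
		access(iommu);
		clk_bulk_disable(iommu->num_clocks, iommu->clocks);

		pm_runtime_put(iommu->dev);
	}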
909 size_t size, struct rk_iommu *iommu) in rk_iommu_unmap_iova() argument
922 if (iommu && iommu->need_res_map) in rk_iommu_unmap_iova()
938 struct rk_iommu *iommu = NULL; in rk_iommu_get() local
942 iommu = list_entry(pos, struct rk_iommu, node); in rk_iommu_get()
943 if (iommu->need_res_map) in rk_iommu_get()
948 return iommu; in rk_iommu_get()
1047 struct rk_iommu *iommu = rk_iommu_get(rk_domain); in rk_iommu_unmap() local
1069 iommu); in rk_iommu_unmap()
1088 struct rk_iommu *iommu; in rk_iommu_flush_tlb_all() local
1091 iommu = list_entry(pos, struct rk_iommu, node); in rk_iommu_flush_tlb_all()
1093 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_flush_tlb_all()
1097 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); in rk_iommu_flush_tlb_all()
1098 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_flush_tlb_all()
1099 rk_iommu_write(iommu->bases[i], RK_MMU_COMMAND, in rk_iommu_flush_tlb_all()
1101 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_flush_tlb_all()
1102 pm_runtime_put(iommu->dev); in rk_iommu_flush_tlb_all()
1112 return data ? data->iommu : NULL; in rk_iommu_from_dev()
1115 /* Must be called with iommu powered on and attached */
1116 static void rk_iommu_disable(struct rk_iommu *iommu) in rk_iommu_disable() argument
1121 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); in rk_iommu_disable()
1122 rk_iommu_enable_stall(iommu); in rk_iommu_disable()
1123 rk_iommu_disable_paging(iommu); in rk_iommu_disable()
1124 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_disable()
1125 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); in rk_iommu_disable()
1126 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); in rk_iommu_disable()
1128 rk_iommu_disable_stall(iommu); in rk_iommu_disable()
1129 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_disable()
1131 iommu->iommu_enabled = false; in rk_iommu_disable()
1136 struct rk_iommu *iommu; in rockchip_iommu_disable() local
1138 iommu = rk_iommu_from_dev(dev); in rockchip_iommu_disable()
1139 if (!iommu) in rockchip_iommu_disable()
1142 rk_iommu_disable(iommu); in rockchip_iommu_disable()
1148 /* Must be called with iommu powered on and attached */
1149 static int rk_iommu_enable(struct rk_iommu *iommu) in rk_iommu_enable() argument
1151 struct iommu_domain *domain = iommu->domain; in rk_iommu_enable()
1156 ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks); in rk_iommu_enable()
1160 ret = rk_iommu_enable_stall(iommu); in rk_iommu_enable()
1164 ret = rk_iommu_force_reset(iommu); in rk_iommu_enable()
1168 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_enable()
1169 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, in rk_iommu_enable()
1171 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); in rk_iommu_enable()
1172 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); in rk_iommu_enable()
1174 /* Workaround for iommu getting blocked; BIT(31) defaults to 1 */ in rk_iommu_enable()
1175 auto_gate = rk_iommu_read(iommu->bases[i], RK_MMU_AUTO_GATING); in rk_iommu_enable()
1177 rk_iommu_write(iommu->bases[i], RK_MMU_AUTO_GATING, auto_gate); in rk_iommu_enable()
1180 ret = rk_iommu_enable_paging(iommu); in rk_iommu_enable()
1183 rk_iommu_disable_stall(iommu); in rk_iommu_enable()
1185 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_enable()
1188 iommu->iommu_enabled = true; in rk_iommu_enable()
1195 struct rk_iommu *iommu; in rockchip_iommu_enable() local
1197 iommu = rk_iommu_from_dev(dev); in rockchip_iommu_enable()
1198 if (!iommu) in rockchip_iommu_enable()
1201 return rk_iommu_enable(iommu); in rockchip_iommu_enable()
1207 struct rk_iommu *iommu; in rockchip_iommu_is_enabled() local
1209 iommu = rk_iommu_from_dev(dev); in rockchip_iommu_is_enabled()
1210 if (!iommu) in rockchip_iommu_is_enabled()
1213 return iommu->iommu_enabled; in rockchip_iommu_is_enabled()
1219 struct rk_iommu *iommu; in rockchip_iommu_force_reset() local
1222 iommu = rk_iommu_from_dev(dev); in rockchip_iommu_force_reset()
1223 if (!iommu) in rockchip_iommu_force_reset()
1226 ret = rk_iommu_enable_stall(iommu); in rockchip_iommu_force_reset()
1230 ret = rk_iommu_force_reset(iommu); in rockchip_iommu_force_reset()
1232 rk_iommu_disable_stall(iommu); in rockchip_iommu_force_reset()
1242 struct rk_iommu *iommu; in rk_iommu_detach_device() local
1248 iommu = rk_iommu_from_dev(dev); in rk_iommu_detach_device()
1249 if (!iommu) in rk_iommu_detach_device()
1252 dev_dbg(dev, "Detaching from iommu domain\n"); in rk_iommu_detach_device()
1254 if (!iommu->domain) in rk_iommu_detach_device()
1257 iommu->domain = NULL; in rk_iommu_detach_device()
1260 list_del_init(&iommu->node); in rk_iommu_detach_device()
1263 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_detach_device()
1266 rk_iommu_disable(iommu); in rk_iommu_detach_device()
1267 pm_runtime_put(iommu->dev); in rk_iommu_detach_device()
1274 struct rk_iommu *iommu; in rk_iommu_attach_device() local
1281 * Such a device does not belong to an iommu group. in rk_iommu_attach_device()
1283 iommu = rk_iommu_from_dev(dev); in rk_iommu_attach_device()
1284 if (!iommu) in rk_iommu_attach_device()
1287 dev_dbg(dev, "Attaching to iommu domain\n"); in rk_iommu_attach_device()
1289 if (iommu->domain) in rk_iommu_attach_device()
1290 rk_iommu_detach_device(iommu->domain, dev); in rk_iommu_attach_device()
1292 iommu->domain = domain; in rk_iommu_attach_device()
1294 /* Attaching a NULL domain disables the iommu */ in rk_iommu_attach_device()
1299 list_add_tail(&iommu->node, &rk_domain->iommus); in rk_iommu_attach_device()
1302 rk_domain->shootdown_entire = iommu->shootdown_entire; in rk_iommu_attach_device()
1303 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_attach_device()
1307 ret = rk_iommu_enable(iommu); in rk_iommu_attach_device()
1309 rk_iommu_detach_device(iommu->domain, dev); in rk_iommu_attach_device()
1311 pm_runtime_put(iommu->dev); in rk_iommu_attach_device()
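rk_iommu_attach_device() above is reached through the generic IOMMU API. A master that wants its own unmanaged domain would typically allocate one and attach its device along the following lines (a minimal sketch under the older bus-based iommu_domain_alloc() API this listing appears to use; the function name is illustrative and the caller is assumed to keep the domain pointer for later map/unmap and detach):

	static int ex_master_attach(struct device *dev)
	{
		struct iommu_domain *domain;
		int ret;

		/* Allocate an unmanaged domain on the platform bus. */
		domain = iommu_domain_alloc(&platform_bus_type);
		if (!domain)
			return -ENOMEM;

		/* Attaching ends up in rk_iommu_attach_device(). */
		ret = iommu_attach_device(domain, dev);
		if (ret) {
			iommu_domain_free(domain);
			return ret;
		}

		return 0;
	}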
1399 struct rk_iommu *iommu; in rk_iommu_probe_device() local
1405 iommu = rk_iommu_from_dev(dev); in rk_iommu_probe_device()
1407 data->link = device_link_add(dev, iommu->dev, in rk_iommu_probe_device()
1420 return &iommu->iommu; in rk_iommu_probe_device()
1432 struct rk_iommu *iommu; in rk_iommu_device_group() local
1434 iommu = rk_iommu_from_dev(dev); in rk_iommu_device_group()
1436 return iommu_group_ref_get(iommu->group); in rk_iommu_device_group()
1459 data->iommu = platform_get_drvdata(iommu_dev); in rk_iommu_of_xlate()
1473 struct rk_iommu *iommu = rk_iommu_from_dev(dev); in rockchip_iommu_mask_irq() local
1476 if (!iommu) in rockchip_iommu_mask_irq()
1479 for (i = 0; i < iommu->num_mmu; i++) in rockchip_iommu_mask_irq()
1480 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); in rockchip_iommu_mask_irq()
1486 struct rk_iommu *iommu = rk_iommu_from_dev(dev); in rockchip_iommu_unmask_irq() local
1489 if (!iommu) in rockchip_iommu_unmask_irq()
1492 for (i = 0; i < iommu->num_mmu; i++) { in rockchip_iommu_unmask_irq()
1494 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); in rockchip_iommu_unmask_irq()
1495 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); in rockchip_iommu_unmask_irq()
1496 /* Leave iommu in pagefault state until mapping finished */ in rockchip_iommu_unmask_irq()
1497 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE); in rockchip_iommu_unmask_irq()
1522 struct rk_iommu *iommu; in rk_iommu_probe() local
1528 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); in rk_iommu_probe()
1529 if (!iommu) in rk_iommu_probe()
1532 platform_set_drvdata(pdev, iommu); in rk_iommu_probe()
1533 iommu->dev = dev; in rk_iommu_probe()
1534 iommu->num_mmu = 0; in rk_iommu_probe()
1547 iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases), in rk_iommu_probe()
1549 if (!iommu->bases) in rk_iommu_probe()
1556 iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res); in rk_iommu_probe()
1557 if (IS_ERR(iommu->bases[i])) in rk_iommu_probe()
1559 iommu->num_mmu++; in rk_iommu_probe()
1561 if (iommu->num_mmu == 0) in rk_iommu_probe()
1562 return PTR_ERR(iommu->bases[0]); in rk_iommu_probe()
1564 iommu->num_irq = platform_irq_count(pdev); in rk_iommu_probe()
1565 if (iommu->num_irq < 0) in rk_iommu_probe()
1566 return iommu->num_irq; in rk_iommu_probe()
1568 iommu->reset_disabled = device_property_read_bool(dev, in rk_iommu_probe()
1570 iommu->skip_read = device_property_read_bool(dev, in rk_iommu_probe()
1572 iommu->dlr_disable = device_property_read_bool(dev, in rk_iommu_probe()
1574 iommu->shootdown_entire = device_property_read_bool(dev, in rk_iommu_probe()
1576 iommu->master_handle_irq = device_property_read_bool(dev, in rk_iommu_probe()
1580 iommu->cmd_retry = device_property_read_bool(dev, in rk_iommu_probe()
1583 iommu->need_res_map = device_property_read_bool(dev, in rk_iommu_probe()
1587 * iommu clocks should be present for all new devices and devicetrees in rk_iommu_probe()
1591 err = devm_clk_bulk_get_all(dev, &iommu->clocks); in rk_iommu_probe()
1593 iommu->num_clocks = 0; in rk_iommu_probe()
1597 iommu->num_clocks = err; in rk_iommu_probe()
1599 err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks); in rk_iommu_probe()
1603 iommu->group = iommu_group_alloc(); in rk_iommu_probe()
1604 if (IS_ERR(iommu->group)) { in rk_iommu_probe()
1605 err = PTR_ERR(iommu->group); in rk_iommu_probe()
1609 err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); in rk_iommu_probe()
1613 iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); in rk_iommu_probe()
1615 iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode); in rk_iommu_probe()
1617 err = iommu_device_register(&iommu->iommu); in rk_iommu_probe()
1622 * Use the first registered IOMMU device for domain to use with DMA in rk_iommu_probe()
1624 * IOMMU device. in rk_iommu_probe()
1633 if (iommu->skip_read) in rk_iommu_probe()
1636 for (i = 0; i < iommu->num_irq; i++) { in rk_iommu_probe()
1642 err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, in rk_iommu_probe()
1643 IRQF_SHARED, dev_name(dev), iommu); in rk_iommu_probe()
1651 if (!res_page && iommu->need_res_map) { in rk_iommu_probe()
1661 iommu_device_sysfs_remove(&iommu->iommu); in rk_iommu_probe()
1663 iommu_group_put(iommu->group); in rk_iommu_probe()
1665 clk_bulk_unprepare(iommu->num_clocks, iommu->clocks); in rk_iommu_probe()
1671 struct rk_iommu *iommu = platform_get_drvdata(pdev); in rk_iommu_shutdown() local
1674 if (iommu->skip_read) in rk_iommu_shutdown()
1677 for (i = 0; i < iommu->num_irq; i++) { in rk_iommu_shutdown()
1680 devm_free_irq(iommu->dev, irq, iommu); in rk_iommu_shutdown()
1684 if (!iommu->dlr_disable) in rk_iommu_shutdown()
1690 struct rk_iommu *iommu = dev_get_drvdata(dev); in rk_iommu_suspend() local
1692 if (!iommu->domain) in rk_iommu_suspend()
1695 if (iommu->dlr_disable) in rk_iommu_suspend()
1698 rk_iommu_disable(iommu); in rk_iommu_suspend()
1704 struct rk_iommu *iommu = dev_get_drvdata(dev); in rk_iommu_resume() local
1706 if (!iommu->domain) in rk_iommu_resume()
1709 if (iommu->dlr_disable) in rk_iommu_resume()
1712 return rk_iommu_enable(iommu); in rk_iommu_resume()
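rk_iommu_suspend() and rk_iommu_resume() above only act when a domain is attached and dlr_disable is not set. They would typically be wired up as runtime-PM callbacks, roughly as sketched below; the actual dev_pm_ops definition is not part of the matched lines, so this is an assumption about how the callbacks are hooked in:

	static const struct dev_pm_ops ex_rk_iommu_pm_ops = {
		/* Runtime suspend/resume call the handlers listed above. */
		SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
		/* System sleep reuses the runtime-PM paths. */
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
	};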
1740 { .compatible = "rockchip,iommu",
1743 { .compatible = "rockchip,iommu-v2",
1746 { .compatible = "rockchip,rk3568-iommu",
1769 MODULE_DESCRIPTION("IOMMU API for Rockchip");
1771 MODULE_ALIAS("platform:rockchip-iommu");