xref: /OK3568_Linux_fs/kernel/drivers/iommu/rockchip-iommu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * IOMMU API for Rockchip
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Module Authors:	Simon Xue <xxm@rock-chips.com>
6*4882a593Smuzhiyun  *			Daniel Kurtz <djkurtz@chromium.org>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/compiler.h>
11*4882a593Smuzhiyun #include <linux/delay.h>
12*4882a593Smuzhiyun #include <linux/device.h>
13*4882a593Smuzhiyun #include <linux/dma-iommu.h>
14*4882a593Smuzhiyun #include <linux/dma-mapping.h>
15*4882a593Smuzhiyun #include <linux/errno.h>
16*4882a593Smuzhiyun #include <linux/interrupt.h>
17*4882a593Smuzhiyun #include <linux/io.h>
18*4882a593Smuzhiyun #include <linux/iommu.h>
19*4882a593Smuzhiyun #include <linux/iopoll.h>
20*4882a593Smuzhiyun #include <linux/list.h>
21*4882a593Smuzhiyun #include <linux/mm.h>
22*4882a593Smuzhiyun #include <linux/module.h>
23*4882a593Smuzhiyun #include <linux/init.h>
24*4882a593Smuzhiyun #include <linux/of.h>
25*4882a593Smuzhiyun #include <linux/of_iommu.h>
26*4882a593Smuzhiyun #include <linux/of_platform.h>
27*4882a593Smuzhiyun #include <linux/platform_device.h>
28*4882a593Smuzhiyun #include <linux/pm_runtime.h>
29*4882a593Smuzhiyun #include <linux/slab.h>
30*4882a593Smuzhiyun #include <linux/spinlock.h>
31*4882a593Smuzhiyun #include <soc/rockchip/rockchip_iommu.h>
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun /** MMU register offsets */
34*4882a593Smuzhiyun #define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
35*4882a593Smuzhiyun #define RK_MMU_STATUS		0x04
36*4882a593Smuzhiyun #define RK_MMU_COMMAND		0x08
37*4882a593Smuzhiyun #define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
38*4882a593Smuzhiyun #define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
39*4882a593Smuzhiyun #define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
40*4882a593Smuzhiyun #define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
41*4882a593Smuzhiyun #define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
42*4882a593Smuzhiyun #define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
43*4882a593Smuzhiyun #define RK_MMU_AUTO_GATING	0x24
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #define DTE_ADDR_DUMMY		0xCAFEBABE
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun #define RK_MMU_POLL_PERIOD_US		100
48*4882a593Smuzhiyun #define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
49*4882a593Smuzhiyun #define RK_MMU_POLL_TIMEOUT_US		1000
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun /* RK_MMU_STATUS fields */
52*4882a593Smuzhiyun #define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
53*4882a593Smuzhiyun #define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
54*4882a593Smuzhiyun #define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
55*4882a593Smuzhiyun #define RK_MMU_STATUS_IDLE                 BIT(3)
56*4882a593Smuzhiyun #define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
57*4882a593Smuzhiyun #define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
58*4882a593Smuzhiyun #define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun /* RK_MMU_COMMAND command values */
61*4882a593Smuzhiyun #define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
62*4882a593Smuzhiyun #define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
63*4882a593Smuzhiyun #define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
64*4882a593Smuzhiyun #define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stall re-enables paging */
65*4882a593Smuzhiyun #define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
66*4882a593Smuzhiyun #define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
67*4882a593Smuzhiyun #define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun /* RK_MMU_INT_* register fields */
70*4882a593Smuzhiyun #define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
71*4882a593Smuzhiyun #define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
72*4882a593Smuzhiyun #define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun #define NUM_DT_ENTRIES 1024
75*4882a593Smuzhiyun #define NUM_PT_ENTRIES 1024
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun #define SPAGE_ORDER 12
78*4882a593Smuzhiyun #define SPAGE_SIZE (1 << SPAGE_ORDER)
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun #define DISABLE_FETCH_DTE_TIME_LIMIT BIT(31)
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun #define CMD_RETRY_COUNT 10
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun  /*
85*4882a593Smuzhiyun   * Support mapping any size that fits in one page table:
86*4882a593Smuzhiyun   *   4 KiB to 4 MiB
87*4882a593Smuzhiyun   */
88*4882a593Smuzhiyun #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
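/*
 * Illustrative breakdown: 0x007ff000 sets bits 12..22, i.e. it advertises
 * every power-of-two mapping size from 4 KiB (1 << 12) up to 4 MiB (1 << 22),
 * so the IOMMU core splits any request into chunks that each fit within a
 * single page table.
 */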
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun struct rk_iommu_domain {
91*4882a593Smuzhiyun 	struct list_head iommus;
92*4882a593Smuzhiyun 	u32 *dt; /* page directory table */
93*4882a593Smuzhiyun 	dma_addr_t dt_dma;
94*4882a593Smuzhiyun 	spinlock_t iommus_lock; /* lock for iommus list */
95*4882a593Smuzhiyun 	spinlock_t dt_lock; /* lock for modifying page directory table */
96*4882a593Smuzhiyun 	bool shootdown_entire;
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	struct iommu_domain domain;
99*4882a593Smuzhiyun };
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun struct rk_iommu_ops {
102*4882a593Smuzhiyun 	phys_addr_t (*pt_address)(u32 dte);
103*4882a593Smuzhiyun 	u32 (*mk_dtentries)(dma_addr_t pt_dma);
104*4882a593Smuzhiyun 	u32 (*mk_ptentries)(phys_addr_t page, int prot);
105*4882a593Smuzhiyun 	phys_addr_t (*dte_addr_phys)(u32 addr);
106*4882a593Smuzhiyun 	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
107*4882a593Smuzhiyun 	u64 dma_bit_mask;
108*4882a593Smuzhiyun };
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun struct rk_iommu {
111*4882a593Smuzhiyun 	struct device *dev;
112*4882a593Smuzhiyun 	void __iomem **bases;
113*4882a593Smuzhiyun 	int num_mmu;
114*4882a593Smuzhiyun 	int num_irq;
115*4882a593Smuzhiyun 	struct clk_bulk_data *clocks;
116*4882a593Smuzhiyun 	int num_clocks;
117*4882a593Smuzhiyun 	bool reset_disabled;
118*4882a593Smuzhiyun 	bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
119*4882a593Smuzhiyun 	bool dlr_disable; /* avoid accessing the iommu when runtime ops are called */
120*4882a593Smuzhiyun 	bool cmd_retry;
121*4882a593Smuzhiyun 	bool master_handle_irq;
122*4882a593Smuzhiyun 	struct iommu_device iommu;
123*4882a593Smuzhiyun 	struct list_head node; /* entry in rk_iommu_domain.iommus */
124*4882a593Smuzhiyun 	struct iommu_domain *domain; /* domain to which iommu is attached */
125*4882a593Smuzhiyun 	struct iommu_group *group;
126*4882a593Smuzhiyun 	bool shootdown_entire;
127*4882a593Smuzhiyun 	bool iommu_enabled;
128*4882a593Smuzhiyun 	bool need_res_map;
129*4882a593Smuzhiyun };
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun struct rk_iommudata {
132*4882a593Smuzhiyun 	struct device_link *link; /* runtime PM link from IOMMU to master */
133*4882a593Smuzhiyun 	struct rk_iommu *iommu;
134*4882a593Smuzhiyun 	bool defer_attach;
135*4882a593Smuzhiyun };
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun static struct device *dma_dev;
138*4882a593Smuzhiyun static const struct rk_iommu_ops *rk_ops;
139*4882a593Smuzhiyun static struct rk_iommu *rk_iommu_from_dev(struct device *dev);
140*4882a593Smuzhiyun static char reserve_range[PAGE_SIZE] __aligned(PAGE_SIZE);
141*4882a593Smuzhiyun static phys_addr_t res_page;
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
144*4882a593Smuzhiyun 				  unsigned int count)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun 	size_t size = count * sizeof(u32); /* size in bytes of 'count' u32 entries */
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun 	return container_of(dom, struct rk_iommu_domain, domain);
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun /*
157*4882a593Smuzhiyun  * The Rockchip rk3288 iommu uses a 2-level page table.
158*4882a593Smuzhiyun  * The first level is the "Directory Table" (DT).
159*4882a593Smuzhiyun  * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
160*4882a593Smuzhiyun  * to a "Page Table".
161*4882a593Smuzhiyun  * The second level is the 1024 Page Tables (PT).
162*4882a593Smuzhiyun  * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
163*4882a593Smuzhiyun  * a 4 KB page of physical memory.
164*4882a593Smuzhiyun  *
165*4882a593Smuzhiyun  * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
166*4882a593Smuzhiyun  * Each iommu device has a MMU_DTE_ADDR register that contains the physical
167*4882a593Smuzhiyun  * address of the start of the DT page.
168*4882a593Smuzhiyun  *
169*4882a593Smuzhiyun  * The structure of the page table is as follows:
170*4882a593Smuzhiyun  *
171*4882a593Smuzhiyun  *                   DT
172*4882a593Smuzhiyun  * MMU_DTE_ADDR -> +-----+
173*4882a593Smuzhiyun  *                 |     |
174*4882a593Smuzhiyun  *                 +-----+     PT
175*4882a593Smuzhiyun  *                 | DTE | -> +-----+
176*4882a593Smuzhiyun  *                 +-----+    |     |     Memory
177*4882a593Smuzhiyun  *                 |     |    +-----+     Page
178*4882a593Smuzhiyun  *                 |     |    | PTE | -> +-----+
179*4882a593Smuzhiyun  *                 +-----+    +-----+    |     |
180*4882a593Smuzhiyun  *                            |     |    |     |
181*4882a593Smuzhiyun  *                            |     |    |     |
182*4882a593Smuzhiyun  *                            +-----+    |     |
183*4882a593Smuzhiyun  *                                       |     |
184*4882a593Smuzhiyun  *                                       |     |
185*4882a593Smuzhiyun  *                                       +-----+
186*4882a593Smuzhiyun  */
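/*
 * Illustrative walk (example address chosen only for this note): to translate
 * iova 0x12345678 the MMU reads DT entry 0x048 to find the PT, then PT entry
 * 0x345 to find the 4 KiB page, and ORs in the page offset 0x678 to form the
 * final physical address.
 */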
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun /*
189*4882a593Smuzhiyun  * Each DTE has a PT address and a valid bit:
190*4882a593Smuzhiyun  * +---------------------+-----------+-+
191*4882a593Smuzhiyun  * | PT address          | Reserved  |V|
192*4882a593Smuzhiyun  * +---------------------+-----------+-+
193*4882a593Smuzhiyun  *  31:12 - PT address (PTs always start on a 4 KB boundary)
194*4882a593Smuzhiyun  *  11: 1 - Reserved
195*4882a593Smuzhiyun  *      0 - 1 if PT @ PT address is valid
196*4882a593Smuzhiyun  */
197*4882a593Smuzhiyun #define RK_DTE_PT_ADDRESS_MASK    0xfffff000
198*4882a593Smuzhiyun #define RK_DTE_PT_VALID           BIT(0)
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun static inline phys_addr_t rk_dte_pt_address(u32 dte)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun 	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun /*
206*4882a593Smuzhiyun  * In v2:
207*4882a593Smuzhiyun  * 31:12 - PT address bit 31:0
208*4882a593Smuzhiyun  * 11: 8 - PT address bit 35:32
209*4882a593Smuzhiyun  *  7: 4 - PT address bit 39:36
210*4882a593Smuzhiyun  *  3: 1 - Reserved
211*4882a593Smuzhiyun  *     0 - 1 if PT @ PT address is valid
212*4882a593Smuzhiyun  */
213*4882a593Smuzhiyun #define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
214*4882a593Smuzhiyun #define DTE_HI_MASK1	GENMASK(11, 8)
215*4882a593Smuzhiyun #define DTE_HI_MASK2	GENMASK(7, 4)
216*4882a593Smuzhiyun #define DTE_HI_SHIFT1	24 /* shift bit 8 to bit 32 */
217*4882a593Smuzhiyun #define DTE_HI_SHIFT2	32 /* shift bit 4 to bit 36 */
218*4882a593Smuzhiyun #define PAGE_DESC_HI_MASK1	GENMASK_ULL(35, 32)
219*4882a593Smuzhiyun #define PAGE_DESC_HI_MASK2	GENMASK_ULL(39, 36)
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
222*4882a593Smuzhiyun {
223*4882a593Smuzhiyun 	u64 dte_v2 = dte;
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
226*4882a593Smuzhiyun 		 ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
227*4882a593Smuzhiyun 		 (dte_v2 & RK_DTE_PT_ADDRESS_MASK);
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	return (phys_addr_t)dte_v2;
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun static inline bool rk_dte_is_pt_valid(u32 dte)
233*4882a593Smuzhiyun {
234*4882a593Smuzhiyun 	return dte & RK_DTE_PT_VALID;
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun static inline u32 rk_mk_dte(dma_addr_t pt_dma)
238*4882a593Smuzhiyun {
239*4882a593Smuzhiyun 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
240*4882a593Smuzhiyun }
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
243*4882a593Smuzhiyun {
244*4882a593Smuzhiyun 	pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
245*4882a593Smuzhiyun 		 ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
246*4882a593Smuzhiyun 		 (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
249*4882a593Smuzhiyun }
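/*
 * Worked example (illustrative value): a page table at physical address
 * 0x23_4567_8000 packs into the 32-bit v2 DTE 0x45678321; bits 31:12 hold
 * phys[31:12] = 0x45678, bits 11:8 hold phys[35:32] = 0x3, bits 7:4 hold
 * phys[39:36] = 0x2, and bit 0 marks the entry valid.
 * rk_dte_pt_address_v2() reverses this packing.
 */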
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun /*
252*4882a593Smuzhiyun  * Each PTE has a Page address, some flags and a valid bit:
253*4882a593Smuzhiyun  * +---------------------+---+-------+-+
254*4882a593Smuzhiyun  * | Page address        |Rsv| Flags |V|
255*4882a593Smuzhiyun  * +---------------------+---+-------+-+
256*4882a593Smuzhiyun  *  31:12 - Page address (Pages always start on a 4 KB boundary)
257*4882a593Smuzhiyun  *  11: 9 - Reserved
258*4882a593Smuzhiyun  *   8: 1 - Flags
259*4882a593Smuzhiyun  *      8 - Read allocate - allocate cache space on read misses
260*4882a593Smuzhiyun  *      7 - Read cache - enable cache & prefetch of data
261*4882a593Smuzhiyun  *      6 - Write buffer - enable delaying writes on their way to memory
262*4882a593Smuzhiyun  *      5 - Write allocate - allocate cache space on write misses
263*4882a593Smuzhiyun  *      4 - Write cache - different writes can be merged together
264*4882a593Smuzhiyun  *      3 - Override cache attributes
265*4882a593Smuzhiyun  *          if 1, bits 4-8 control cache attributes
266*4882a593Smuzhiyun  *          if 0, the system bus defaults are used
267*4882a593Smuzhiyun  *      2 - Writable
268*4882a593Smuzhiyun  *      1 - Readable
269*4882a593Smuzhiyun  *      0 - 1 if Page @ Page address is valid
270*4882a593Smuzhiyun  */
271*4882a593Smuzhiyun #define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
272*4882a593Smuzhiyun #define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
273*4882a593Smuzhiyun #define RK_PTE_PAGE_WRITABLE      BIT(2)
274*4882a593Smuzhiyun #define RK_PTE_PAGE_READABLE      BIT(1)
275*4882a593Smuzhiyun #define RK_PTE_PAGE_VALID         BIT(0)
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun static inline bool rk_pte_is_page_valid(u32 pte)
278*4882a593Smuzhiyun {
279*4882a593Smuzhiyun 	return pte & RK_PTE_PAGE_VALID;
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun #define RK_PTE_PAGE_REPRESENT	BIT(3)
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun static inline bool rk_pte_is_page_represent(u32 pte)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun 	return pte & RK_PTE_PAGE_REPRESENT;
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun /* TODO: set cache flags per prot IOMMU_CACHE */
290*4882a593Smuzhiyun static u32 rk_mk_pte(phys_addr_t page, int prot)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun 	u32 flags = 0;
293*4882a593Smuzhiyun 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
294*4882a593Smuzhiyun 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
295*4882a593Smuzhiyun 	flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
296*4882a593Smuzhiyun 	page &= RK_PTE_PAGE_ADDRESS_MASK;
297*4882a593Smuzhiyun 	return page | flags | RK_PTE_PAGE_VALID;
298*4882a593Smuzhiyun }
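/*
 * Worked example (illustrative values): rk_mk_pte(0x80001000,
 * IOMMU_READ | IOMMU_WRITE) returns 0x80001007, i.e. page address 0x80001000
 * with the writable (bit 2), readable (bit 1) and valid (bit 0) flags set.
 */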
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun 	u32 flags = 0;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	/* If BIT(3) is set, don't fail iommu_map when BIT(0) is already set.
305*4882a593Smuzhiyun 	 * This means we can update a page that is already present, and use
306*4882a593Smuzhiyun 	 * this bit to re-map a pre-mapped 4G range.
307*4882a593Smuzhiyun 	 */
308*4882a593Smuzhiyun 	flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
311*4882a593Smuzhiyun 	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 	return rk_mk_dte_v2(page) | flags;
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun static u32 rk_mk_pte_invalid(u32 pte)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun 	return pte & ~(RK_PTE_PAGE_VALID | RK_PTE_PAGE_REPRESENT);
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun /*
322*4882a593Smuzhiyun  * rk3288 iova (IOMMU Virtual Address) format
323*4882a593Smuzhiyun  *  31       22.21       12.11          0
324*4882a593Smuzhiyun  * +-----------+-----------+-------------+
325*4882a593Smuzhiyun  * | DTE index | PTE index | Page offset |
326*4882a593Smuzhiyun  * +-----------+-----------+-------------+
327*4882a593Smuzhiyun  *  31:22 - DTE index   - index of DTE in DT
328*4882a593Smuzhiyun  *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
329*4882a593Smuzhiyun  *  11: 0 - Page offset - offset into page @ PTE.page_address
330*4882a593Smuzhiyun  */
331*4882a593Smuzhiyun #define RK_IOVA_DTE_MASK    0xffc00000
332*4882a593Smuzhiyun #define RK_IOVA_DTE_SHIFT   22
333*4882a593Smuzhiyun #define RK_IOVA_PTE_MASK    0x003ff000
334*4882a593Smuzhiyun #define RK_IOVA_PTE_SHIFT   12
335*4882a593Smuzhiyun #define RK_IOVA_PAGE_MASK   0x00000fff
336*4882a593Smuzhiyun #define RK_IOVA_PAGE_SHIFT  0
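/*
 * Worked example (illustrative iova): for iova 0x12345678,
 *   dte index   = (0x12345678 & 0xffc00000) >> 22 = 0x048
 *   pte index   = (0x12345678 & 0x003ff000) >> 12 = 0x345
 *   page offset =  0x12345678 & 0x00000fff        = 0x678
 */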
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun static u32 rk_iova_dte_index(dma_addr_t iova)
339*4882a593Smuzhiyun {
340*4882a593Smuzhiyun 	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun static u32 rk_iova_pte_index(dma_addr_t iova)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun 	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun static u32 rk_iova_page_offset(dma_addr_t iova)
349*4882a593Smuzhiyun {
350*4882a593Smuzhiyun 	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun static u32 rk_iommu_read(void __iomem *base, u32 offset)
354*4882a593Smuzhiyun {
355*4882a593Smuzhiyun 	return readl(base + offset);
356*4882a593Smuzhiyun }
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
359*4882a593Smuzhiyun {
360*4882a593Smuzhiyun 	writel(value, base + offset);
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
364*4882a593Smuzhiyun {
365*4882a593Smuzhiyun 	int i;
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++)
368*4882a593Smuzhiyun 		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun static void rk_iommu_base_command(void __iomem *base, u32 command)
372*4882a593Smuzhiyun {
373*4882a593Smuzhiyun 	writel(command, base + RK_MMU_COMMAND);
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
376*4882a593Smuzhiyun 			       size_t size)
377*4882a593Smuzhiyun {
378*4882a593Smuzhiyun 	int i;
379*4882a593Smuzhiyun 	dma_addr_t iova_end = iova_start + size;
380*4882a593Smuzhiyun 	/*
381*4882a593Smuzhiyun 	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
382*4882a593Smuzhiyun 	 * entire iotlb rather than iterate over individual iovas.
383*4882a593Smuzhiyun 	 */
384*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++) {
385*4882a593Smuzhiyun 		dma_addr_t iova;
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
388*4882a593Smuzhiyun 			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
389*4882a593Smuzhiyun 	}
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
393*4882a593Smuzhiyun {
394*4882a593Smuzhiyun 	bool active = true;
395*4882a593Smuzhiyun 	int i;
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++)
398*4882a593Smuzhiyun 		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
399*4882a593Smuzhiyun 					   RK_MMU_STATUS_STALL_ACTIVE);
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 	return active;
402*4882a593Smuzhiyun }
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun 	bool enable = true;
407*4882a593Smuzhiyun 	int i;
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++)
410*4882a593Smuzhiyun 		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
411*4882a593Smuzhiyun 					   RK_MMU_STATUS_PAGING_ENABLED);
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	return enable;
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
417*4882a593Smuzhiyun {
418*4882a593Smuzhiyun 	bool done = true;
419*4882a593Smuzhiyun 	int i;
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++)
422*4882a593Smuzhiyun 		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 	return done;
425*4882a593Smuzhiyun }
426*4882a593Smuzhiyun 
427*4882a593Smuzhiyun static int rk_iommu_enable_stall(struct rk_iommu *iommu)
428*4882a593Smuzhiyun {
429*4882a593Smuzhiyun 	int ret, i;
430*4882a593Smuzhiyun 	bool val;
431*4882a593Smuzhiyun 	int retry_count = 0;
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun 	if (iommu->skip_read)
434*4882a593Smuzhiyun 		goto read_wa;
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	if (rk_iommu_is_stall_active(iommu))
437*4882a593Smuzhiyun 		return 0;
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	/* Stall can only be enabled if paging is enabled */
440*4882a593Smuzhiyun 	if (!rk_iommu_is_paging_enabled(iommu))
441*4882a593Smuzhiyun 		return 0;
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun read_wa:
444*4882a593Smuzhiyun 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
445*4882a593Smuzhiyun 	if (iommu->skip_read)
446*4882a593Smuzhiyun 		return 0;
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
449*4882a593Smuzhiyun 				 val, RK_MMU_POLL_PERIOD_US,
450*4882a593Smuzhiyun 				 RK_MMU_POLL_TIMEOUT_US);
451*4882a593Smuzhiyun 	if (ret) {
452*4882a593Smuzhiyun 		for (i = 0; i < iommu->num_mmu; i++)
453*4882a593Smuzhiyun 			dev_err(iommu->dev, "Enable stall request timed out, retry_count = %d, status: %#08x\n",
454*4882a593Smuzhiyun 				retry_count,
455*4882a593Smuzhiyun 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
456*4882a593Smuzhiyun 		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
457*4882a593Smuzhiyun 			goto read_wa;
458*4882a593Smuzhiyun 	}
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 	return ret;
461*4882a593Smuzhiyun }
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun static int rk_iommu_disable_stall(struct rk_iommu *iommu)
464*4882a593Smuzhiyun {
465*4882a593Smuzhiyun 	int ret, i;
466*4882a593Smuzhiyun 	bool val;
467*4882a593Smuzhiyun 	int retry_count = 0;
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	if (iommu->skip_read)
470*4882a593Smuzhiyun 		goto read_wa;
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun 	if (!rk_iommu_is_stall_active(iommu))
473*4882a593Smuzhiyun 		return 0;
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun read_wa:
476*4882a593Smuzhiyun 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
477*4882a593Smuzhiyun 	if (iommu->skip_read)
478*4882a593Smuzhiyun 		return 0;
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun 	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
481*4882a593Smuzhiyun 				 !val, RK_MMU_POLL_PERIOD_US,
482*4882a593Smuzhiyun 				 RK_MMU_POLL_TIMEOUT_US);
483*4882a593Smuzhiyun 	if (ret) {
484*4882a593Smuzhiyun 		for (i = 0; i < iommu->num_mmu; i++)
485*4882a593Smuzhiyun 			dev_err(iommu->dev, "Disable stall request timed out, retry_count = %d, status: %#08x\n",
486*4882a593Smuzhiyun 				retry_count,
487*4882a593Smuzhiyun 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
488*4882a593Smuzhiyun 		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
489*4882a593Smuzhiyun 			goto read_wa;
490*4882a593Smuzhiyun 	}
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun 	return ret;
493*4882a593Smuzhiyun }
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun static int rk_iommu_enable_paging(struct rk_iommu *iommu)
496*4882a593Smuzhiyun {
497*4882a593Smuzhiyun 	int ret, i;
498*4882a593Smuzhiyun 	bool val;
499*4882a593Smuzhiyun 	int retry_count = 0;
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun 	if (iommu->skip_read)
502*4882a593Smuzhiyun 		goto read_wa;
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 	if (rk_iommu_is_paging_enabled(iommu))
505*4882a593Smuzhiyun 		return 0;
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun read_wa:
508*4882a593Smuzhiyun 	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
509*4882a593Smuzhiyun 	if (iommu->skip_read)
510*4882a593Smuzhiyun 		return 0;
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
513*4882a593Smuzhiyun 				 val, RK_MMU_POLL_PERIOD_US,
514*4882a593Smuzhiyun 				 RK_MMU_POLL_TIMEOUT_US);
515*4882a593Smuzhiyun 	if (ret) {
516*4882a593Smuzhiyun 		for (i = 0; i < iommu->num_mmu; i++)
517*4882a593Smuzhiyun 			dev_err(iommu->dev, "Enable paging request timed out, retry_count = %d, status: %#08x\n",
518*4882a593Smuzhiyun 				retry_count,
519*4882a593Smuzhiyun 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
520*4882a593Smuzhiyun 		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
521*4882a593Smuzhiyun 			goto read_wa;
522*4882a593Smuzhiyun 	}
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	return ret;
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun static int rk_iommu_disable_paging(struct rk_iommu *iommu)
528*4882a593Smuzhiyun {
529*4882a593Smuzhiyun 	int ret, i;
530*4882a593Smuzhiyun 	bool val;
531*4882a593Smuzhiyun 	int retry_count = 0;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	if (iommu->skip_read)
534*4882a593Smuzhiyun 		goto read_wa;
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun 	if (!rk_iommu_is_paging_enabled(iommu))
537*4882a593Smuzhiyun 		return 0;
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun read_wa:
540*4882a593Smuzhiyun 	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
541*4882a593Smuzhiyun 	if (iommu->skip_read)
542*4882a593Smuzhiyun 		return 0;
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
545*4882a593Smuzhiyun 				 !val, RK_MMU_POLL_PERIOD_US,
546*4882a593Smuzhiyun 				 RK_MMU_POLL_TIMEOUT_US);
547*4882a593Smuzhiyun 	if (ret) {
548*4882a593Smuzhiyun 		for (i = 0; i < iommu->num_mmu; i++)
549*4882a593Smuzhiyun 			dev_err(iommu->dev, "Disable paging request timed out, retry_count = %d, status: %#08x\n",
550*4882a593Smuzhiyun 				retry_count,
551*4882a593Smuzhiyun 				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
552*4882a593Smuzhiyun 		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
553*4882a593Smuzhiyun 			goto read_wa;
554*4882a593Smuzhiyun 	}
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun 	return ret;
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun static u32 rk_iommu_read_dte_addr(void __iomem *base)
560*4882a593Smuzhiyun {
561*4882a593Smuzhiyun 	return rk_iommu_read(base, RK_MMU_DTE_ADDR);
562*4882a593Smuzhiyun }
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun static int rk_iommu_force_reset(struct rk_iommu *iommu)
565*4882a593Smuzhiyun {
566*4882a593Smuzhiyun 	int ret, i;
567*4882a593Smuzhiyun 	u32 dte_addr;
568*4882a593Smuzhiyun 	bool val;
569*4882a593Smuzhiyun 	u32 dte_address_mask;
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	if (iommu->reset_disabled)
572*4882a593Smuzhiyun 		return 0;
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	if (iommu->skip_read)
575*4882a593Smuzhiyun 		goto read_wa;
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	/*
578*4882a593Smuzhiyun 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
579*4882a593Smuzhiyun 	 * and verifying that upper 5 nybbles are read back.
580*4882a593Smuzhiyun 	 */
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	/*
583*4882a593Smuzhiyun 	 * In v2: upper 7 nybbles are read back.
584*4882a593Smuzhiyun 	 */
585*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++) {
586*4882a593Smuzhiyun 		dte_address_mask = rk_ops->pt_address(DTE_ADDR_DUMMY);
587*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_address_mask);
588*4882a593Smuzhiyun 
589*4882a593Smuzhiyun 		ret = readx_poll_timeout(rk_iommu_read_dte_addr, iommu->bases[i], dte_addr,
590*4882a593Smuzhiyun 					 dte_addr == dte_address_mask,
591*4882a593Smuzhiyun 					 RK_MMU_POLL_PERIOD_US, RK_MMU_POLL_TIMEOUT_US);
592*4882a593Smuzhiyun 		if (ret) {
593*4882a593Smuzhiyun 			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
594*4882a593Smuzhiyun 			return -EFAULT;
595*4882a593Smuzhiyun 		}
596*4882a593Smuzhiyun 	}
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun read_wa:
599*4882a593Smuzhiyun 	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
600*4882a593Smuzhiyun 	if (iommu->skip_read)
601*4882a593Smuzhiyun 		return 0;
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
604*4882a593Smuzhiyun 				 val, RK_MMU_POLL_TIMEOUT_US,
605*4882a593Smuzhiyun 				 RK_MMU_FORCE_RESET_TIMEOUT_US);
606*4882a593Smuzhiyun 	if (ret) {
607*4882a593Smuzhiyun 		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
608*4882a593Smuzhiyun 		return ret;
609*4882a593Smuzhiyun 	}
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	return 0;
612*4882a593Smuzhiyun }
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun static inline phys_addr_t rk_dte_addr_phys(u32 addr)
615*4882a593Smuzhiyun {
616*4882a593Smuzhiyun 	return (phys_addr_t)addr;
617*4882a593Smuzhiyun }
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
620*4882a593Smuzhiyun {
621*4882a593Smuzhiyun 	return dt_dma;
622*4882a593Smuzhiyun }
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun #define DT_HI_MASK GENMASK_ULL(39, 32)
625*4882a593Smuzhiyun #define DTE_BASE_HI_MASK GENMASK(11, 4)
626*4882a593Smuzhiyun #define DT_SHIFT   28
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
629*4882a593Smuzhiyun {
630*4882a593Smuzhiyun 	u64 addr64 = addr;
631*4882a593Smuzhiyun 	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
632*4882a593Smuzhiyun 	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
633*4882a593Smuzhiyun }
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
636*4882a593Smuzhiyun {
637*4882a593Smuzhiyun 	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
638*4882a593Smuzhiyun 	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
639*4882a593Smuzhiyun }
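/*
 * Worked example (illustrative value): a directory table at dma address
 * 0x23_4567_8000 is programmed into RK_MMU_DTE_ADDR as 0x45678230; bits 39:32
 * of the address (0x23) land in register bits 11:4, and rk_dte_addr_phys_v2()
 * reverses the packing when the register is read back.
 */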
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
642*4882a593Smuzhiyun {
643*4882a593Smuzhiyun 	void __iomem *base = iommu->bases[index];
644*4882a593Smuzhiyun 	u32 dte_index, pte_index, page_offset;
645*4882a593Smuzhiyun 	u32 mmu_dte_addr;
646*4882a593Smuzhiyun 	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
647*4882a593Smuzhiyun 	u32 *dte_addr;
648*4882a593Smuzhiyun 	u32 dte;
649*4882a593Smuzhiyun 	phys_addr_t pte_addr_phys = 0;
650*4882a593Smuzhiyun 	u32 *pte_addr = NULL;
651*4882a593Smuzhiyun 	u32 pte = 0;
652*4882a593Smuzhiyun 	phys_addr_t page_addr_phys = 0;
653*4882a593Smuzhiyun 	u32 page_flags = 0;
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	dte_index = rk_iova_dte_index(iova);
656*4882a593Smuzhiyun 	pte_index = rk_iova_pte_index(iova);
657*4882a593Smuzhiyun 	page_offset = rk_iova_page_offset(iova);
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
660*4882a593Smuzhiyun 	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
663*4882a593Smuzhiyun 	dte_addr = phys_to_virt(dte_addr_phys);
664*4882a593Smuzhiyun 	dte = *dte_addr;
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	if (!rk_dte_is_pt_valid(dte))
667*4882a593Smuzhiyun 		goto print_it;
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 	pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
670*4882a593Smuzhiyun 	pte_addr = phys_to_virt(pte_addr_phys);
671*4882a593Smuzhiyun 	pte = *pte_addr;
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	if (!rk_pte_is_page_valid(pte))
674*4882a593Smuzhiyun 		goto print_it;
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun 	page_addr_phys = rk_ops->pt_address(pte) + page_offset;
677*4882a593Smuzhiyun 	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun print_it:
680*4882a593Smuzhiyun 	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
681*4882a593Smuzhiyun 		&iova, dte_index, pte_index, page_offset);
682*4882a593Smuzhiyun 	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
683*4882a593Smuzhiyun 		&mmu_dte_addr_phys, &dte_addr_phys, dte,
684*4882a593Smuzhiyun 		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
685*4882a593Smuzhiyun 		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
686*4882a593Smuzhiyun }
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun static int rk_pagefault_done(struct rk_iommu *iommu)
689*4882a593Smuzhiyun {
690*4882a593Smuzhiyun 	u32 status;
691*4882a593Smuzhiyun 	u32 int_status;
692*4882a593Smuzhiyun 	dma_addr_t iova;
693*4882a593Smuzhiyun 	int i;
694*4882a593Smuzhiyun 	u32 int_mask;
695*4882a593Smuzhiyun 	irqreturn_t ret = IRQ_NONE;
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++) {
698*4882a593Smuzhiyun 		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
699*4882a593Smuzhiyun 		if (int_status == 0)
700*4882a593Smuzhiyun 			continue;
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun 		ret = IRQ_HANDLED;
703*4882a593Smuzhiyun 		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
706*4882a593Smuzhiyun 			int flags;
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
709*4882a593Smuzhiyun 			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
710*4882a593Smuzhiyun 					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun 			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
713*4882a593Smuzhiyun 				&iova,
714*4882a593Smuzhiyun 				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
715*4882a593Smuzhiyun 
716*4882a593Smuzhiyun 			log_iova(iommu, i, iova);
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 			if (!iommu->master_handle_irq) {
719*4882a593Smuzhiyun 				/*
720*4882a593Smuzhiyun 				 * Report page fault to any installed handlers.
721*4882a593Smuzhiyun 				 * Ignore the return code, though, since we always zap cache
722*4882a593Smuzhiyun 				 * and clear the page fault anyway.
723*4882a593Smuzhiyun 				 */
724*4882a593Smuzhiyun 				if (iommu->domain)
725*4882a593Smuzhiyun 					report_iommu_fault(iommu->domain, iommu->dev, iova,
726*4882a593Smuzhiyun 						   status);
727*4882a593Smuzhiyun 				else
728*4882a593Smuzhiyun 					dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
729*4882a593Smuzhiyun 			}
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun 			/*
734*4882a593Smuzhiyun 			 * The master may clear the int_mask to prevent the iommu
735*4882a593Smuzhiyun 			 * from re-entering the interrupt while mapping. So we postpone
736*4882a593Smuzhiyun 			 * sending the PAGE_FAULT_DONE command until mapping has finished.
737*4882a593Smuzhiyun 			 */
738*4882a593Smuzhiyun 			int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK);
739*4882a593Smuzhiyun 			if (int_mask != 0x0)
740*4882a593Smuzhiyun 				rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
741*4882a593Smuzhiyun 		}
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 		if (int_status & RK_MMU_IRQ_BUS_ERROR)
744*4882a593Smuzhiyun 			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 		if (int_status & ~RK_MMU_IRQ_MASK)
747*4882a593Smuzhiyun 			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
748*4882a593Smuzhiyun 				int_status);
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
751*4882a593Smuzhiyun 	}
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun 	return ret;
754*4882a593Smuzhiyun }
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun int rockchip_pagefault_done(struct device *master_dev)
757*4882a593Smuzhiyun {
758*4882a593Smuzhiyun 	struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	return rk_pagefault_done(iommu);
761*4882a593Smuzhiyun }
762*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(rockchip_pagefault_done);
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun void __iomem *rockchip_get_iommu_base(struct device *master_dev, int idx)
765*4882a593Smuzhiyun {
766*4882a593Smuzhiyun 	struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	return iommu->bases[idx];
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(rockchip_get_iommu_base);
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
773*4882a593Smuzhiyun {
774*4882a593Smuzhiyun 	struct rk_iommu *iommu = dev_id;
775*4882a593Smuzhiyun 	irqreturn_t ret = IRQ_NONE;
776*4882a593Smuzhiyun 	int err;
777*4882a593Smuzhiyun 
778*4882a593Smuzhiyun 	err = pm_runtime_get_if_in_use(iommu->dev);
779*4882a593Smuzhiyun 	if (WARN_ON_ONCE(err <= 0))
780*4882a593Smuzhiyun 		return ret;
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
783*4882a593Smuzhiyun 		goto out;
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	/* Master must call rockchip_pagefault_done to handle pagefault */
786*4882a593Smuzhiyun 	if (iommu->master_handle_irq) {
787*4882a593Smuzhiyun 		if (iommu->domain)
788*4882a593Smuzhiyun 			ret = report_iommu_fault(iommu->domain, iommu->dev, -1, 0x0);
789*4882a593Smuzhiyun 	} else {
790*4882a593Smuzhiyun 		ret = rk_pagefault_done(iommu);
791*4882a593Smuzhiyun 	}
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun out:
796*4882a593Smuzhiyun 	pm_runtime_put(iommu->dev);
797*4882a593Smuzhiyun 	return ret;
798*4882a593Smuzhiyun }
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
801*4882a593Smuzhiyun 					 dma_addr_t iova)
802*4882a593Smuzhiyun {
803*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
804*4882a593Smuzhiyun 	unsigned long flags;
805*4882a593Smuzhiyun 	phys_addr_t pt_phys, phys = 0;
806*4882a593Smuzhiyun 	u32 dte, pte;
807*4882a593Smuzhiyun 	u32 *page_table;
808*4882a593Smuzhiyun 
809*4882a593Smuzhiyun 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
812*4882a593Smuzhiyun 	if (!rk_dte_is_pt_valid(dte))
813*4882a593Smuzhiyun 		goto out;
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun 	pt_phys = rk_ops->pt_address(dte);
816*4882a593Smuzhiyun 	page_table = (u32 *)phys_to_virt(pt_phys);
817*4882a593Smuzhiyun 	pte = page_table[rk_iova_pte_index(iova)];
818*4882a593Smuzhiyun 	if (!rk_pte_is_page_valid(pte))
819*4882a593Smuzhiyun 		goto out;
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 	phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
822*4882a593Smuzhiyun out:
823*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun 	return phys;
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun 
828*4882a593Smuzhiyun static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
829*4882a593Smuzhiyun 			      dma_addr_t iova, size_t size)
830*4882a593Smuzhiyun {
831*4882a593Smuzhiyun 	struct list_head *pos;
832*4882a593Smuzhiyun 	unsigned long flags;
833*4882a593Smuzhiyun 
834*4882a593Smuzhiyun 	/* Do not zap tlb cache lines if shootdown_entire is set */
835*4882a593Smuzhiyun 	if (rk_domain->shootdown_entire)
836*4882a593Smuzhiyun 		return;
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 	/* shootdown these iova from all iommus using this domain */
839*4882a593Smuzhiyun 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
840*4882a593Smuzhiyun 	list_for_each(pos, &rk_domain->iommus) {
841*4882a593Smuzhiyun 		struct rk_iommu *iommu;
842*4882a593Smuzhiyun 		int ret;
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 		iommu = list_entry(pos, struct rk_iommu, node);
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun 		/* Only zap TLBs of IOMMUs that are powered on. */
847*4882a593Smuzhiyun 		ret = pm_runtime_get_if_in_use(iommu->dev);
848*4882a593Smuzhiyun 		if (WARN_ON_ONCE(ret < 0))
849*4882a593Smuzhiyun 			continue;
850*4882a593Smuzhiyun 		if (ret) {
851*4882a593Smuzhiyun 			WARN_ON(clk_bulk_enable(iommu->num_clocks,
852*4882a593Smuzhiyun 						iommu->clocks));
853*4882a593Smuzhiyun 			rk_iommu_zap_lines(iommu, iova, size);
854*4882a593Smuzhiyun 			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
855*4882a593Smuzhiyun 			pm_runtime_put(iommu->dev);
856*4882a593Smuzhiyun 		}
857*4882a593Smuzhiyun 	}
858*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
859*4882a593Smuzhiyun }
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
862*4882a593Smuzhiyun 					 dma_addr_t iova, size_t size)
863*4882a593Smuzhiyun {
864*4882a593Smuzhiyun 	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
865*4882a593Smuzhiyun 	if (size > SPAGE_SIZE)
866*4882a593Smuzhiyun 		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
867*4882a593Smuzhiyun 					SPAGE_SIZE);
868*4882a593Smuzhiyun }
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
871*4882a593Smuzhiyun 				  dma_addr_t iova)
872*4882a593Smuzhiyun {
873*4882a593Smuzhiyun 	u32 *page_table, *dte_addr;
874*4882a593Smuzhiyun 	u32 dte_index, dte;
875*4882a593Smuzhiyun 	phys_addr_t pt_phys;
876*4882a593Smuzhiyun 	dma_addr_t pt_dma;
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun 	assert_spin_locked(&rk_domain->dt_lock);
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun 	dte_index = rk_iova_dte_index(iova);
881*4882a593Smuzhiyun 	dte_addr = &rk_domain->dt[dte_index];
882*4882a593Smuzhiyun 	dte = *dte_addr;
883*4882a593Smuzhiyun 	if (rk_dte_is_pt_valid(dte))
884*4882a593Smuzhiyun 		goto done;
885*4882a593Smuzhiyun 
886*4882a593Smuzhiyun 	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
887*4882a593Smuzhiyun 	if (!page_table)
888*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
889*4882a593Smuzhiyun 
890*4882a593Smuzhiyun 	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
891*4882a593Smuzhiyun 	if (dma_mapping_error(dma_dev, pt_dma)) {
892*4882a593Smuzhiyun 		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
893*4882a593Smuzhiyun 		free_page((unsigned long)page_table);
894*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
895*4882a593Smuzhiyun 	}
896*4882a593Smuzhiyun 
897*4882a593Smuzhiyun 	dte = rk_ops->mk_dtentries(pt_dma);
898*4882a593Smuzhiyun 	*dte_addr = dte;
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun 	rk_table_flush(rk_domain,
901*4882a593Smuzhiyun 		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
902*4882a593Smuzhiyun done:
903*4882a593Smuzhiyun 	pt_phys = rk_ops->pt_address(dte);
904*4882a593Smuzhiyun 	return (u32 *)phys_to_virt(pt_phys);
905*4882a593Smuzhiyun }
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
908*4882a593Smuzhiyun 				  u32 *pte_addr, dma_addr_t pte_dma,
909*4882a593Smuzhiyun 				  size_t size, struct rk_iommu *iommu)
910*4882a593Smuzhiyun {
911*4882a593Smuzhiyun 	unsigned int pte_count;
912*4882a593Smuzhiyun 	unsigned int pte_total = size / SPAGE_SIZE;
913*4882a593Smuzhiyun 	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV;
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	assert_spin_locked(&rk_domain->dt_lock);
916*4882a593Smuzhiyun 
917*4882a593Smuzhiyun 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
918*4882a593Smuzhiyun 		u32 pte = pte_addr[pte_count];
919*4882a593Smuzhiyun 		if (!rk_pte_is_page_valid(pte))
920*4882a593Smuzhiyun 			break;
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 		if (iommu && iommu->need_res_map)
923*4882a593Smuzhiyun 			pte_addr[pte_count] = rk_ops->mk_ptentries(res_page,
924*4882a593Smuzhiyun 								   prot);
925*4882a593Smuzhiyun 		else
926*4882a593Smuzhiyun 			pte_addr[pte_count] = rk_mk_pte_invalid(pte);
927*4882a593Smuzhiyun 	}
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	rk_table_flush(rk_domain, pte_dma, pte_count);
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun 	return pte_count * SPAGE_SIZE;
932*4882a593Smuzhiyun }
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun static struct rk_iommu *rk_iommu_get(struct rk_iommu_domain *rk_domain)
935*4882a593Smuzhiyun {
936*4882a593Smuzhiyun 	unsigned long flags;
937*4882a593Smuzhiyun 	struct list_head *pos;
938*4882a593Smuzhiyun 	struct rk_iommu *iommu = NULL;
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
941*4882a593Smuzhiyun 	list_for_each(pos, &rk_domain->iommus) {
942*4882a593Smuzhiyun 		iommu = list_entry(pos, struct rk_iommu, node);
943*4882a593Smuzhiyun 		if (iommu->need_res_map)
944*4882a593Smuzhiyun 			break;
945*4882a593Smuzhiyun 	}
946*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	return iommu;
949*4882a593Smuzhiyun }
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
952*4882a593Smuzhiyun 			     dma_addr_t pte_dma, dma_addr_t iova,
953*4882a593Smuzhiyun 			     phys_addr_t paddr, size_t size, int prot)
954*4882a593Smuzhiyun {
955*4882a593Smuzhiyun 	unsigned int pte_count;
956*4882a593Smuzhiyun 	unsigned int pte_total = size / SPAGE_SIZE;
957*4882a593Smuzhiyun 	phys_addr_t page_phys;
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	assert_spin_locked(&rk_domain->dt_lock);
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 	for (pte_count = 0; pte_count < pte_total; pte_count++) {
962*4882a593Smuzhiyun 		u32 pte = pte_addr[pte_count];
963*4882a593Smuzhiyun 
964*4882a593Smuzhiyun 		if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
965*4882a593Smuzhiyun 			goto unwind;
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 		if (prot & IOMMU_PRIV) {
968*4882a593Smuzhiyun 			pte_addr[pte_count] = rk_ops->mk_ptentries(res_page, prot);
969*4882a593Smuzhiyun 		} else {
970*4882a593Smuzhiyun 			pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 			paddr += SPAGE_SIZE;
973*4882a593Smuzhiyun 		}
974*4882a593Smuzhiyun 	}
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	rk_table_flush(rk_domain, pte_dma, pte_total);
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 	/*
979*4882a593Smuzhiyun 	 * Zap the first and last iova to evict from iotlb any previously
980*4882a593Smuzhiyun 	 * mapped cachelines holding stale values for its dte and pte.
981*4882a593Smuzhiyun 	 * We only zap the first and last iova, since only they could have
982*4882a593Smuzhiyun 	 * dte or pte shared with an existing mapping.
983*4882a593Smuzhiyun 	 */
984*4882a593Smuzhiyun 	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun 	return 0;
987*4882a593Smuzhiyun unwind:
988*4882a593Smuzhiyun 	/* Unmap the range of iovas that we just mapped */
989*4882a593Smuzhiyun 	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
990*4882a593Smuzhiyun 			    pte_count * SPAGE_SIZE, NULL);
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun 	iova += pte_count * SPAGE_SIZE;
993*4882a593Smuzhiyun 	page_phys = rk_ops->pt_address(pte_addr[pte_count]);
994*4882a593Smuzhiyun 	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
995*4882a593Smuzhiyun 	       &iova, &page_phys, &paddr, prot);
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 	return -EADDRINUSE;
998*4882a593Smuzhiyun }
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
1001*4882a593Smuzhiyun 			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1002*4882a593Smuzhiyun {
1003*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1004*4882a593Smuzhiyun 	unsigned long flags;
1005*4882a593Smuzhiyun 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
1006*4882a593Smuzhiyun 	u32 *page_table, *pte_addr;
1007*4882a593Smuzhiyun 	u32 dte, pte_index;
1008*4882a593Smuzhiyun 	int ret;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	/*
1013*4882a593Smuzhiyun 	 * pgsize_bitmap specifies iova sizes that fit in one page table
1014*4882a593Smuzhiyun 	 * (1024 4-KiB pages = 4 MiB).
1015*4882a593Smuzhiyun 	 * So, size will always be 4096 <= size <= 4194304.
1016*4882a593Smuzhiyun 	 * Since iommu_map() guarantees that both iova and size will be
1017*4882a593Smuzhiyun 	 * aligned, we will always only be mapping from a single dte here.
1018*4882a593Smuzhiyun 	 */
1019*4882a593Smuzhiyun 	page_table = rk_dte_get_page_table(rk_domain, iova);
1020*4882a593Smuzhiyun 	if (IS_ERR(page_table)) {
1021*4882a593Smuzhiyun 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
1022*4882a593Smuzhiyun 		return PTR_ERR(page_table);
1023*4882a593Smuzhiyun 	}
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
1026*4882a593Smuzhiyun 	pte_index = rk_iova_pte_index(iova);
1027*4882a593Smuzhiyun 	pte_addr = &page_table[pte_index];
1028*4882a593Smuzhiyun 	pte_dma = rk_ops->pt_address(dte) + pte_index * sizeof(u32);
1029*4882a593Smuzhiyun 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
1030*4882a593Smuzhiyun 				paddr, size, prot);
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 	return ret;
1035*4882a593Smuzhiyun }
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
1038*4882a593Smuzhiyun 			     size_t size, struct iommu_iotlb_gather *gather)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1041*4882a593Smuzhiyun 	unsigned long flags;
1042*4882a593Smuzhiyun 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
1043*4882a593Smuzhiyun 	phys_addr_t pt_phys;
1044*4882a593Smuzhiyun 	u32 dte;
1045*4882a593Smuzhiyun 	u32 *pte_addr;
1046*4882a593Smuzhiyun 	size_t unmap_size;
1047*4882a593Smuzhiyun 	struct rk_iommu *iommu = rk_iommu_get(rk_domain);
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	spin_lock_irqsave(&rk_domain->dt_lock, flags);
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 	/*
1052*4882a593Smuzhiyun 	 * pgsize_bitmap specifies iova sizes that fit in one page table
1053*4882a593Smuzhiyun 	 * (1024 4-KiB pages = 4 MiB).
1054*4882a593Smuzhiyun 	 * So, size will always be 4096 <= size <= 4194304.
1055*4882a593Smuzhiyun 	 * Since iommu_unmap() guarantees that both iova and size will be
1056*4882a593Smuzhiyun 	 * aligned, we will always only be unmapping from a single dte here.
1057*4882a593Smuzhiyun 	 */
1058*4882a593Smuzhiyun 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
1059*4882a593Smuzhiyun 	/* Just return 0 if iova is unmapped */
1060*4882a593Smuzhiyun 	if (!rk_dte_is_pt_valid(dte)) {
1061*4882a593Smuzhiyun 		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
1062*4882a593Smuzhiyun 		return 0;
1063*4882a593Smuzhiyun 	}
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	pt_phys = rk_ops->pt_address(dte);
1066*4882a593Smuzhiyun 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
1067*4882a593Smuzhiyun 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
1068*4882a593Smuzhiyun 	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
1069*4882a593Smuzhiyun 					 iommu);
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	/* Shootdown iotlb entries for iova range that was just unmapped */
1074*4882a593Smuzhiyun 	rk_iommu_zap_iova(rk_domain, iova, unmap_size);
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 	return unmap_size;
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun static void rk_iommu_flush_tlb_all(struct iommu_domain *domain)
1080*4882a593Smuzhiyun {
1081*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1082*4882a593Smuzhiyun 	struct list_head *pos;
1083*4882a593Smuzhiyun 	unsigned long flags;
1084*4882a593Smuzhiyun 	int i;
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
1087*4882a593Smuzhiyun 	list_for_each(pos, &rk_domain->iommus) {
1088*4882a593Smuzhiyun 		struct rk_iommu *iommu;
1089*4882a593Smuzhiyun 		int ret;
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 		iommu = list_entry(pos, struct rk_iommu, node);
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 		ret = pm_runtime_get_if_in_use(iommu->dev);
1094*4882a593Smuzhiyun 		if (WARN_ON_ONCE(ret < 0))
1095*4882a593Smuzhiyun 			continue;
1096*4882a593Smuzhiyun 		if (ret) {
1097*4882a593Smuzhiyun 			WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
1098*4882a593Smuzhiyun 			for (i = 0; i < iommu->num_mmu; i++)
1099*4882a593Smuzhiyun 				rk_iommu_write(iommu->bases[i], RK_MMU_COMMAND,
1100*4882a593Smuzhiyun 					       RK_MMU_CMD_ZAP_CACHE);
1101*4882a593Smuzhiyun 			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
1102*4882a593Smuzhiyun 			pm_runtime_put(iommu->dev);
1103*4882a593Smuzhiyun 		}
1104*4882a593Smuzhiyun 	}
1105*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
1109*4882a593Smuzhiyun {
1110*4882a593Smuzhiyun 	struct rk_iommudata *data = dev_iommu_priv_get(dev);
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	return data ? data->iommu : NULL;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun /* Must be called with iommu powered on and attached */
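/*
 * Disable sequence: stall the iommu, turn paging off, then mask interrupts
 * and clear the directory base on every mmu instance before releasing the
 * stall. Errors on this path are deliberately ignored (see below).
 */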
1116*4882a593Smuzhiyun static void rk_iommu_disable(struct rk_iommu *iommu)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun 	int i;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	/* Ignore error while disabling, just keep going */
1121*4882a593Smuzhiyun 	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
1122*4882a593Smuzhiyun 	rk_iommu_enable_stall(iommu);
1123*4882a593Smuzhiyun 	rk_iommu_disable_paging(iommu);
1124*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++) {
1125*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
1126*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
1127*4882a593Smuzhiyun 	}
1128*4882a593Smuzhiyun 	rk_iommu_disable_stall(iommu);
1129*4882a593Smuzhiyun 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	iommu->iommu_enabled = false;
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun int rockchip_iommu_disable(struct device *dev)
1135*4882a593Smuzhiyun {
1136*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	iommu = rk_iommu_from_dev(dev);
1139*4882a593Smuzhiyun 	if (!iommu)
1140*4882a593Smuzhiyun 		return -ENODEV;
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	rk_iommu_disable(iommu);
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun 	return 0;
1145*4882a593Smuzhiyun }
1146*4882a593Smuzhiyun EXPORT_SYMBOL(rockchip_iommu_disable);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun /* Must be called with iommu powered on and attached */
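/*
 * Enable sequence: with clocks on and the iommu stalled, force-reset the
 * mmu, program the directory base, zap the TLB and unmask interrupts on
 * every instance, apply the auto-gating workaround, then enable paging and
 * release the stall.
 */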
1149*4882a593Smuzhiyun static int rk_iommu_enable(struct rk_iommu *iommu)
1150*4882a593Smuzhiyun {
1151*4882a593Smuzhiyun 	struct iommu_domain *domain = iommu->domain;
1152*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1153*4882a593Smuzhiyun 	int ret, i;
1154*4882a593Smuzhiyun 	u32 auto_gate;
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
1157*4882a593Smuzhiyun 	if (ret)
1158*4882a593Smuzhiyun 		return ret;
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	ret = rk_iommu_enable_stall(iommu);
1161*4882a593Smuzhiyun 	if (ret)
1162*4882a593Smuzhiyun 		goto out_disable_clocks;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	ret = rk_iommu_force_reset(iommu);
1165*4882a593Smuzhiyun 	if (ret)
1166*4882a593Smuzhiyun 		goto out_disable_stall;
1167*4882a593Smuzhiyun 
1168*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++) {
1169*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
1170*4882a593Smuzhiyun 			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
1171*4882a593Smuzhiyun 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
1172*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 		/* Workaround for the iommu getting blocked: BIT(31) defaults to 1 */
1175*4882a593Smuzhiyun 		auto_gate = rk_iommu_read(iommu->bases[i], RK_MMU_AUTO_GATING);
1176*4882a593Smuzhiyun 		auto_gate |= DISABLE_FETCH_DTE_TIME_LIMIT;
1177*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_AUTO_GATING, auto_gate);
1178*4882a593Smuzhiyun 	}
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	ret = rk_iommu_enable_paging(iommu);
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun out_disable_stall:
1183*4882a593Smuzhiyun 	rk_iommu_disable_stall(iommu);
1184*4882a593Smuzhiyun out_disable_clocks:
1185*4882a593Smuzhiyun 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	if (!ret)
1188*4882a593Smuzhiyun 		iommu->iommu_enabled = true;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	return ret;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun int rockchip_iommu_enable(struct device *dev)
1194*4882a593Smuzhiyun {
1195*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	iommu = rk_iommu_from_dev(dev);
1198*4882a593Smuzhiyun 	if (!iommu)
1199*4882a593Smuzhiyun 		return -ENODEV;
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	return rk_iommu_enable(iommu);
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun EXPORT_SYMBOL(rockchip_iommu_enable);
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun bool rockchip_iommu_is_enabled(struct device *dev)
1206*4882a593Smuzhiyun {
1207*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	iommu = rk_iommu_from_dev(dev);
1210*4882a593Smuzhiyun 	if (!iommu)
1211*4882a593Smuzhiyun 		return false;
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	return iommu->iommu_enabled;
1214*4882a593Smuzhiyun }
1215*4882a593Smuzhiyun EXPORT_SYMBOL(rockchip_iommu_is_enabled);
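/*
 * Usage sketch (hypothetical, not taken from any in-tree caller): a master
 * driver that manages its own power sequence could pair the exported
 * helpers above around a power transition. "vop_dev" is a placeholder for
 * the master's struct device.
 *
 *	if (rockchip_iommu_is_enabled(vop_dev))
 *		rockchip_iommu_disable(vop_dev);
 *	... power the master block down and back up ...
 *	ret = rockchip_iommu_enable(vop_dev);
 *	if (ret)
 *		dev_err(vop_dev, "failed to re-enable iommu: %d\n", ret);
 */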
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun int rockchip_iommu_force_reset(struct device *dev)
1218*4882a593Smuzhiyun {
1219*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1220*4882a593Smuzhiyun 	int ret;
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	iommu = rk_iommu_from_dev(dev);
1223*4882a593Smuzhiyun 	if (!iommu)
1224*4882a593Smuzhiyun 		return -ENODEV;
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	ret = rk_iommu_enable_stall(iommu);
1227*4882a593Smuzhiyun 	if (ret)
1228*4882a593Smuzhiyun 		return ret;
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	ret = rk_iommu_force_reset(iommu);
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	rk_iommu_disable_stall(iommu);
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	return ret;
1236*4882a593Smuzhiyun }
1237*4882a593Smuzhiyun EXPORT_SYMBOL(rockchip_iommu_force_reset);
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun static void rk_iommu_detach_device(struct iommu_domain *domain,
1240*4882a593Smuzhiyun 				   struct device *dev)
1241*4882a593Smuzhiyun {
1242*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1243*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1244*4882a593Smuzhiyun 	unsigned long flags;
1245*4882a593Smuzhiyun 	int ret;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	/* Allow 'virtual devices' (e.g., drm) to detach from the domain */
1248*4882a593Smuzhiyun 	iommu = rk_iommu_from_dev(dev);
1249*4882a593Smuzhiyun 	if (!iommu)
1250*4882a593Smuzhiyun 		return;
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	dev_dbg(dev, "Detaching from iommu domain\n");
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	if (!iommu->domain)
1255*4882a593Smuzhiyun 		return;
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	iommu->domain = NULL;
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
1260*4882a593Smuzhiyun 	list_del_init(&iommu->node);
1261*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	ret = pm_runtime_get_if_in_use(iommu->dev);
1264*4882a593Smuzhiyun 	WARN_ON_ONCE(ret < 0);
1265*4882a593Smuzhiyun 	if (ret > 0) {
1266*4882a593Smuzhiyun 		rk_iommu_disable(iommu);
1267*4882a593Smuzhiyun 		pm_runtime_put(iommu->dev);
1268*4882a593Smuzhiyun 	}
1269*4882a593Smuzhiyun }
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun static int rk_iommu_attach_device(struct iommu_domain *domain,
1272*4882a593Smuzhiyun 		struct device *dev)
1273*4882a593Smuzhiyun {
1274*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1275*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1276*4882a593Smuzhiyun 	unsigned long flags;
1277*4882a593Smuzhiyun 	int ret;
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	/*
1280*4882a593Smuzhiyun 	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
1281*4882a593Smuzhiyun 	 * Such a device does not belong to an iommu group.
1282*4882a593Smuzhiyun 	 */
1283*4882a593Smuzhiyun 	iommu = rk_iommu_from_dev(dev);
1284*4882a593Smuzhiyun 	if (!iommu)
1285*4882a593Smuzhiyun 		return 0;
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	dev_dbg(dev, "Attaching to iommu domain\n");
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	if (iommu->domain)
1290*4882a593Smuzhiyun 		rk_iommu_detach_device(iommu->domain, dev);
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	iommu->domain = domain;
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	/* Attaching a NULL domain disables the iommu */
1295*4882a593Smuzhiyun 	if (!domain)
1296*4882a593Smuzhiyun 		return 0;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
1299*4882a593Smuzhiyun 	list_add_tail(&iommu->node, &rk_domain->iommus);
1300*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	rk_domain->shootdown_entire = iommu->shootdown_entire;
1303*4882a593Smuzhiyun 	ret = pm_runtime_get_if_in_use(iommu->dev);
1304*4882a593Smuzhiyun 	if (!ret || WARN_ON_ONCE(ret < 0))
1305*4882a593Smuzhiyun 		return 0;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	ret = rk_iommu_enable(iommu);
1308*4882a593Smuzhiyun 	if (ret)
1309*4882a593Smuzhiyun 		rk_iommu_detach_device(iommu->domain, dev);
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	pm_runtime_put(iommu->dev);
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	return ret;
1314*4882a593Smuzhiyun }
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
1317*4882a593Smuzhiyun {
1318*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
1321*4882a593Smuzhiyun 		return NULL;
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	if (!dma_dev)
1324*4882a593Smuzhiyun 		return NULL;
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
1327*4882a593Smuzhiyun 	if (!rk_domain)
1328*4882a593Smuzhiyun 		return NULL;
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	if (type == IOMMU_DOMAIN_DMA &&
1331*4882a593Smuzhiyun 	    iommu_get_dma_cookie(&rk_domain->domain))
1332*4882a593Smuzhiyun 		goto err_free_domain;
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	/*
1335*4882a593Smuzhiyun 	 * rk32xx iommus use a 2 level pagetable.
1336*4882a593Smuzhiyun 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
1337*4882a593Smuzhiyun 	 * Allocate one 4 KiB page for each table.
1338*4882a593Smuzhiyun 	 */
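	/*
	 * Sizing note: 1024 entries * 4 bytes = 4096 bytes, so one zeroed
	 * page holds the whole directory. GFP_DMA32 keeps that page below
	 * 4 GiB; the directory base written to RK_MMU_DTE_ADDR is produced
	 * by rk_ops->dma_addr_dte(), with the v2 variant handling any upper
	 * physical address bits.
	 */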
1339*4882a593Smuzhiyun 	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
1340*4882a593Smuzhiyun 	if (!rk_domain->dt)
1341*4882a593Smuzhiyun 		goto err_put_cookie;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
1344*4882a593Smuzhiyun 					   SPAGE_SIZE, DMA_TO_DEVICE);
1345*4882a593Smuzhiyun 	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
1346*4882a593Smuzhiyun 		dev_err(dma_dev, "DMA map error for DT\n");
1347*4882a593Smuzhiyun 		goto err_free_dt;
1348*4882a593Smuzhiyun 	}
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	spin_lock_init(&rk_domain->iommus_lock);
1351*4882a593Smuzhiyun 	spin_lock_init(&rk_domain->dt_lock);
1352*4882a593Smuzhiyun 	INIT_LIST_HEAD(&rk_domain->iommus);
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	rk_domain->domain.geometry.aperture_start = 0;
1355*4882a593Smuzhiyun 	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
1356*4882a593Smuzhiyun 	rk_domain->domain.geometry.force_aperture = true;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	return &rk_domain->domain;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun err_free_dt:
1361*4882a593Smuzhiyun 	free_page((unsigned long)rk_domain->dt);
1362*4882a593Smuzhiyun err_put_cookie:
1363*4882a593Smuzhiyun 	if (type == IOMMU_DOMAIN_DMA)
1364*4882a593Smuzhiyun 		iommu_put_dma_cookie(&rk_domain->domain);
1365*4882a593Smuzhiyun err_free_domain:
1366*4882a593Smuzhiyun 	kfree(rk_domain);
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	return NULL;
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun static void rk_iommu_domain_free(struct iommu_domain *domain)
1372*4882a593Smuzhiyun {
1373*4882a593Smuzhiyun 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1374*4882a593Smuzhiyun 	int i;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	WARN_ON(!list_empty(&rk_domain->iommus));
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	for (i = 0; i < NUM_DT_ENTRIES; i++) {
1379*4882a593Smuzhiyun 		u32 dte = rk_domain->dt[i];
1380*4882a593Smuzhiyun 		if (rk_dte_is_pt_valid(dte)) {
1381*4882a593Smuzhiyun 			phys_addr_t pt_phys = rk_ops->pt_address(dte);
1382*4882a593Smuzhiyun 			u32 *page_table = phys_to_virt(pt_phys);
1383*4882a593Smuzhiyun 			dma_unmap_single(dma_dev, pt_phys,
1384*4882a593Smuzhiyun 					 SPAGE_SIZE, DMA_TO_DEVICE);
1385*4882a593Smuzhiyun 			free_page((unsigned long)page_table);
1386*4882a593Smuzhiyun 		}
1387*4882a593Smuzhiyun 	}
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 	dma_unmap_single(dma_dev, rk_domain->dt_dma,
1390*4882a593Smuzhiyun 			 SPAGE_SIZE, DMA_TO_DEVICE);
1391*4882a593Smuzhiyun 	free_page((unsigned long)rk_domain->dt);
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	kfree(rk_domain);
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun static struct iommu_device *rk_iommu_probe_device(struct device *dev)
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun 	struct rk_iommudata *data;
1399*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	data = dev_iommu_priv_get(dev);
1402*4882a593Smuzhiyun 	if (!data)
1403*4882a593Smuzhiyun 		return ERR_PTR(-ENODEV);
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	iommu = rk_iommu_from_dev(dev);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	data->link = device_link_add(dev, iommu->dev,
1408*4882a593Smuzhiyun 				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	data->defer_attach = false;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	/* set max segment size for dev, needed for single chunk map */
1413*4882a593Smuzhiyun 	if (!dev->dma_parms)
1414*4882a593Smuzhiyun 		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
1415*4882a593Smuzhiyun 	if (!dev->dma_parms)
1416*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	return &iommu->iommu;
1421*4882a593Smuzhiyun }
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun static void rk_iommu_release_device(struct device *dev)
1424*4882a593Smuzhiyun {
1425*4882a593Smuzhiyun 	struct rk_iommudata *data = dev_iommu_priv_get(dev);
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	device_link_del(data->link);
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun static struct iommu_group *rk_iommu_device_group(struct device *dev)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	iommu = rk_iommu_from_dev(dev);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	return iommu_group_ref_get(iommu->group);
1437*4882a593Smuzhiyun }
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun static bool rk_iommu_is_attach_deferred(struct iommu_domain *domain,
1440*4882a593Smuzhiyun 					struct device *dev)
1441*4882a593Smuzhiyun {
1442*4882a593Smuzhiyun 	struct rk_iommudata *data = dev_iommu_priv_get(dev);
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	return data->defer_attach;
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun static int rk_iommu_of_xlate(struct device *dev,
1448*4882a593Smuzhiyun 			     struct of_phandle_args *args)
1449*4882a593Smuzhiyun {
1450*4882a593Smuzhiyun 	struct platform_device *iommu_dev;
1451*4882a593Smuzhiyun 	struct rk_iommudata *data;
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
1454*4882a593Smuzhiyun 	if (!data)
1455*4882a593Smuzhiyun 		return -ENOMEM;
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	iommu_dev = of_find_device_by_node(args->np);
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	data->iommu = platform_get_drvdata(iommu_dev);
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	if (strstr(dev_name(dev), "vop"))
1462*4882a593Smuzhiyun 		data->defer_attach = true;
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	dev_iommu_priv_set(dev, data);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	platform_device_put(iommu_dev);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	return 0;
1469*4882a593Smuzhiyun }
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun void rockchip_iommu_mask_irq(struct device *dev)
1472*4882a593Smuzhiyun {
1473*4882a593Smuzhiyun 	struct rk_iommu *iommu = rk_iommu_from_dev(dev);
1474*4882a593Smuzhiyun 	int i;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	if (!iommu)
1477*4882a593Smuzhiyun 		return;
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++)
1480*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
1481*4882a593Smuzhiyun }
1482*4882a593Smuzhiyun EXPORT_SYMBOL(rockchip_iommu_mask_irq);
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun void rockchip_iommu_unmask_irq(struct device *dev)
1485*4882a593Smuzhiyun {
1486*4882a593Smuzhiyun 	struct rk_iommu *iommu = rk_iommu_from_dev(dev);
1487*4882a593Smuzhiyun 	int i;
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	if (!iommu)
1490*4882a593Smuzhiyun 		return;
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_mmu; i++) {
1493*4882a593Smuzhiyun 		/* Need to zap tlb in case of mapping during pagefault */
1494*4882a593Smuzhiyun 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
1495*4882a593Smuzhiyun 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
1496*4882a593Smuzhiyun 		/* Leave iommu in pagefault state until mapping finished */
1497*4882a593Smuzhiyun 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
1498*4882a593Smuzhiyun 	}
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun EXPORT_SYMBOL(rockchip_iommu_unmask_irq);
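/*
 * Usage sketch (hypothetical caller): the two helpers above support masters
 * that service page faults themselves ("rockchip,master-handle-irq"): mask
 * the iommu irq, map the faulting range through the IOMMU API, then unmask,
 * which also zaps the TLB and issues RK_MMU_CMD_PAGE_FAULT_DONE.
 * "master_dev", "domain", "fault_iova" and "paddr" are placeholders.
 *
 *	rockchip_iommu_mask_irq(master_dev);
 *	ret = iommu_map(domain, fault_iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	rockchip_iommu_unmask_irq(master_dev);
 */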
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun static const struct iommu_ops rk_iommu_ops = {
1503*4882a593Smuzhiyun 	.domain_alloc = rk_iommu_domain_alloc,
1504*4882a593Smuzhiyun 	.domain_free = rk_iommu_domain_free,
1505*4882a593Smuzhiyun 	.attach_dev = rk_iommu_attach_device,
1506*4882a593Smuzhiyun 	.detach_dev = rk_iommu_detach_device,
1507*4882a593Smuzhiyun 	.map = rk_iommu_map,
1508*4882a593Smuzhiyun 	.unmap = rk_iommu_unmap,
1509*4882a593Smuzhiyun 	.flush_iotlb_all = rk_iommu_flush_tlb_all,
1510*4882a593Smuzhiyun 	.probe_device = rk_iommu_probe_device,
1511*4882a593Smuzhiyun 	.release_device = rk_iommu_release_device,
1512*4882a593Smuzhiyun 	.iova_to_phys = rk_iommu_iova_to_phys,
1513*4882a593Smuzhiyun 	.is_attach_deferred = rk_iommu_is_attach_deferred,
1514*4882a593Smuzhiyun 	.device_group = rk_iommu_device_group,
1515*4882a593Smuzhiyun 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
1516*4882a593Smuzhiyun 	.of_xlate = rk_iommu_of_xlate,
1517*4882a593Smuzhiyun };
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun static int rk_iommu_probe(struct platform_device *pdev)
1520*4882a593Smuzhiyun {
1521*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
1522*4882a593Smuzhiyun 	struct rk_iommu *iommu;
1523*4882a593Smuzhiyun 	struct resource *res;
1524*4882a593Smuzhiyun 	const struct rk_iommu_ops *ops;
1525*4882a593Smuzhiyun 	int num_res = pdev->num_resources;
1526*4882a593Smuzhiyun 	int err, i;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
1529*4882a593Smuzhiyun 	if (!iommu)
1530*4882a593Smuzhiyun 		return -ENOMEM;
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	platform_set_drvdata(pdev, iommu);
1533*4882a593Smuzhiyun 	iommu->dev = dev;
1534*4882a593Smuzhiyun 	iommu->num_mmu = 0;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	ops = of_device_get_match_data(dev);
1537*4882a593Smuzhiyun 	if (!rk_ops)
1538*4882a593Smuzhiyun 		rk_ops = ops;
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	/*
1541*4882a593Smuzhiyun 	 * That should not happen unless different versions of the
1542*4882a593Smuzhiyun 	 * hardware block are embedded in the same SoC.
1543*4882a593Smuzhiyun 	 */
1544*4882a593Smuzhiyun 	if (WARN_ON(rk_ops != ops))
1545*4882a593Smuzhiyun 		return -EINVAL;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
1548*4882a593Smuzhiyun 				    GFP_KERNEL);
1549*4882a593Smuzhiyun 	if (!iommu->bases)
1550*4882a593Smuzhiyun 		return -ENOMEM;
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 	for (i = 0; i < num_res; i++) {
1553*4882a593Smuzhiyun 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
1554*4882a593Smuzhiyun 		if (!res)
1555*4882a593Smuzhiyun 			continue;
1556*4882a593Smuzhiyun 		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1557*4882a593Smuzhiyun 		if (IS_ERR(iommu->bases[i]))
1558*4882a593Smuzhiyun 			continue;
1559*4882a593Smuzhiyun 		iommu->num_mmu++;
1560*4882a593Smuzhiyun 	}
1561*4882a593Smuzhiyun 	if (iommu->num_mmu == 0)
1562*4882a593Smuzhiyun 		return PTR_ERR(iommu->bases[0]);
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	iommu->num_irq = platform_irq_count(pdev);
1565*4882a593Smuzhiyun 	if (iommu->num_irq < 0)
1566*4882a593Smuzhiyun 		return iommu->num_irq;
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	iommu->reset_disabled = device_property_read_bool(dev,
1569*4882a593Smuzhiyun 					"rockchip,disable-mmu-reset");
1570*4882a593Smuzhiyun 	iommu->skip_read = device_property_read_bool(dev,
1571*4882a593Smuzhiyun 					"rockchip,skip-mmu-read");
1572*4882a593Smuzhiyun 	iommu->dlr_disable = device_property_read_bool(dev,
1573*4882a593Smuzhiyun 					"rockchip,disable-device-link-resume");
1574*4882a593Smuzhiyun 	iommu->shootdown_entire = device_property_read_bool(dev,
1575*4882a593Smuzhiyun 					"rockchip,shootdown-entire");
1576*4882a593Smuzhiyun 	iommu->master_handle_irq = device_property_read_bool(dev,
1577*4882a593Smuzhiyun 					"rockchip,master-handle-irq");
1578*4882a593Smuzhiyun 	if (of_machine_is_compatible("rockchip,rv1126") ||
1579*4882a593Smuzhiyun 	    of_machine_is_compatible("rockchip,rv1109"))
1580*4882a593Smuzhiyun 		iommu->cmd_retry = device_property_read_bool(dev,
1581*4882a593Smuzhiyun 					"rockchip,enable-cmd-retry");
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 	iommu->need_res_map = device_property_read_bool(dev,
1584*4882a593Smuzhiyun 					"rockchip,reserve-map");
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	/*
1587*4882a593Smuzhiyun 	 * iommu clocks should be present for all new devices and devicetrees
1588*4882a593Smuzhiyun 	 * but there are older devicetrees without clocks out in the wild.
1589*4882a593Smuzhiyun 	 * So treat clocks as optional for the time being.
1590*4882a593Smuzhiyun 	 */
1591*4882a593Smuzhiyun 	err = devm_clk_bulk_get_all(dev, &iommu->clocks);
1592*4882a593Smuzhiyun 	if (err == -ENOENT)
1593*4882a593Smuzhiyun 		iommu->num_clocks = 0;
1594*4882a593Smuzhiyun 	else if (err < 0)
1595*4882a593Smuzhiyun 		return err;
1596*4882a593Smuzhiyun 	else
1597*4882a593Smuzhiyun 		iommu->num_clocks = err;
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
1600*4882a593Smuzhiyun 	if (err)
1601*4882a593Smuzhiyun 		return err;
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 	iommu->group = iommu_group_alloc();
1604*4882a593Smuzhiyun 	if (IS_ERR(iommu->group)) {
1605*4882a593Smuzhiyun 		err = PTR_ERR(iommu->group);
1606*4882a593Smuzhiyun 		goto err_unprepare_clocks;
1607*4882a593Smuzhiyun 	}
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1610*4882a593Smuzhiyun 	if (err)
1611*4882a593Smuzhiyun 		goto err_put_group;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	err = iommu_device_register(&iommu->iommu);
1618*4882a593Smuzhiyun 	if (err)
1619*4882a593Smuzhiyun 		goto err_remove_sysfs;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	/*
1622*4882a593Smuzhiyun 	 * Use the first registered IOMMU device for domain to use with DMA
1623*4882a593Smuzhiyun 	 * API, since a domain might not physically correspond to a single
1624*4882a593Smuzhiyun 	 * IOMMU device.
1625*4882a593Smuzhiyun 	 */
1626*4882a593Smuzhiyun 	if (!dma_dev)
1627*4882a593Smuzhiyun 		dma_dev = &pdev->dev;
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	pm_runtime_enable(dev);
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	if (iommu->skip_read)
1634*4882a593Smuzhiyun 		goto skip_request_irq;
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_irq; i++) {
1637*4882a593Smuzhiyun 		int irq = platform_get_irq(pdev, i);
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 		if (irq < 0)
1640*4882a593Smuzhiyun 			return irq;
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
1643*4882a593Smuzhiyun 				       IRQF_SHARED, dev_name(dev), iommu);
1644*4882a593Smuzhiyun 		if (err) {
1645*4882a593Smuzhiyun 			pm_runtime_disable(dev);
1646*4882a593Smuzhiyun 			goto err_remove_sysfs;
1647*4882a593Smuzhiyun 		}
1648*4882a593Smuzhiyun 	}
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun skip_request_irq:
1651*4882a593Smuzhiyun 	if (!res_page && iommu->need_res_map) {
1652*4882a593Smuzhiyun 		res_page = __pa_symbol(reserve_range);
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun 		pr_info("%s,%d, res_page = 0x%pa\n", __func__, __LINE__, &res_page);
1655*4882a593Smuzhiyun 	}
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	return 0;
1660*4882a593Smuzhiyun err_remove_sysfs:
1661*4882a593Smuzhiyun 	iommu_device_sysfs_remove(&iommu->iommu);
1662*4882a593Smuzhiyun err_put_group:
1663*4882a593Smuzhiyun 	iommu_group_put(iommu->group);
1664*4882a593Smuzhiyun err_unprepare_clocks:
1665*4882a593Smuzhiyun 	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
1666*4882a593Smuzhiyun 	return err;
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun static void rk_iommu_shutdown(struct platform_device *pdev)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun 	struct rk_iommu *iommu = platform_get_drvdata(pdev);
1672*4882a593Smuzhiyun 	int i;
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 	if (iommu->skip_read)
1675*4882a593Smuzhiyun 		goto skip_free_irq;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 	for (i = 0; i < iommu->num_irq; i++) {
1678*4882a593Smuzhiyun 		int irq = platform_get_irq(pdev, i);
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 		devm_free_irq(iommu->dev, irq, iommu);
1681*4882a593Smuzhiyun 	}
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun skip_free_irq:
1684*4882a593Smuzhiyun 	if (!iommu->dlr_disable)
1685*4882a593Smuzhiyun 		pm_runtime_force_suspend(&pdev->dev);
1686*4882a593Smuzhiyun }
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun static int __maybe_unused rk_iommu_suspend(struct device *dev)
1689*4882a593Smuzhiyun {
1690*4882a593Smuzhiyun 	struct rk_iommu *iommu = dev_get_drvdata(dev);
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 	if (!iommu->domain)
1693*4882a593Smuzhiyun 		return 0;
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 	if (iommu->dlr_disable)
1696*4882a593Smuzhiyun 		return 0;
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	rk_iommu_disable(iommu);
1699*4882a593Smuzhiyun 	return 0;
1700*4882a593Smuzhiyun }
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun static int __maybe_unused rk_iommu_resume(struct device *dev)
1703*4882a593Smuzhiyun {
1704*4882a593Smuzhiyun 	struct rk_iommu *iommu = dev_get_drvdata(dev);
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	if (!iommu->domain)
1707*4882a593Smuzhiyun 		return 0;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	if (iommu->dlr_disable)
1710*4882a593Smuzhiyun 		return 0;
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	return rk_iommu_enable(iommu);
1713*4882a593Smuzhiyun }
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun static const struct dev_pm_ops rk_iommu_pm_ops = {
1716*4882a593Smuzhiyun 	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
1717*4882a593Smuzhiyun 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1718*4882a593Smuzhiyun 				pm_runtime_force_resume)
1719*4882a593Smuzhiyun };
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun static struct rk_iommu_ops iommu_data_ops_v1 = {
1722*4882a593Smuzhiyun 	.pt_address = &rk_dte_pt_address,
1723*4882a593Smuzhiyun 	.mk_dtentries = &rk_mk_dte,
1724*4882a593Smuzhiyun 	.mk_ptentries = &rk_mk_pte,
1725*4882a593Smuzhiyun 	.dte_addr_phys = &rk_dte_addr_phys,
1726*4882a593Smuzhiyun 	.dma_addr_dte = &rk_dma_addr_dte,
1727*4882a593Smuzhiyun 	.dma_bit_mask = DMA_BIT_MASK(32),
1728*4882a593Smuzhiyun };
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun static struct rk_iommu_ops iommu_data_ops_v2 = {
1731*4882a593Smuzhiyun 	.pt_address = &rk_dte_pt_address_v2,
1732*4882a593Smuzhiyun 	.mk_dtentries = &rk_mk_dte_v2,
1733*4882a593Smuzhiyun 	.mk_ptentries = &rk_mk_pte_v2,
1734*4882a593Smuzhiyun 	.dte_addr_phys = &rk_dte_addr_phys_v2,
1735*4882a593Smuzhiyun 	.dma_addr_dte = &rk_dma_addr_dte_v2,
1736*4882a593Smuzhiyun 	.dma_bit_mask = DMA_BIT_MASK(40),
1737*4882a593Smuzhiyun };
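/*
 * Both ops tables provide the same hooks; v1 matches the original 32-bit
 * page-table/page address format, while the v2 variant (matched by
 * "rockchip,iommu-v2" and "rockchip,rk3568-iommu" below) encodes additional
 * physical address bits so that dma_bit_mask can reach 40 bits.
 */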
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun static const struct of_device_id rk_iommu_dt_ids[] = {
1740*4882a593Smuzhiyun 	{	.compatible = "rockchip,iommu",
1741*4882a593Smuzhiyun 		.data = &iommu_data_ops_v1,
1742*4882a593Smuzhiyun 	},
1743*4882a593Smuzhiyun 	{	.compatible = "rockchip,iommu-v2",
1744*4882a593Smuzhiyun 		.data = &iommu_data_ops_v2,
1745*4882a593Smuzhiyun 	},
1746*4882a593Smuzhiyun 	{	.compatible = "rockchip,rk3568-iommu",
1747*4882a593Smuzhiyun 		.data = &iommu_data_ops_v2,
1748*4882a593Smuzhiyun 	},
1749*4882a593Smuzhiyun 	{ /* sentinel */ }
1750*4882a593Smuzhiyun };
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun static struct platform_driver rk_iommu_driver = {
1753*4882a593Smuzhiyun 	.probe = rk_iommu_probe,
1754*4882a593Smuzhiyun 	.shutdown = rk_iommu_shutdown,
1755*4882a593Smuzhiyun 	.driver = {
1756*4882a593Smuzhiyun 		   .name = "rk_iommu",
1757*4882a593Smuzhiyun 		   .of_match_table = rk_iommu_dt_ids,
1758*4882a593Smuzhiyun 		   .pm = &rk_iommu_pm_ops,
1759*4882a593Smuzhiyun 		   .suppress_bind_attrs = true,
1760*4882a593Smuzhiyun 	},
1761*4882a593Smuzhiyun };
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun static int __init rk_iommu_init(void)
1764*4882a593Smuzhiyun {
1765*4882a593Smuzhiyun 	return platform_driver_register(&rk_iommu_driver);
1766*4882a593Smuzhiyun }
1767*4882a593Smuzhiyun subsys_initcall(rk_iommu_init);
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun MODULE_DESCRIPTION("IOMMU API for Rockchip");
1770*4882a593Smuzhiyun MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
1771*4882a593Smuzhiyun MODULE_ALIAS("platform:rockchip-iommu");
1772*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1773