xref: /OK3568_Linux_fs/kernel/drivers/iommu/omap-iommu.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * omap iommu: main structures
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 */

#ifndef _OMAP_IOMMU_H
#define _OMAP_IOMMU_H

#include <linux/bitops.h>
#include <linux/iommu.h>

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
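
/*
 * Illustrative usage sketch (not part of the original header): walking all
 * TLB entries of an iommu object and counting the valid ones. Locking is
 * assumed to be handled by the caller.
 *
 *	struct cr_regs cr;
 *	int i, valid = 0;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 *		if (iotlb_cr_valid(&cr) > 0)
 *			valid++;
 *	}
 */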

struct iotlb_entry {
	u32 da;
	u32 pa;
	u32 pgsz, prsvd, valid;
	u32 endian, elsz, mixed;
};
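
/*
 * Illustrative sketch (hypothetical values): describing a single 4KB,
 * little-endian, 32-bit element mapping with this structure, using the
 * MMU_CAM_ and MMU_RAM_ bit definitions declared further below.
 *
 *	struct iotlb_entry e = {
 *		.da	= 0x40000000,
 *		.pa	= 0x9c000000,
 *		.pgsz	= MMU_CAM_PGSZ_4K,
 *		.valid	= MMU_CAM_V,
 *		.endian	= MMU_RAM_ENDIAN_LITTLE,
 *		.elsz	= MMU_RAM_ELSZ_32,
 *	};
 */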

/**
 * struct omap_iommu_device - omap iommu device data
 * @pgtable:	page table used by an omap iommu attached to a domain
 * @iommu_dev:	pointer to store an omap iommu instance attached to a domain
 */
struct omap_iommu_device {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
};

/**
 * struct omap_iommu_domain - omap iommu domain
 * @num_iommus: number of iommus in this domain
 * @iommus:	omap iommu device data for all iommus in this domain
 * @dev:	device using this domain
 * @lock:	domain lock, should be taken when attaching/detaching
 * @domain:	generic domain handle used by iommu core code
 */
struct omap_iommu_domain {
	u32 num_iommus;
	struct omap_iommu_device *iommus;
	struct device *dev;
	spinlock_t lock;
	struct iommu_domain domain;
};
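
/*
 * Illustrative sketch: the generic struct iommu_domain handed out by the
 * IOMMU core is recovered from the embedded @domain member with
 * container_of(); the helper name below is only an example.
 *
 *	static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
 *	{
 *		return container_of(dom, struct omap_iommu_domain, domain);
 *	}
 */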

struct omap_iommu {
	const char	*name;
	void __iomem	*regbase;
	struct regmap	*syscfg;
	struct device	*dev;
	struct iommu_domain *domain;
	struct dentry	*debug_dir;

	spinlock_t	iommu_lock;	/* global for this whole object */

	/*
	 * Unlike a task's pgd, the iopgd is not switched per context;
	 * it is shared globally by each iommu.
	 */
	u32		*iopgd;
	spinlock_t	page_table_lock; /* protect iopgd */
	dma_addr_t	pd_dma;

	int		nr_tlb_entries;

	void *ctx; /* iommu context: registers saved area */

	struct cr_regs *cr_ctx;
	u32 num_cr_ctx;

	int has_bus_err_back;
	u32 id;

	struct iommu_device iommu;
	struct iommu_group *group;

	u8 pwrst;
};

/**
 * struct omap_iommu_arch_data - omap iommu private data
 * @iommu_dev: handle of the OMAP iommu device
 * @dev: handle of the iommu device
 *
 * This is an omap iommu private data object, which binds an iommu user
 * to its iommu device. This object should be placed at the iommu user's
 * dev_archdata so the generic IOMMU API can be used without
 * omap-specific plumbing.
 */
struct omap_iommu_arch_data {
	struct omap_iommu *iommu_dev;
	struct device *dev;
};

struct cr_regs {
	u32 cam;
	u32 ram;
};

struct iotlb_lock {
	short base;
	short vict;
};

/*
 * MMU Register offsets
 */
#define MMU_REVISION		0x00
#define MMU_IRQSTATUS		0x18
#define MMU_IRQENABLE		0x1c
#define MMU_WALKING_ST		0x40
#define MMU_CNTL		0x44
#define MMU_FAULT_AD		0x48
#define MMU_TTB			0x4c
#define MMU_LOCK		0x50
#define MMU_LD_TLB		0x54
#define MMU_CAM			0x58
#define MMU_RAM			0x5c
#define MMU_GFLUSH		0x60
#define MMU_FLUSH_ENTRY		0x64
#define MMU_READ_CAM		0x68
#define MMU_READ_RAM		0x6c
#define MMU_EMU_FAULT_AD	0x70
#define MMU_GP_REG		0x88

#define MMU_REG_SIZE		256

/*
 * MMU Register bit definitions
 */
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT	BIT(4)
#define MMU_IRQ_TABLEWALKFAULT	BIT(3)
#define MMU_IRQ_EMUMISS		BIT(2)
#define MMU_IRQ_TRANSLATIONFAULT	BIT(1)
#define MMU_IRQ_TLBMISS		BIT(0)

#define __MMU_IRQ_FAULT		\
	(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK		\
	(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK	(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK	(__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)

/* MMU_CNTL */
#define MMU_CNTL_SHIFT		1
#define MMU_CNTL_MASK		(7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB	BIT(3)
#define MMU_CNTL_TWL_EN		BIT(2)
#define MMU_CNTL_MMU_EN		BIT(1)

/* CAM */
#define MMU_CAM_VATAG_SHIFT	12
#define MMU_CAM_VATAG_MASK \
	((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
#define MMU_CAM_P		BIT(3)
#define MMU_CAM_V		BIT(2)
#define MMU_CAM_PGSZ_MASK	3
#define MMU_CAM_PGSZ_1M		(0 << 0)
#define MMU_CAM_PGSZ_64K	(1 << 0)
#define MMU_CAM_PGSZ_4K		(2 << 0)
#define MMU_CAM_PGSZ_16M	(3 << 0)

/* RAM */
#define MMU_RAM_PADDR_SHIFT	12
#define MMU_RAM_PADDR_MASK \
	((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)

#define MMU_RAM_ENDIAN_SHIFT	9
#define MMU_RAM_ENDIAN_MASK	BIT(MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_LITTLE	(0 << MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_BIG	BIT(MMU_RAM_ENDIAN_SHIFT)

#define MMU_RAM_ELSZ_SHIFT	7
#define MMU_RAM_ELSZ_MASK	(3 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_8		(0 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_16		(1 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_32		(2 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_NONE	(3 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_MIXED_SHIFT	6
#define MMU_RAM_MIXED_MASK	BIT(MMU_RAM_MIXED_SHIFT)
#define MMU_RAM_MIXED		MMU_RAM_MIXED_MASK
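
/*
 * Illustrative sketch (assumed encoding, mirroring how the driver folds a
 * struct iotlb_entry into the CAM/RAM register pair):
 *
 *	cr.cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
 *	cr.ram = e->pa | e->endian | e->elsz | e->mixed;
 */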

#define MMU_GP_REG_BUS_ERR_BACK_EN	0x1

#define get_cam_va_mask(pgsz)				\
	(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_1M)  ? 0xfff00000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 :	\
	 ((pgsz) == MMU_CAM_PGSZ_4K)  ? 0xfffff000 : 0)
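
/*
 * Illustrative example: for a 64KB entry only the upper 16 address bits
 * form the CAM virtual-address tag, so
 *
 *	get_cam_va_mask(MMU_CAM_PGSZ_64K) == 0xffff0000
 */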

/*
 * DSP_SYSTEM registers and bit definitions (applicable only for DRA7xx DSP)
 */
#define DSP_SYS_REVISION		0x00
#define DSP_SYS_MMU_CONFIG		0x18
#define DSP_SYS_MMU_CONFIG_EN_SHIFT	4

/*
 * utilities for super pages (16MB, 1MB, 64KB and 4KB)
 */

#define iopgsz_max(bytes)			\
	(((bytes) >= SZ_16M) ? SZ_16M :		\
	 ((bytes) >= SZ_1M)  ? SZ_1M  :		\
	 ((bytes) >= SZ_64K) ? SZ_64K :		\
	 ((bytes) >= SZ_4K)  ? SZ_4K  :	0)

#define bytes_to_iopgsz(bytes)				\
	(((bytes) == SZ_16M) ? MMU_CAM_PGSZ_16M :	\
	 ((bytes) == SZ_1M)  ? MMU_CAM_PGSZ_1M  :	\
	 ((bytes) == SZ_64K) ? MMU_CAM_PGSZ_64K :	\
	 ((bytes) == SZ_4K)  ? MMU_CAM_PGSZ_4K  : -1)

#define iopgsz_to_bytes(iopgsz)				\
	(((iopgsz) == MMU_CAM_PGSZ_16M)	? SZ_16M :	\
	 ((iopgsz) == MMU_CAM_PGSZ_1M)	? SZ_1M  :	\
	 ((iopgsz) == MMU_CAM_PGSZ_64K)	? SZ_64K :	\
	 ((iopgsz) == MMU_CAM_PGSZ_4K)	? SZ_4K  : 0)

#define iopgsz_ok(bytes) (bytes_to_iopgsz(bytes) >= 0)
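
/*
 * Illustrative examples of the helpers above:
 *
 *	iopgsz_max(SZ_2M)       == SZ_1M            (largest size that fits)
 *	bytes_to_iopgsz(SZ_64K) == MMU_CAM_PGSZ_64K
 *	bytes_to_iopgsz(SZ_8K)  == -1               (unsupported page size)
 *	iopgsz_ok(SZ_8K)        == 0
 */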

/*
 * global functions
 */

struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n);
void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l);
void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l);
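
/*
 * Illustrative sketch (assumed caller responsibilities): preserving the TLB
 * lock register across a scan of the TLB.
 *
 *	struct iotlb_lock l;
 *
 *	iotlb_lock_get(obj, &l);
 *	... read or load entries ...
 *	iotlb_lock_set(obj, &l);
 */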

#ifdef CONFIG_OMAP_IOMMU_DEBUG
void omap_iommu_debugfs_init(void);
void omap_iommu_debugfs_exit(void);

void omap_iommu_debugfs_add(struct omap_iommu *obj);
void omap_iommu_debugfs_remove(struct omap_iommu *obj);
#else
static inline void omap_iommu_debugfs_init(void) { }
static inline void omap_iommu_debugfs_exit(void) { }

static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { }
static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { }
#endif

/*
 * register accessors
 */
static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs)
{
	return __raw_readl(obj->regbase + offs);
}

static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
{
	__raw_writel(val, obj->regbase + offs);
}
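
/*
 * Illustrative sketch: enabling all fault interrupts and reading the
 * revision register through the accessors above.
 *
 *	u32 rev;
 *
 *	iommu_write_reg(obj, MMU_IRQ_MASK, MMU_IRQENABLE);
 *	rev = iommu_read_reg(obj, MMU_REVISION);
 */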

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return cr->cam & MMU_CAM_V;
}

#endif /* _OMAP_IOMMU_H */