// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

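/*
 * Entry type encoding (low two bits), as implied by the predicates below:
 * lv1: 0 or 3 = fault, 1 = link to an lv2 page table, 2 = 1MiB section;
 *      ZERO_LV2LINK (a link to the shared all-zero lv2 table) also counts
 *      as a fault at lv1.
 * lv2: 0 = fault, 1 = 64KiB large page, 2 = 4KiB small page (lv2ent_small()
 *      only checks bit 1).
 */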
#define lv1ent_fault(sent)	((*(sent) == ZERO_LV2LINK) || \
				 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent)	(*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent)	((*(sent) & 3) == 1)
#define lv1ent_page(sent)	((*(sent) != ZERO_LV2LINK) && \
				 ((*(sent) & 3) == 1))
#define lv1ent_section(sent)	((*(sent) & 3) == 2)

#define lv2ent_fault(pent)	((*(pent) & 3) == 0)
#define lv2ent_small(pent)	((*(pent) & 2) == 2)
#define lv2ent_large(pent)	((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for a 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
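
/*
 * Worked example (v5, PG_ENT_SHIFT == 4): a section-aligned physical address
 * above 4 GiB such as 0x123400000 is stored in the page table as
 * 0x123400000 >> 4 == 0x12340000, which still fits in a 32-bit sysmmu_pte_t;
 * sect_to_phys() below reverses the shift when reading entries back.
 */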

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
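
/*
 * Since IOMMU_READ is bit 0 and IOMMU_WRITE is bit 1, a prot value masked
 * with SYSMMU_SUPPORTED_PROT_BITS (0..3) directly indexes the four-entry
 * LV1_PROT/LV2_PROT tables above, e.g. IOMMU_READ | IOMMU_WRITE selects
 * entry 3.
 */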

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
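
/*
 * Worked example: iova 0x12345678 decomposes into lv1 index 0x123
 * (0x12345678 >> 20) and lv2 index 0x45 ((0x12345678 >> 12) & 0xFF),
 * leaving 0x678 as the offset within a 4KiB small page.
 */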

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
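
/*
 * Worked example: version 3.3 is packed as MAKE_MMU_VER(3, 3) ==
 * (3 << 7) | 3 == 0x183; MMU_MAJ_VER()/MMU_MIN_VER() split it back into
 * 3 and 3, and MMU_RAW_VER() extracts the same packed value from bits
 * 31:21 of REG_MMU_VERSION.
 */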

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_MMU_FLUSH_RANGE 0x018
#define REG_V5_MMU_FLUSH_START 0x020
#define REG_V5_MMU_FLUSH_END 0x024
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
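
/*
 * ZERO_LV2LINK makes every unused lv1 slot point at a shared, all-zero lv2
 * table instead of holding a raw fault entry, so that a System MMU v3.3
 * FLPD-cache prefetch of an unmapped area never caches a fault entry (see
 * the workaround comments in alloc_lv2entry() and exynos_iommu_unmap()).
 */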

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree that are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that
 * have been attached to this domain, and the page tables of the IO address
 * space it defines. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients;	/* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;		/* lv1 page table, 16KB */
	short *lv2entcnt;		/* free lv2 entry counter for each section */
	spinlock_t lock;		/* lock for modifying list of clients */
	spinlock_t pgtablelock;		/* lock for modifying page table @ pgtable */
	struct iommu_domain domain;	/* generic domain data structure */
};

/*
 * This structure holds all the data of a single SYSMMU controller: hw
 * resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

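/*
 * On pre-v5 hardware each TLB entry is flushed individually via
 * REG_MMU_FLUSH_ENTRY; v5 additionally accepts an inclusive VPN range
 * (REG_V5_MMU_FLUSH_START/END plus a write to REG_V5_MMU_FLUSH_RANGE),
 * which the code below uses whenever more than one entry is invalidated.
 */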
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
			iova += SPAGE_SIZE;
		}
	} else {
		if (num_inv == 1) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		} else {
			writel((iova & SPAGE_MASK),
			       data->sfrbase + REG_V5_MMU_FLUSH_START);
			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
			       data->sfrbase + REG_V5_MMU_FLUSH_END);
			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
		}
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The SYSMMU is in a blocked state when an interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					 data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for the
	 * short time it spends accessing the registers. For address
	 * translation during DMA transactions it relies on the client
	 * driver to keep the clock enabled.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required:
		 *   4KB page:  1 invalidation
		 *  64KB page: 16 invalidations
		 *   1MB page: 64 invalidations
		 * because it is an 8-way set-associative TLB with 64 sets.
		 * A 1MB page can be cached in any of the sets, and a 64KB
		 * page in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		goto err_iommu_register;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * Use the first registered sysmmu device for performing dma mapping
	 * operations on iommu page tables (cpu cache flush).
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;

err_iommu_register:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

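/*
 * The lv1/lv2 page tables live in normal kernel memory that was mapped
 * DMA_TO_DEVICE against dma_dev (the first probed SYSMMU, see the probe
 * function above), so every PTE update is wrapped in a sync-for-cpu /
 * sync-for-device pair to keep the hardware page-table walker coherent
 * with the CPU caches.
 */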
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
				    sysmmu_pte_t *sent, sysmmu_iova_t iova,
				    short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If a pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table.
		 * This function replaces the zero_l2_table with a new L2 page
		 * table to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for that area
		 * instead of the new L2 page table that holds its mapping
		 * information.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve FLPD cache invalidation for
		 * System MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
		     iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			     iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flush the FLPD cache in System MMU v3.3, which may have
		 * cached an FLPD entry by speculative prefetch of an SLPD
		 * that has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry has since been updated to a valid entry.
 * To prevent caching faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager must apply the following
 * workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

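	/*
	 * The two-level table supports three mapping sizes: a 1MiB section
	 * is written directly into the first-level table below, while 64KiB
	 * large pages and 4KiB small pages go through a second-level table
	 * that alloc_lv2entry() creates on demand.
	 */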
	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
		       __func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

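	/*
	 * A 64KiB large page spans SPAGES_PER_LPAGE (16) consecutive
	 * second-level entries, so all of them are cleared together; the
	 * dma_sync_single_for_*() calls bracket the update so the SYSMMU's
	 * table walker observes the zeroed entries.
	 */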
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
	       __func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}
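/*
 * Illustrative device-tree snippet (hypothetical labels, not from this
 * file): a master device is routed through exynos_iommu_of_xlate() by
 * referencing its System MMU with an "iommus" phandle, e.g.:
 *
 *	gsc_0: video-scaler@13e00000 {
 *		...
 *		iommus = <&sysmmu_gsc0>;
 *	};
 */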

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
		       __func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
		       __func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);