// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

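/*
 * Invalidate the whole TLB of every IOMMU instance attached to this
 * domain. A failure to enable the clocks skips the remaining instances.
 */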
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

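/*
 * Invalidate a range of IOVAs, one granule at a time, in every context
 * bank attached to this domain. Each TLBIVA write carries the context's
 * ASID in the low bits so that only that context's entries are dropped.
 */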
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

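/*
 * Context banks are tracked in a per-IOMMU bitmap. Allocation retries
 * until test_and_set_bit() wins the race for a free index, or returns
 * -ENOSPC when no bank is left.
 */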
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

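/*
 * Point every MID (master/stream ID) of this client at the context bank
 * allocated for it, with VMID 0 and the context number reused as the
 * TLB ASID.
 */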
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

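/*
 * Program a freshly reset context bank from the ARMv7 short-descriptor
 * io-pgtable configuration: TTBR0/TTBCR/PRRR/NMRR come straight from
 * priv->cfg.arm_v7s_cfg, faults are configured to stall, and the MMU is
 * finally enabled for the context.
 */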
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static void msm_iommu_release_device(struct device *dev)
{
}

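/*
 * Attaching walks every registered IOMMU instance looking for context
 * entries belonging to this device's of_node, allocates a context bank
 * for each, programs it with the domain's page table and links the
 * instance onto the domain's list_attached.
 */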
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len, struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}

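/*
 * Resolve an IOVA by asking the hardware: write the address into the
 * V2P probe register of the first attached context and read the result
 * back from PAR, handling the supersection case and returning 0 on a
 * translation fault.
 */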
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

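/*
 * Record a stream ID seen through of_xlate() in the per-device master
 * structure, creating the structure on the first ID and ignoring
 * duplicates.
 */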
static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
				 sid);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu = NULL, *iter;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
		if (iter->dev->of_node == spec->np) {
			iommu = iter;
			break;
		}
	}

	if (!iommu) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

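/*
 * Context fault interrupt: scan every context bank for a non-zero FSR,
 * dump the fault registers and write the status bits back to clear the
 * fault.
 */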
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here, the barrier to guarantee
	 * completion of the tlb sync operation is implicitly
	 * taken care when the iommu client does a writel before
	 * kick starting the other master.
	 */
	.iotlb_sync = NULL,
	.iotlb_sync_map = msm_iommu_sync_map,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.probe_device = msm_iommu_probe_device,
	.release_device = msm_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};

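/*
 * Probe: map the register space, read the number of context banks from
 * the "qcom,ncb" property, sanity-check the hardware with a dummy V2P
 * translation (PAR must read back non-zero), wire up the fault IRQ and
 * register the IOMMU with the core and the platform bus.
 */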
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name	= "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe		= msm_iommu_probe,
	.remove		= msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}
subsys_initcall(msm_iommu_driver_init);