// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

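/*
 * Read a CP15 coprocessor register into 'reg'. Illustrative usage (not an
 * invocation taken from this file): MRC(val, p15, 0, c0, c0, 0) expands to
 * "mrc p15, 0, %0, c0, c0, 0" and reads the ARM Main ID Register into val.
 */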
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

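/*
 * Clock handling: both clocks are prepared once at probe time, so these
 * helpers only enable/disable them and are therefore safe to call with
 * spinlocks held (e.g. from the TLB flush paths). iommu->pclk is
 * mandatory; iommu->clk is optional and may be NULL.
 */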
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

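/*
 * Put the global registers and every one of the 'ncb' context banks into a
 * known default state. Called once at probe time, before any context bank
 * is programmed.
 */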
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

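/*
 * Invalidate the entire TLB of every context bank on every IOMMU attached
 * to this domain. The clocks must be enabled around the register writes;
 * if enabling fails, the remaining IOMMUs are silently skipped.
 */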
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

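/*
 * Invalidate the TLB by virtual address, one granule at a time. Each
 * invalidation is tagged with the ASID of the target context bank, read
 * back from the hardware CONTEXTIDR register for that bank.
 */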
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

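/*
 * Allocate a free context bank number from the bitmap. The
 * find_next_zero_bit()/test_and_set_bit() pair is retried in a loop so
 * that concurrent allocators cannot claim the same index.
 */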
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

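/*
 * Route every MID (master/stream ID) of this master to its context bank:
 * each MID is pointed at the bank number, given VMID 0 and a non-secure
 * override, and the bank's ASID is set to its own index.
 */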
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

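/*
 * Program a context bank for a domain: install the ARM v7s page-table
 * configuration produced by io-pgtable (TTBCR/TTBR0/PRRR/NMRR), invalidate
 * the context's TLB, enable stalling fault reporting, and finally turn the
 * MMU on for this context.
 */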
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

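/*
 * Only unmanaged domains are supported; the io-pgtable itself is allocated
 * lazily, on first attach (see msm_iommu_domain_config()).
 */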
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

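/*
 * Allocate the ARM short-descriptor (v7s) io-pgtable for this domain and
 * narrow the advertised page-size bitmap to what the page-table code
 * actually accepted.
 */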
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static void msm_iommu_release_device(struct device *dev)
{
}

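/*
 * Attach a domain to a device: find each IOMMU whose first registered
 * master matches the device's of_node, allocate a context bank for every
 * master on that IOMMU, program the MID routing and page tables, and
 * record the IOMMU on the domain's list of attached instances.
 */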
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

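/*
 * Map and unmap simply defer to the io-pgtable ops under the domain's
 * page-table spinlock; GFP_ATOMIC is used because the lock is held.
 */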
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len, struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}

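/*
 * Translate an IOVA by asking the hardware: write the VA to the V2P probe
 * register, then read the physical address back from PAR. A supersection
 * result carries a 24-bit page offset; anything else combines the upper
 * 20 bits of PAR with the low 12 bits of the VA. A fault yields 0.
 */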
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

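/*
 * Record a stream ID for a device. The first of_xlate() call for a device
 * allocates its msm_iommu_ctx_dev and hooks it onto the IOMMU's context
 * list; later calls only append stream IDs, ignoring duplicates.
 */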
static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master)
			return;
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 spec->args[0]);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu = NULL, *iter;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
		if (iter->dev->of_node == spec->np) {
			iommu = iter;
			break;
		}
	}

	if (!iommu) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

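/*
 * Context fault handler: for every context bank with a pending fault,
 * dump the fault registers and then clear the fault status bits.
 */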
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here, the barrier to guarantee
	 * completion of the tlb sync operation is implicitly
	 * taken care of when the iommu client does a writel before
	 * kick-starting the other master.
	 */
	.iotlb_sync = NULL,
	.iotlb_sync_map = msm_iommu_sync_map,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.probe_device = msm_iommu_probe_device,
	.release_device = msm_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};

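/*
 * Probe: acquire and prepare the clocks, map the registers, reset the
 * hardware, and sanity-check it with a dummy V2P translation; a PAR value
 * of zero there means the IOMMU is not responding, and the probe is
 * aborted.
 */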
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	/* msm_iommu_remove() fetches the device through drvdata */
	platform_set_drvdata(pdev, iommu);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}
subsys_initcall(msm_iommu_driver_init);