// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
        struct list_head list;
        struct tegra_smmu *smmu;
        const struct tegra_smmu_group_soc *soc;
        struct iommu_group *group;
        unsigned int swgroup;
};

struct tegra_smmu {
        void __iomem *regs;
        struct device *dev;

        struct tegra_mc *mc;
        const struct tegra_smmu_soc *soc;

        struct list_head groups;

        unsigned long pfn_mask;
        unsigned long tlb_mask;

        unsigned long *asids;
        struct mutex lock;

        struct list_head list;

        struct dentry *debugfs;

        struct iommu_device iommu;      /* IOMMU Core code handle */
};

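/*
 * Per-domain address space: "pd" is the single-page page directory, "pts"
 * holds the page-table pages indexed by PDE, "count" tracks how many PTEs
 * in each page table are in use, and "id" is the ASID assigned while the
 * address space is attached to the SMMU.
 */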
struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
        spinlock_t lock;
        u32 *count;
        struct page **pts;
        struct page *pd;
        dma_addr_t pd_dma;
        unsigned id;
        u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
        return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                         SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
                                         SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK          (~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)  ((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)        ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)        ((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE        (1 << 31)
#define SMMU_PD_WRITABLE        (1 << 30)
#define SMMU_PD_NONSECURE       (1 << 29)

#define SMMU_PDE_READABLE       (1 << 31)
#define SMMU_PDE_WRITABLE       (1 << 30)
#define SMMU_PDE_NONSECURE      (1 << 29)
#define SMMU_PDE_NEXT           (1 << 28)

#define SMMU_PTE_READABLE       (1 << 31)
#define SMMU_PTE_WRITABLE       (1 << 30)
#define SMMU_PTE_NONSECURE      (1 << 29)

#define SMMU_PDE_ATTR           (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                                 SMMU_PDE_NONSECURE)

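/*
 * Translation uses a two-level page table: bits [31:22] of the IOVA index
 * the 1024-entry page directory, bits [21:12] index a 1024-entry page
 * table, and the remaining 12 bits are the offset within a 4 KiB page.
 */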
static unsigned int iova_pd_index(unsigned long iova)
{
        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
        addr >>= 12;
        return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
        return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
{
        u32 value;

        offset &= ~(smmu->mc->soc->atom_size - 1);

        if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
                value = 0;
#endif
                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }

        value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
{
        u32 value;

        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

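/* Read back an SMMU register so that preceding posted writes take effect. */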
static inline void smmu_flush(struct tegra_smmu *smmu)
{
        smmu_readl(smmu, SMMU_PTB_ASID);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned long id;

        mutex_lock(&smmu->lock);

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids) {
                mutex_unlock(&smmu->lock);
                return -ENOSPC;
        }

        set_bit(id, smmu->asids);
        *idp = id;

        mutex_unlock(&smmu->lock);
        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
        mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
        return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
        if (!as->pts) {
                kfree(as->count);
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        spin_lock_init(&as->lock);

        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

        return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);

        /* TODO: free page directory and page tables */

        WARN_ON_ONCE(as->use_count);
        kfree(as->count);
        kfree(as->pts);
        kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
        const struct tegra_smmu_swgroup *group = NULL;
        unsigned int i;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
                        break;
                }
        }

        return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        } else {
                pr_warn("%s group from swgroup %u not found\n", __func__,
                        swgroup);
                /* No point moving ahead if group was not found */
                return;
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value |= BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value &= ~BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }
}

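/*
 * First use of an address space: map the page directory for DMA, allocate
 * an ASID and program SMMU_PTB_ASID/SMMU_PTB_DATA so the hardware can walk
 * this domain's page tables. Subsequent uses only bump the use count.
 */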
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err;

        if (as->use_count > 0) {
                as->use_count++;
                return 0;
        }

        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma))
                return -ENOMEM;

        /* We can't handle 64-bit DMA addresses */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
                err = -ENOMEM;
                goto err_unmap;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                goto err_unmap;

        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        return 0;

err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
        return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        if (--as->use_count > 0)
                return;

        tegra_smmu_free_asid(smmu, as->id);

        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

        as->smmu = NULL;
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                err = tegra_smmu_as_prepare(smmu, as);
                if (err < 0)
                        return err;

                tegra_smmu_enable(smmu, swgroup, as->id);
                index++;
        }

        if (index == 0)
                return -ENODEV;

        return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = as->smmu;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                tegra_smmu_disable(smmu, swgroup, as->id);
                tegra_smmu_as_unprepare(smmu, as);
                index++;
        }
}

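/*
 * Update a page directory entry and make it visible to the hardware: sync
 * the entry to memory, invalidate the matching page table cache entry and
 * TLB section, then read back to flush the posted register writes.
 */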
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                               u32 value)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        u32 *pd = page_address(as->pd);
        unsigned long offset = pd_index * sizeof(*pd);

        /* Set the page directory entry first */
        pd[pd_index] = value;

        /* Then flush the page directory entry from caches */
        dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
                                         sizeof(*pd), DMA_TO_DEVICE);

        /* And flush the iommu */
        smmu_flush_ptc(smmu, as->pd_dma, offset);
        smmu_flush_tlb_section(smmu, as->id, iova);
        smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
        u32 *pt = page_address(pt_page);

        return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
                                  dma_addr_t *dmap)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        struct page *pt_page;
        u32 *pd;

        pt_page = as->pts[pd_index];
        if (!pt_page)
                return NULL;

        pd = page_address(as->pd);
        *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

        return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       dma_addr_t *dmap, struct page *page)
{
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;

        if (!as->pts[pde]) {
                dma_addr_t dma;

                dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
                                   DMA_TO_DEVICE);
                if (dma_mapping_error(smmu->dev, dma)) {
                        __free_page(page);
                        return NULL;
                }

                if (!smmu_dma_addr_valid(smmu, dma)) {
                        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
                                       DMA_TO_DEVICE);
                        __free_page(page);
                        return NULL;
                }

                as->pts[pde] = page;

                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
                                                              SMMU_PDE_NEXT));

                *dmap = dma;
        } else {
                u32 *pd = page_address(as->pd);

                *dmap = smmu_pde_to_dma(smmu, pd[pde]);
        }

        return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pd_index = iova_pd_index(iova);

        as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (--as->count[pde] == 0) {
                struct tegra_smmu *smmu = as->smmu;
                u32 *pd = page_address(as->pd);
                dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

                tegra_smmu_set_pde(as, iova, 0);

                dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
                __free_page(page);
                as->pts[pde] = NULL;
        }
}

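/*
 * Update a single PTE and flush it out of the CPU cache, the page table
 * cache and the TLB so the new mapping takes effect immediately.
 */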
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, dma_addr_t pte_dma, u32 val)
{
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

        *pte = val;

        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
                                         4, DMA_TO_DEVICE);
        smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
                                    unsigned long iova, gfp_t gfp,
                                    unsigned long *flags)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /* at first check whether allocation needs to be done at all */
        if (page)
                return page;

        /*
         * In order to prevent exhaustion of the atomic memory pool, we
         * allocate page in a sleeping context if GFP flags permit. Hence
         * spinlock needs to be unlocked and re-locked after allocation.
         */
        if (!(gfp & __GFP_ATOMIC))
                spin_unlock_irqrestore(&as->lock, *flags);

        page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

        if (!(gfp & __GFP_ATOMIC))
                spin_lock_irqsave(&as->lock, *flags);

        /*
         * In a case of blocking allocation, a concurrent mapping may win
         * the PDE allocation. In this case the allocated page isn't needed
         * if allocation succeeded and the allocation failure isn't fatal.
         */
        if (as->pts[pde]) {
                if (page)
                        __free_page(page);

                page = as->pts[pde];
        }

        return page;
}

static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
                 unsigned long *flags)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        struct page *page;
        u32 pte_attrs;
        u32 *pte;

        page = as_get_pde_page(as, iova, gfp, flags);
        if (!page)
                return -ENOMEM;

        pte = as_get_pte(as, iova, &pte_dma, page);
        if (!pte)
                return -ENOMEM;

        /* If we aren't overwriting a pre-existing entry, increment use */
        if (*pte == 0)
                tegra_smmu_pte_get_use(as, iova);

        pte_attrs = SMMU_PTE_NONSECURE;

        if (prot & IOMMU_READ)
                pte_attrs |= SMMU_PTE_READABLE;

        if (prot & IOMMU_WRITE)
                pte_attrs |= SMMU_PTE_WRITABLE;

        tegra_smmu_set_pte(as, iova, pte, pte_dma,
                           SMMU_PHYS_PFN(paddr) | pte_attrs);

        return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                   size_t size, struct iommu_iotlb_gather *gather)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
        tegra_smmu_pte_put_use(as, iova);

        return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&as->lock, flags);
        ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
        spin_unlock_irqrestore(&as->lock, flags);

        return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size, struct iommu_iotlb_gather *gather)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long flags;

        spin_lock_irqsave(&as->lock, flags);
        size = __tegra_smmu_unmap(domain, iova, size, gather);
        spin_unlock_irqrestore(&as->lock, flags);

        return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long pfn;
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        pfn = *pte & as->smmu->pfn_mask;

        return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

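/*
 * Look up the SMMU instance behind an "iommus" phandle: the phandle points
 * at the memory controller, whose driver data carries the tegra_smmu that
 * tegra_smmu_probe() registered.
 */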
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
        struct platform_device *pdev;
        struct tegra_mc *mc;

        pdev = of_find_device_by_node(np);
        if (!pdev)
                return NULL;

        mc = platform_get_drvdata(pdev);
        if (!mc)
                return NULL;

        return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
                                struct of_phandle_args *args)
{
        const struct iommu_ops *ops = smmu->iommu.ops;
        int err;

        err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
        if (err < 0) {
                dev_err(dev, "failed to initialize fwspec: %d\n", err);
                return err;
        }

        err = ops->of_xlate(dev, args);
        if (err < 0) {
                dev_err(dev, "failed to parse SW group ID: %d\n", err);
                iommu_fwspec_free(dev);
                return err;
        }

        return 0;
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = NULL;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                          &args) == 0) {
                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        err = tegra_smmu_configure(smmu, dev, &args);
                        of_node_put(args.np);

                        if (err < 0)
                                return ERR_PTR(err);

                        /*
                         * Only a single IOMMU master interface is currently
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
                        dev_iommu_priv_set(dev, smmu);

                        break;
                }

                of_node_put(args.np);
                index++;
        }

        if (!smmu)
                return ERR_PTR(-ENODEV);

        return &smmu->iommu;
}

static void tegra_smmu_release_device(struct device *dev)
{
        dev_iommu_priv_set(dev, NULL);
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
        unsigned int i, j;

        for (i = 0; i < smmu->soc->num_groups; i++)
                for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
                        if (smmu->soc->groups[i].swgroups[j] == swgroup)
                                return &smmu->soc->groups[i];

        return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
        struct tegra_smmu_group *group = iommu_data;
        struct tegra_smmu *smmu = group->smmu;

        mutex_lock(&smmu->lock);
        list_del(&group->list);
        mutex_unlock(&smmu->lock);
}

static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
                                                unsigned int swgroup)
{
        const struct tegra_smmu_group_soc *soc;
        struct tegra_smmu_group *group;
        struct iommu_group *grp;

        /* Find group_soc associating with swgroup */
        soc = tegra_smmu_find_group(smmu, swgroup);

        mutex_lock(&smmu->lock);

        /* Find existing iommu_group associating with swgroup or group_soc */
        list_for_each_entry(group, &smmu->groups, list)
                if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
                        grp = iommu_group_ref_get(group->group);
                        mutex_unlock(&smmu->lock);
                        return grp;
                }

        group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
        if (!group) {
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        INIT_LIST_HEAD(&group->list);
        group->swgroup = swgroup;
        group->smmu = smmu;
        group->soc = soc;

        group->group = iommu_group_alloc();
        if (IS_ERR(group->group)) {
                devm_kfree(smmu->dev, group);
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
        if (soc)
                iommu_group_set_name(group->group, soc->name);
        list_add_tail(&group->list, &smmu->groups);
        mutex_unlock(&smmu->lock);

        return group->group;
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        struct iommu_group *group;

        group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
        if (!group)
                group = generic_device_group(dev);

        return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
                               struct of_phandle_args *args)
{
        u32 id = args->args[0];

        return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .probe_device = tegra_smmu_probe_device,
        .release_device = tegra_smmu_release_device,
        .device_group = tegra_smmu_device_group,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .iova_to_phys = tegra_smmu_iova_to_phys,
        .of_xlate = tegra_smmu_of_xlate,
        .pgsize_bitmap = SZ_4K,
};

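/*
 * If a Tegra30-style AHB bridge is present, tell it to route memory
 * transactions from AHB bus masters through the SMMU.
 */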
static void tegra_smmu_ahb_enable(void)
{
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },
                { }
        };
        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
        if (ahb) {
                tegra_ahb_enable_smmu(ahb);
                of_node_put(ahb);
        }
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "swgroup    enabled  ASID\n");
        seq_printf(s, "------------------------\n");

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
                const char *status;
                unsigned int asid;

                value = smmu_readl(smmu, group->reg);

                if (value & SMMU_ASID_ENABLE)
                        status = "yes";
                else
                        status = "no";

                asid = value & SMMU_ASID_MASK;

                seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
                           asid);
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "client       enabled\n");
        seq_printf(s, "--------------------\n");

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];
                const char *status;

                value = smmu_readl(smmu, client->smmu.reg);

                if (value & BIT(client->smmu.bit))
                        status = "yes";
                else
                        status = "no";

                seq_printf(s, "%-12s %s\n", client->name, status);
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
        smmu->debugfs = debugfs_create_dir("smmu", NULL);
        if (!smmu->debugfs)
                return;

        debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_swgroups_fops);
        debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
        debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
{
        struct tegra_smmu *smmu;
        size_t size;
        u32 value;
        int err;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .probe_device()
         * callback via the IOMMU device's .drvdata field.
         */
        mc->smmu = smmu;

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!smmu->asids)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&smmu->groups);
        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;
        smmu->soc = soc;
        smmu->dev = dev;
        smmu->mc = mc;

        smmu->pfn_mask =
                BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
        smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
                smmu->tlb_mask);

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc_all(smmu);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);

        tegra_smmu_ahb_enable();

        err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
        if (err)
                return ERR_PTR(err);

        iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
        iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

        err = iommu_device_register(&smmu->iommu);
        if (err) {
                iommu_device_sysfs_remove(&smmu->iommu);
                return ERR_PTR(err);
        }

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
        if (err < 0) {
                iommu_device_unregister(&smmu->iommu);
                iommu_device_sysfs_remove(&smmu->iommu);
                return ERR_PTR(err);
        }

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_init(smmu);

        return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
        iommu_device_unregister(&smmu->iommu);
        iommu_device_sysfs_remove(&smmu->iommu);

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_exit(smmu);
}