// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))
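/*
 * e.g. with a 4KiB granule (8-byte PTEs, so bits_per_level = 9) this
 * yields shifts of 39/30/21/12 for levels 0-3, matching VMSAv8-64.
 */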

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
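/* e.g. a 4KiB granule means 4096-byte tables of 512 8-byte PTEs. */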

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
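/* e.g. 4KiB granule: 1GiB at level 1, 2MiB at level 2, 4KiB at level 3. */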

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)			((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK				0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE			0x04ULL
#define ARM_LPAE_MAIR_ATTR_NC				0x44ULL
#define ARM_LPAE_MAIR_ATTR_INC_OWBRANWA			0xe4ULL
#define ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA		0xefULL
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA			0xf4ULL
#define ARM_LPAE_MAIR_ATTR_WBRWA			0xffULL
#define ARM_LPAE_MAIR_ATTR_IDX_NC			0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE			1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV			2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE		3
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA		4
#define ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA	5

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}
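/*
 * With a 64KiB granule and 52-bit addresses, PA bits 51:48 are packed
 * into PTE bits 15:12: e.g. 0x000f_0000_0000_0000 becomes 0xf000 in the
 * PTE, and iopte_to_paddr() rotates it back.
 */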
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i], lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}
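/*
 * Atomically install @table at @ptep if it still contains @curr; returns
 * the PTE found in place, so a mismatching non-zero return tells the
 * caller that another walker won the race.
 */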
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
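/*
 * Recurse down from the start level until @size matches the block size of
 * the current level, e.g. a 2MiB-aligned 2MiB mapping with a 4KiB granule
 * terminates at level 2 with a single block PTE.
 */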
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret && mapped)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}
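/*
 * e.g. stage-1 IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields a non-global
 * (nG), unprivileged, inner-shareable PTE using the write-back MAIR index,
 * with the AF bit set.
 */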
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if ((prot & IOMMU_CACHE) && (prot & IOMMU_SYS_CACHE_NWA))
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		/* IOMMU_CACHE + IOMMU_SYS_CACHE is treated as plain IOMMU_CACHE */
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_SYS_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_SYS_CACHE_NWA)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
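/*
 * e.g. with ias = 48, the iaext check below requires IOVA bits 63:48 to
 * be all-zero (or all-one for a TTBR1 table).
 */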
static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp,
				  NULL);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}
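/*
 * e.g. unmapping 4KiB from the middle of an existing 2MiB block: the
 * block PTE is swapped for a level-3 table that remaps the remaining
 * 511 pages and leaves the unmapped slot empty.
 */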
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
				/*
				 * Order the PTE update against queueing the IOVA, to
				 * guarantee that a flush callback from a different CPU
				 * has observed it before the TLBIALL can be issued.
				 */
				smp_wmb();
			} else {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
}
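/*
 * Walks until a leaf PTE is found, then ORs the intra-block offset back
 * in: e.g. a 2MiB block keeps the low 21 bits of the IOVA.
 */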
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}
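/*
 * e.g. a caller advertising (SZ_4K | SZ_64K | SZ_2M | SZ_1G) on a kernel
 * with a 4KiB PAGE_SIZE ends up with (SZ_4K | SZ_2M | SZ_1G).
 */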
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
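/*
 * e.g. ias = 48 with a 4KiB granule: pg_shift = 12, bits_per_level = 9,
 * va_bits = 36, levels = 4, so start_level = 0 and pgd_bits = 9 (one
 * 4KiB level-0 table).
 */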
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.map_pages	= arm_lpae_map_pages,
		.unmap		= arm_lpae_unmap,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT |
			    IO_PGTABLE_QUIRK_ARM_TTBR1))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRANWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA)) |
	      (ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
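	/*
	 * e.g. ias = 40 with a 4KiB granule would otherwise need a
	 * 2-entry level-0 table; instead the two level-1 tables are
	 * concatenated into a single 8KiB pgd and the walk starts at
	 * level 1.
	 */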
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
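	/*
	 * SL0 is encoded inversely: e.g. with a 4KiB granule and a walk
	 * starting at level 1, sl is 2 by this point, giving SL0 = 1.
	 */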
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif
1328