xref: /OK3568_Linux_fs/kernel/drivers/iommu/io-pgtable-arm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * CPU-agnostic ARM page table allocator.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2014 ARM Limited
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Author: Will Deacon <will.deacon@arm.com>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/atomic.h>
13*4882a593Smuzhiyun #include <linux/bitops.h>
14*4882a593Smuzhiyun #include <linux/io-pgtable.h>
15*4882a593Smuzhiyun #include <linux/kernel.h>
16*4882a593Smuzhiyun #include <linux/sizes.h>
17*4882a593Smuzhiyun #include <linux/slab.h>
18*4882a593Smuzhiyun #include <linux/types.h>
19*4882a593Smuzhiyun #include <linux/dma-mapping.h>
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #include <asm/barrier.h>
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #include "io-pgtable-arm.h"
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #define ARM_LPAE_MAX_ADDR_BITS		52
26*4882a593Smuzhiyun #define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
27*4882a593Smuzhiyun #define ARM_LPAE_MAX_LEVELS		4
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun /* Struct accessors */
30*4882a593Smuzhiyun #define io_pgtable_to_data(x)						\
31*4882a593Smuzhiyun 	container_of((x), struct arm_lpae_io_pgtable, iop)
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #define io_pgtable_ops_to_data(x)					\
34*4882a593Smuzhiyun 	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun /*
37*4882a593Smuzhiyun  * Calculate the right shift amount to get to the portion describing level l
38*4882a593Smuzhiyun  * in a virtual address mapped by the pagetable in d.
39*4882a593Smuzhiyun  */
40*4882a593Smuzhiyun #define ARM_LPAE_LVL_SHIFT(l,d)						\
41*4882a593Smuzhiyun 	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
42*4882a593Smuzhiyun 	ilog2(sizeof(arm_lpae_iopte)))
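/*
 * Illustration, assuming a 4KiB granule (8-byte PTEs, so
 * ilog2(sizeof(arm_lpae_iopte)) == 3 and bits_per_level == 9):
 *   ARM_LPAE_LVL_SHIFT(3, d) = (4 - 3) * 9 + 3 = 12
 *   ARM_LPAE_LVL_SHIFT(2, d) = (4 - 2) * 9 + 3 = 21
 *   ARM_LPAE_LVL_SHIFT(1, d) = (4 - 1) * 9 + 3 = 30
 *   ARM_LPAE_LVL_SHIFT(0, d) = (4 - 0) * 9 + 3 = 39
 */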
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun #define ARM_LPAE_GRANULE(d)						\
45*4882a593Smuzhiyun 	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
46*4882a593Smuzhiyun #define ARM_LPAE_PGD_SIZE(d)						\
47*4882a593Smuzhiyun 	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun #define ARM_LPAE_PTES_PER_TABLE(d)					\
50*4882a593Smuzhiyun 	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun /*
53*4882a593Smuzhiyun  * Calculate the index at level l used to map virtual address a using the
54*4882a593Smuzhiyun  * pagetable in d.
55*4882a593Smuzhiyun  */
56*4882a593Smuzhiyun #define ARM_LPAE_PGD_IDX(l,d)						\
57*4882a593Smuzhiyun 	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun #define ARM_LPAE_LVL_IDX(a,l,d)						\
60*4882a593Smuzhiyun 	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
61*4882a593Smuzhiyun 	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /* Calculate the block/page mapping size at level l for pagetable in d. */
64*4882a593Smuzhiyun #define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
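/*
 * Continuing the 4KiB-granule illustration: ARM_LPAE_LVL_IDX(a, 3, d)
 * extracts bits 20:12 of the address ((a >> 12) & 0x1ff), and
 * ARM_LPAE_BLOCK_SIZE() evaluates to SZ_1G, SZ_2M and SZ_4K at levels
 * 1, 2 and 3. The ARM_LPAE_PGD_IDX() term widens the index only at the
 * start level, where the PGD may cover more than bits_per_level bits.
 */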
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun /* Page table bits */
67*4882a593Smuzhiyun #define ARM_LPAE_PTE_TYPE_SHIFT		0
68*4882a593Smuzhiyun #define ARM_LPAE_PTE_TYPE_MASK		0x3
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun #define ARM_LPAE_PTE_TYPE_BLOCK		1
71*4882a593Smuzhiyun #define ARM_LPAE_PTE_TYPE_TABLE		3
72*4882a593Smuzhiyun #define ARM_LPAE_PTE_TYPE_PAGE		3
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun #define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun #define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
77*4882a593Smuzhiyun #define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
78*4882a593Smuzhiyun #define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
79*4882a593Smuzhiyun #define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
80*4882a593Smuzhiyun #define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
81*4882a593Smuzhiyun #define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
82*4882a593Smuzhiyun #define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
83*4882a593Smuzhiyun #define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun #define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
86*4882a593Smuzhiyun /* Ignore the contiguous bit for block splitting */
87*4882a593Smuzhiyun #define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
88*4882a593Smuzhiyun #define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
89*4882a593Smuzhiyun 					 ARM_LPAE_PTE_ATTR_HI_MASK)
90*4882a593Smuzhiyun /* Software bit for solving coherency races */
91*4882a593Smuzhiyun #define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun /* Stage-1 PTE */
94*4882a593Smuzhiyun #define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
95*4882a593Smuzhiyun #define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
96*4882a593Smuzhiyun #define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
97*4882a593Smuzhiyun #define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun /* Stage-2 PTE */
100*4882a593Smuzhiyun #define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
101*4882a593Smuzhiyun #define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
102*4882a593Smuzhiyun #define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
103*4882a593Smuzhiyun #define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
104*4882a593Smuzhiyun #define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
105*4882a593Smuzhiyun #define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /* Register bits */
108*4882a593Smuzhiyun #define ARM_LPAE_VTCR_SL0_MASK		0x3
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun #define ARM_LPAE_TCR_T0SZ_SHIFT		0
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun #define ARM_LPAE_VTCR_PS_SHIFT		16
113*4882a593Smuzhiyun #define ARM_LPAE_VTCR_PS_MASK		0x7
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_SHIFT(n)			((n) << 3)
116*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_MASK				0xff
117*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_DEVICE			0x04ULL
118*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_NC				0x44ULL
119*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_INC_OWBRANWA			0xe4ULL
120*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA		0xefULL
121*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_INC_OWBRWA			0xf4ULL
122*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_WBRWA			0xffULL
123*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_IDX_NC			0
124*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_IDX_CACHE			1
125*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_IDX_DEV			2
126*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE		3
127*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA		4
128*4882a593Smuzhiyun #define ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA	5
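/*
 * Each index above selects one byte of the MAIR value programmed by
 * arm_64_lpae_alloc_pgtable_s1() below: attribute n occupies bits
 * (n * 8 + 7):(n * 8). For example, a stage-1 PTE whose ATTRINDX field
 * holds ARM_LPAE_MAIR_ATTR_IDX_CACHE selects MAIR byte 1, which is set
 * to ARM_LPAE_MAIR_ATTR_WBRWA (0xff, inner/outer write-back,
 * read/write-allocate).
 */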
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun #define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
131*4882a593Smuzhiyun #define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
132*4882a593Smuzhiyun #define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun #define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
135*4882a593Smuzhiyun #define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun /* IOPTE accessors */
138*4882a593Smuzhiyun #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun #define iopte_type(pte,l)					\
141*4882a593Smuzhiyun 	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun #define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun struct arm_lpae_io_pgtable {
146*4882a593Smuzhiyun 	struct io_pgtable	iop;
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	int			pgd_bits;
149*4882a593Smuzhiyun 	int			start_level;
150*4882a593Smuzhiyun 	int			bits_per_level;
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	void			*pgd;
153*4882a593Smuzhiyun };
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun typedef u64 arm_lpae_iopte;
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
158*4882a593Smuzhiyun 			      enum io_pgtable_fmt fmt)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
161*4882a593Smuzhiyun 		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
167*4882a593Smuzhiyun 				     struct arm_lpae_io_pgtable *data)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun 	arm_lpae_iopte pte = paddr;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
172*4882a593Smuzhiyun 	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
176*4882a593Smuzhiyun 				  struct arm_lpae_io_pgtable *data)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun 	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	if (ARM_LPAE_GRANULE(data) < SZ_64K)
181*4882a593Smuzhiyun 		return paddr;
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun 	/* Rotate the packed high-order bits back to the top */
184*4882a593Smuzhiyun 	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
185*4882a593Smuzhiyun }
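/*
 * Worked example of the packing above, assuming a 64KiB granule and a
 * 52-bit OAS: for paddr 0x000a123456780000, bits 51:48 (0xa) are folded
 * into PTE bits 15:12, giving an address field of 0x000012345678a000;
 * iopte_to_paddr() rotates them back to recover the original address.
 * With granules smaller than 64KiB the OAS is at most 48 bits, so both
 * helpers degenerate to a plain mask.
 */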
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun static bool selftest_running = false;
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun static dma_addr_t __arm_lpae_dma_addr(void *pages)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun 	return (dma_addr_t)virt_to_phys(pages);
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
195*4882a593Smuzhiyun 				    struct io_pgtable_cfg *cfg)
196*4882a593Smuzhiyun {
197*4882a593Smuzhiyun 	struct device *dev = cfg->iommu_dev;
198*4882a593Smuzhiyun 	int order = get_order(size);
199*4882a593Smuzhiyun 	struct page *p;
200*4882a593Smuzhiyun 	dma_addr_t dma;
201*4882a593Smuzhiyun 	void *pages;
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	VM_BUG_ON((gfp & __GFP_HIGHMEM));
204*4882a593Smuzhiyun 	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
205*4882a593Smuzhiyun 			     gfp | __GFP_ZERO, order);
206*4882a593Smuzhiyun 	if (!p)
207*4882a593Smuzhiyun 		return NULL;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	pages = page_address(p);
210*4882a593Smuzhiyun 	if (!cfg->coherent_walk) {
211*4882a593Smuzhiyun 		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
212*4882a593Smuzhiyun 		if (dma_mapping_error(dev, dma))
213*4882a593Smuzhiyun 			goto out_free;
214*4882a593Smuzhiyun 		/*
215*4882a593Smuzhiyun 		 * We depend on the IOMMU being able to work with any physical
216*4882a593Smuzhiyun 		 * address directly, so if the DMA layer suggests otherwise by
217*4882a593Smuzhiyun 		 * translating or truncating them, that bodes very badly...
218*4882a593Smuzhiyun 		 */
219*4882a593Smuzhiyun 		if (dma != virt_to_phys(pages))
220*4882a593Smuzhiyun 			goto out_unmap;
221*4882a593Smuzhiyun 	}
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	return pages;
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun out_unmap:
226*4882a593Smuzhiyun 	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
227*4882a593Smuzhiyun 	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
228*4882a593Smuzhiyun out_free:
229*4882a593Smuzhiyun 	__free_pages(p, order);
230*4882a593Smuzhiyun 	return NULL;
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun static void __arm_lpae_free_pages(void *pages, size_t size,
234*4882a593Smuzhiyun 				  struct io_pgtable_cfg *cfg)
235*4882a593Smuzhiyun {
236*4882a593Smuzhiyun 	if (!cfg->coherent_walk)
237*4882a593Smuzhiyun 		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
238*4882a593Smuzhiyun 				 size, DMA_TO_DEVICE);
239*4882a593Smuzhiyun 	free_pages((unsigned long)pages, get_order(size));
240*4882a593Smuzhiyun }
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
243*4882a593Smuzhiyun 				struct io_pgtable_cfg *cfg)
244*4882a593Smuzhiyun {
245*4882a593Smuzhiyun 	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
246*4882a593Smuzhiyun 				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
247*4882a593Smuzhiyun }
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	*ptep = 0;
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	if (!cfg->coherent_walk)
255*4882a593Smuzhiyun 		__arm_lpae_sync_pte(ptep, 1, cfg);
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
259*4882a593Smuzhiyun 			       struct iommu_iotlb_gather *gather,
260*4882a593Smuzhiyun 			       unsigned long iova, size_t size, size_t pgcount,
261*4882a593Smuzhiyun 			       int lvl, arm_lpae_iopte *ptep);
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
264*4882a593Smuzhiyun 				phys_addr_t paddr, arm_lpae_iopte prot,
265*4882a593Smuzhiyun 				int lvl, int num_entries, arm_lpae_iopte *ptep)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	arm_lpae_iopte pte = prot;
268*4882a593Smuzhiyun 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
269*4882a593Smuzhiyun 	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
270*4882a593Smuzhiyun 	int i;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
273*4882a593Smuzhiyun 		pte |= ARM_LPAE_PTE_TYPE_PAGE;
274*4882a593Smuzhiyun 	else
275*4882a593Smuzhiyun 		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	for (i = 0; i < num_entries; i++)
278*4882a593Smuzhiyun 		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun 	if (!cfg->coherent_walk)
281*4882a593Smuzhiyun 		__arm_lpae_sync_pte(ptep, num_entries, cfg);
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
285*4882a593Smuzhiyun 			     unsigned long iova, phys_addr_t paddr,
286*4882a593Smuzhiyun 			     arm_lpae_iopte prot, int lvl, int num_entries,
287*4882a593Smuzhiyun 			     arm_lpae_iopte *ptep)
288*4882a593Smuzhiyun {
289*4882a593Smuzhiyun 	int i;
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 	for (i = 0; i < num_entries; i++)
292*4882a593Smuzhiyun 		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
293*4882a593Smuzhiyun 			/* We require an unmap first */
294*4882a593Smuzhiyun 			WARN_ON(!selftest_running);
295*4882a593Smuzhiyun 			return -EEXIST;
296*4882a593Smuzhiyun 		} else if (iopte_type(ptep[i], lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
297*4882a593Smuzhiyun 			/*
298*4882a593Smuzhiyun 			 * We need to unmap and free the old table before
299*4882a593Smuzhiyun 			 * overwriting it with a block entry.
300*4882a593Smuzhiyun 			 */
301*4882a593Smuzhiyun 			arm_lpae_iopte *tblp;
302*4882a593Smuzhiyun 			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
305*4882a593Smuzhiyun 			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
306*4882a593Smuzhiyun 					     lvl, tblp) != sz) {
307*4882a593Smuzhiyun 				WARN_ON(1);
308*4882a593Smuzhiyun 				return -EINVAL;
309*4882a593Smuzhiyun 			}
310*4882a593Smuzhiyun 		}
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
313*4882a593Smuzhiyun 	return 0;
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
317*4882a593Smuzhiyun 					     arm_lpae_iopte *ptep,
318*4882a593Smuzhiyun 					     arm_lpae_iopte curr,
319*4882a593Smuzhiyun 					     struct arm_lpae_io_pgtable *data)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun 	arm_lpae_iopte old, new;
322*4882a593Smuzhiyun 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
325*4882a593Smuzhiyun 	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
326*4882a593Smuzhiyun 		new |= ARM_LPAE_PTE_NSTABLE;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	/*
329*4882a593Smuzhiyun 	 * Ensure the table itself is visible before its PTE can be.
330*4882a593Smuzhiyun 	 * Whilst we could get away with cmpxchg64_release below, this
331*4882a593Smuzhiyun 	 * doesn't have any ordering semantics when !CONFIG_SMP.
332*4882a593Smuzhiyun 	 */
333*4882a593Smuzhiyun 	dma_wmb();
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	old = cmpxchg64_relaxed(ptep, curr, new);
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
338*4882a593Smuzhiyun 		return old;
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	/* Even if it's not ours, there's no point waiting; just kick it */
341*4882a593Smuzhiyun 	__arm_lpae_sync_pte(ptep, 1, cfg);
342*4882a593Smuzhiyun 	if (old == curr)
343*4882a593Smuzhiyun 		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	return old;
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
349*4882a593Smuzhiyun 			  phys_addr_t paddr, size_t size, size_t pgcount,
350*4882a593Smuzhiyun 			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
351*4882a593Smuzhiyun 			  gfp_t gfp, size_t *mapped)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun 	arm_lpae_iopte *cptep, pte;
354*4882a593Smuzhiyun 	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
355*4882a593Smuzhiyun 	size_t tblsz = ARM_LPAE_GRANULE(data);
356*4882a593Smuzhiyun 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
357*4882a593Smuzhiyun 	int ret = 0, num_entries, max_entries, map_idx_start;
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	/* Find our entry at the current level */
360*4882a593Smuzhiyun 	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
361*4882a593Smuzhiyun 	ptep += map_idx_start;
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	/* If we can install a leaf entry at this level, then do so */
364*4882a593Smuzhiyun 	if (size == block_size) {
365*4882a593Smuzhiyun 		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
366*4882a593Smuzhiyun 		num_entries = min_t(int, pgcount, max_entries);
367*4882a593Smuzhiyun 		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
368*4882a593Smuzhiyun 		if (!ret && mapped)
369*4882a593Smuzhiyun 			*mapped += num_entries * size;
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 		return ret;
372*4882a593Smuzhiyun 	}
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	/* We can't allocate tables at the final level */
375*4882a593Smuzhiyun 	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
376*4882a593Smuzhiyun 		return -EINVAL;
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	/* Grab a pointer to the next level */
379*4882a593Smuzhiyun 	pte = READ_ONCE(*ptep);
380*4882a593Smuzhiyun 	if (!pte) {
381*4882a593Smuzhiyun 		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
382*4882a593Smuzhiyun 		if (!cptep)
383*4882a593Smuzhiyun 			return -ENOMEM;
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 		pte = arm_lpae_install_table(cptep, ptep, 0, data);
386*4882a593Smuzhiyun 		if (pte)
387*4882a593Smuzhiyun 			__arm_lpae_free_pages(cptep, tblsz, cfg);
388*4882a593Smuzhiyun 	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
389*4882a593Smuzhiyun 		__arm_lpae_sync_pte(ptep, 1, cfg);
390*4882a593Smuzhiyun 	}
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
393*4882a593Smuzhiyun 		cptep = iopte_deref(pte, data);
394*4882a593Smuzhiyun 	} else if (pte) {
395*4882a593Smuzhiyun 		/* We require an unmap first */
396*4882a593Smuzhiyun 		WARN_ON(!selftest_running);
397*4882a593Smuzhiyun 		return -EEXIST;
398*4882a593Smuzhiyun 	}
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	/* Rinse, repeat */
401*4882a593Smuzhiyun 	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
402*4882a593Smuzhiyun 			      cptep, gfp, mapped);
403*4882a593Smuzhiyun }
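/*
 * Sketch of the recursion, assuming a 4KiB granule and a 48-bit IAS
 * (start_level == 0): mapping a 2MiB page walks levels 0 and 1,
 * allocating or dereferencing a next-level table at each step, and
 * installs a block entry at level 2 where block_size == SZ_2M. A 4KiB
 * mapping descends one level further and installs a page entry at
 * level 3.
 */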
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
406*4882a593Smuzhiyun 					   int prot)
407*4882a593Smuzhiyun {
408*4882a593Smuzhiyun 	arm_lpae_iopte pte;
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	if (data->iop.fmt == ARM_64_LPAE_S1 ||
411*4882a593Smuzhiyun 	    data->iop.fmt == ARM_32_LPAE_S1) {
412*4882a593Smuzhiyun 		pte = ARM_LPAE_PTE_nG;
413*4882a593Smuzhiyun 		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
414*4882a593Smuzhiyun 			pte |= ARM_LPAE_PTE_AP_RDONLY;
415*4882a593Smuzhiyun 		if (!(prot & IOMMU_PRIV))
416*4882a593Smuzhiyun 			pte |= ARM_LPAE_PTE_AP_UNPRIV;
417*4882a593Smuzhiyun 	} else {
418*4882a593Smuzhiyun 		pte = ARM_LPAE_PTE_HAP_FAULT;
419*4882a593Smuzhiyun 		if (prot & IOMMU_READ)
420*4882a593Smuzhiyun 			pte |= ARM_LPAE_PTE_HAP_READ;
421*4882a593Smuzhiyun 		if (prot & IOMMU_WRITE)
422*4882a593Smuzhiyun 			pte |= ARM_LPAE_PTE_HAP_WRITE;
423*4882a593Smuzhiyun 	}
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	/*
426*4882a593Smuzhiyun 	 * Note that this logic is structured to accommodate Mali LPAE
427*4882a593Smuzhiyun 	 * having stage-1-like attributes but stage-2-like permissions.
428*4882a593Smuzhiyun 	 */
429*4882a593Smuzhiyun 	if (data->iop.fmt == ARM_64_LPAE_S2 ||
430*4882a593Smuzhiyun 	    data->iop.fmt == ARM_32_LPAE_S2) {
431*4882a593Smuzhiyun 		if (prot & IOMMU_MMIO)
432*4882a593Smuzhiyun 			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
433*4882a593Smuzhiyun 		else if (prot & IOMMU_CACHE)
434*4882a593Smuzhiyun 			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
435*4882a593Smuzhiyun 		else
436*4882a593Smuzhiyun 			pte |= ARM_LPAE_PTE_MEMATTR_NC;
437*4882a593Smuzhiyun 	} else {
438*4882a593Smuzhiyun 		if (prot & IOMMU_MMIO)
439*4882a593Smuzhiyun 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
440*4882a593Smuzhiyun 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
441*4882a593Smuzhiyun 		else if ((prot & IOMMU_CACHE) && (prot & IOMMU_SYS_CACHE_NWA))
442*4882a593Smuzhiyun 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA
443*4882a593Smuzhiyun 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
444*4882a593Smuzhiyun 		/* IOMMU_CACHE + IOMMU_SYS_CACHE equivalent to IOMMU_CACHE */
445*4882a593Smuzhiyun 		else if (prot & IOMMU_CACHE)
446*4882a593Smuzhiyun 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
447*4882a593Smuzhiyun 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
448*4882a593Smuzhiyun 		else if (prot & IOMMU_SYS_CACHE)
449*4882a593Smuzhiyun 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
450*4882a593Smuzhiyun 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
451*4882a593Smuzhiyun 		else if (prot & IOMMU_SYS_CACHE_NWA)
452*4882a593Smuzhiyun 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA
453*4882a593Smuzhiyun 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
454*4882a593Smuzhiyun 	}
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	/*
457*4882a593Smuzhiyun 	 * Also Mali has its own notions of shareability wherein its Inner
458*4882a593Smuzhiyun 	 * domain covers the cores within the GPU, and its Outer domain is
459*4882a593Smuzhiyun 	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
460*4882a593Smuzhiyun 	 * terms, depending on coherency).
461*4882a593Smuzhiyun 	 */
462*4882a593Smuzhiyun 	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
463*4882a593Smuzhiyun 		pte |= ARM_LPAE_PTE_SH_IS;
464*4882a593Smuzhiyun 	else
465*4882a593Smuzhiyun 		pte |= ARM_LPAE_PTE_SH_OS;
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	if (prot & IOMMU_NOEXEC)
468*4882a593Smuzhiyun 		pte |= ARM_LPAE_PTE_XN;
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
471*4882a593Smuzhiyun 		pte |= ARM_LPAE_PTE_NS;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	if (data->iop.fmt != ARM_MALI_LPAE)
474*4882a593Smuzhiyun 		pte |= ARM_LPAE_PTE_AF;
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	return pte;
477*4882a593Smuzhiyun }
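/*
 * Example, assuming the stage-1 format and no quirks: a request for
 * IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields ARM_LPAE_PTE_nG |
 * ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_SH_IS | ARM_LPAE_PTE_AF with
 * ATTRINDX = ARM_LPAE_MAIR_ATTR_IDX_CACHE; the descriptor type and
 * valid bit are added later by __arm_lpae_init_pte().
 */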
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
480*4882a593Smuzhiyun 			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
481*4882a593Smuzhiyun 			      int iommu_prot, gfp_t gfp, size_t *mapped)
482*4882a593Smuzhiyun {
483*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
484*4882a593Smuzhiyun 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
485*4882a593Smuzhiyun 	arm_lpae_iopte *ptep = data->pgd;
486*4882a593Smuzhiyun 	int ret, lvl = data->start_level;
487*4882a593Smuzhiyun 	arm_lpae_iopte prot;
488*4882a593Smuzhiyun 	long iaext = (s64)iova >> cfg->ias;
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun 	/* If no access, then nothing to do */
491*4882a593Smuzhiyun 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
492*4882a593Smuzhiyun 		return 0;
493*4882a593Smuzhiyun 
494*4882a593Smuzhiyun 	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
495*4882a593Smuzhiyun 		return -EINVAL;
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun 	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
498*4882a593Smuzhiyun 		iaext = ~iaext;
499*4882a593Smuzhiyun 	if (WARN_ON(iaext || paddr >> cfg->oas))
500*4882a593Smuzhiyun 		return -ERANGE;
501*4882a593Smuzhiyun 
502*4882a593Smuzhiyun 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
503*4882a593Smuzhiyun 	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
504*4882a593Smuzhiyun 			     ptep, gfp, mapped);
505*4882a593Smuzhiyun 	/*
506*4882a593Smuzhiyun 	 * Synchronise all PTE updates for the new mapping before there's
507*4882a593Smuzhiyun 	 * a chance for anything to kick off a table walk for the new iova.
508*4882a593Smuzhiyun 	 */
509*4882a593Smuzhiyun 	wmb();
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 	return ret;
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
516*4882a593Smuzhiyun 			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
517*4882a593Smuzhiyun {
518*4882a593Smuzhiyun 	return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp,
519*4882a593Smuzhiyun 				  NULL);
520*4882a593Smuzhiyun }
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
523*4882a593Smuzhiyun 				    arm_lpae_iopte *ptep)
524*4882a593Smuzhiyun {
525*4882a593Smuzhiyun 	arm_lpae_iopte *start, *end;
526*4882a593Smuzhiyun 	unsigned long table_size;
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	if (lvl == data->start_level)
529*4882a593Smuzhiyun 		table_size = ARM_LPAE_PGD_SIZE(data);
530*4882a593Smuzhiyun 	else
531*4882a593Smuzhiyun 		table_size = ARM_LPAE_GRANULE(data);
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	start = ptep;
534*4882a593Smuzhiyun 
535*4882a593Smuzhiyun 	/* Only leaf entries at the last level */
536*4882a593Smuzhiyun 	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
537*4882a593Smuzhiyun 		end = ptep;
538*4882a593Smuzhiyun 	else
539*4882a593Smuzhiyun 		end = (void *)ptep + table_size;
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 	while (ptep != end) {
542*4882a593Smuzhiyun 		arm_lpae_iopte pte = *ptep++;
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
545*4882a593Smuzhiyun 			continue;
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun 		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
548*4882a593Smuzhiyun 	}
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
551*4882a593Smuzhiyun }
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun static void arm_lpae_free_pgtable(struct io_pgtable *iop)
554*4882a593Smuzhiyun {
555*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
558*4882a593Smuzhiyun 	kfree(data);
559*4882a593Smuzhiyun }
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
562*4882a593Smuzhiyun 				       struct iommu_iotlb_gather *gather,
563*4882a593Smuzhiyun 				       unsigned long iova, size_t size,
564*4882a593Smuzhiyun 				       arm_lpae_iopte blk_pte, int lvl,
565*4882a593Smuzhiyun 				       arm_lpae_iopte *ptep, size_t pgcount)
566*4882a593Smuzhiyun {
567*4882a593Smuzhiyun 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
568*4882a593Smuzhiyun 	arm_lpae_iopte pte, *tablep;
569*4882a593Smuzhiyun 	phys_addr_t blk_paddr;
570*4882a593Smuzhiyun 	size_t tablesz = ARM_LPAE_GRANULE(data);
571*4882a593Smuzhiyun 	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
572*4882a593Smuzhiyun 	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
573*4882a593Smuzhiyun 	int i, unmap_idx_start = -1, num_entries = 0, max_entries;
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
576*4882a593Smuzhiyun 		return 0;
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
579*4882a593Smuzhiyun 	if (!tablep)
580*4882a593Smuzhiyun 		return 0; /* Bytes unmapped */
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	if (size == split_sz) {
583*4882a593Smuzhiyun 		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
584*4882a593Smuzhiyun 		max_entries = ptes_per_table - unmap_idx_start;
585*4882a593Smuzhiyun 		num_entries = min_t(int, pgcount, max_entries);
586*4882a593Smuzhiyun 	}
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun 	blk_paddr = iopte_to_paddr(blk_pte, data);
589*4882a593Smuzhiyun 	pte = iopte_prot(blk_pte);
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
592*4882a593Smuzhiyun 		/* Unmap! */
593*4882a593Smuzhiyun 		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
594*4882a593Smuzhiyun 			continue;
595*4882a593Smuzhiyun 
596*4882a593Smuzhiyun 		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
597*4882a593Smuzhiyun 	}
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
600*4882a593Smuzhiyun 	if (pte != blk_pte) {
601*4882a593Smuzhiyun 		__arm_lpae_free_pages(tablep, tablesz, cfg);
602*4882a593Smuzhiyun 		/*
603*4882a593Smuzhiyun 		 * We may race against someone unmapping another part of this
604*4882a593Smuzhiyun 		 * block, but anything else is invalid. We can't misinterpret
605*4882a593Smuzhiyun 		 * a page entry here since we're never at the last level.
606*4882a593Smuzhiyun 		 */
607*4882a593Smuzhiyun 		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
608*4882a593Smuzhiyun 			return 0;
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun 		tablep = iopte_deref(pte, data);
611*4882a593Smuzhiyun 	} else if (unmap_idx_start >= 0) {
612*4882a593Smuzhiyun 		for (i = 0; i < num_entries; i++)
613*4882a593Smuzhiyun 			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun 		return num_entries * size;
616*4882a593Smuzhiyun 	}
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun 	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
619*4882a593Smuzhiyun }
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
622*4882a593Smuzhiyun 			       struct iommu_iotlb_gather *gather,
623*4882a593Smuzhiyun 			       unsigned long iova, size_t size, size_t pgcount,
624*4882a593Smuzhiyun 			       int lvl, arm_lpae_iopte *ptep)
625*4882a593Smuzhiyun {
626*4882a593Smuzhiyun 	arm_lpae_iopte pte;
627*4882a593Smuzhiyun 	struct io_pgtable *iop = &data->iop;
628*4882a593Smuzhiyun 	int i = 0, num_entries, max_entries, unmap_idx_start;
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 	/* Something went horribly wrong and we ran out of page table */
631*4882a593Smuzhiyun 	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
632*4882a593Smuzhiyun 		return 0;
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
635*4882a593Smuzhiyun 	ptep += unmap_idx_start;
636*4882a593Smuzhiyun 	pte = READ_ONCE(*ptep);
637*4882a593Smuzhiyun 	if (WARN_ON(!pte))
638*4882a593Smuzhiyun 		return 0;
639*4882a593Smuzhiyun 
640*4882a593Smuzhiyun 	/* If the size matches this level, we're in the right place */
641*4882a593Smuzhiyun 	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
642*4882a593Smuzhiyun 		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
643*4882a593Smuzhiyun 		num_entries = min_t(int, pgcount, max_entries);
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 		while (i < num_entries) {
646*4882a593Smuzhiyun 			pte = READ_ONCE(*ptep);
647*4882a593Smuzhiyun 			if (WARN_ON(!pte))
648*4882a593Smuzhiyun 				break;
649*4882a593Smuzhiyun 
650*4882a593Smuzhiyun 			__arm_lpae_clear_pte(ptep, &iop->cfg);
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun 			if (!iopte_leaf(pte, lvl, iop->fmt)) {
653*4882a593Smuzhiyun 				/* Also flush any partial walks */
654*4882a593Smuzhiyun 				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
655*4882a593Smuzhiyun 							  ARM_LPAE_GRANULE(data));
656*4882a593Smuzhiyun 				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
657*4882a593Smuzhiyun 			} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
658*4882a593Smuzhiyun 				/*
659*4882a593Smuzhiyun 				 * Order the PTE update against queueing the IOVA, to
660*4882a593Smuzhiyun 				 * guarantee that a flush callback from a different CPU
661*4882a593Smuzhiyun 				 * has observed it before the TLBIALL can be issued.
662*4882a593Smuzhiyun 				 */
663*4882a593Smuzhiyun 				smp_wmb();
664*4882a593Smuzhiyun 			} else {
665*4882a593Smuzhiyun 				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
666*4882a593Smuzhiyun 			}
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 			ptep++;
669*4882a593Smuzhiyun 			i++;
670*4882a593Smuzhiyun 		}
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 		return i * size;
673*4882a593Smuzhiyun 	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
674*4882a593Smuzhiyun 		/*
675*4882a593Smuzhiyun 		 * Insert a table at the next level to map the old region,
676*4882a593Smuzhiyun 		 * minus the part we want to unmap
677*4882a593Smuzhiyun 		 */
678*4882a593Smuzhiyun 		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
679*4882a593Smuzhiyun 						lvl + 1, ptep, pgcount);
680*4882a593Smuzhiyun 	}
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 	/* Keep on walkin' */
683*4882a593Smuzhiyun 	ptep = iopte_deref(pte, data);
684*4882a593Smuzhiyun 	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
688*4882a593Smuzhiyun 				   size_t pgsize, size_t pgcount,
689*4882a593Smuzhiyun 				   struct iommu_iotlb_gather *gather)
690*4882a593Smuzhiyun {
691*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
692*4882a593Smuzhiyun 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
693*4882a593Smuzhiyun 	arm_lpae_iopte *ptep = data->pgd;
694*4882a593Smuzhiyun 	long iaext = (s64)iova >> cfg->ias;
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
697*4882a593Smuzhiyun 		return 0;
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
700*4882a593Smuzhiyun 		iaext = ~iaext;
701*4882a593Smuzhiyun 	if (WARN_ON(iaext))
702*4882a593Smuzhiyun 		return 0;
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
705*4882a593Smuzhiyun 				data->start_level, ptep);
706*4882a593Smuzhiyun }
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
709*4882a593Smuzhiyun 			     size_t size, struct iommu_iotlb_gather *gather)
710*4882a593Smuzhiyun {
711*4882a593Smuzhiyun 	return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
712*4882a593Smuzhiyun }
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
715*4882a593Smuzhiyun 					 unsigned long iova)
716*4882a593Smuzhiyun {
717*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
718*4882a593Smuzhiyun 	arm_lpae_iopte pte, *ptep = data->pgd;
719*4882a593Smuzhiyun 	int lvl = data->start_level;
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	do {
722*4882a593Smuzhiyun 		/* Valid IOPTE pointer? */
723*4882a593Smuzhiyun 		if (!ptep)
724*4882a593Smuzhiyun 			return 0;
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 		/* Grab the IOPTE we're interested in */
727*4882a593Smuzhiyun 		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
728*4882a593Smuzhiyun 		pte = READ_ONCE(*ptep);
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 		/* Valid entry? */
731*4882a593Smuzhiyun 		if (!pte)
732*4882a593Smuzhiyun 			return 0;
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun 		/* Leaf entry? */
735*4882a593Smuzhiyun 		if (iopte_leaf(pte, lvl, data->iop.fmt))
736*4882a593Smuzhiyun 			goto found_translation;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 		/* Take it to the next level */
739*4882a593Smuzhiyun 		ptep = iopte_deref(pte, data);
740*4882a593Smuzhiyun 	} while (++lvl < ARM_LPAE_MAX_LEVELS);
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun 	/* Ran out of page tables to walk */
743*4882a593Smuzhiyun 	return 0;
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun found_translation:
746*4882a593Smuzhiyun 	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
747*4882a593Smuzhiyun 	return iopte_to_paddr(pte, data) | iova;
748*4882a593Smuzhiyun }
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
751*4882a593Smuzhiyun {
752*4882a593Smuzhiyun 	unsigned long granule, page_sizes;
753*4882a593Smuzhiyun 	unsigned int max_addr_bits = 48;
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun 	/*
756*4882a593Smuzhiyun 	 * We need to restrict the supported page sizes to match the
757*4882a593Smuzhiyun 	 * translation regime for a particular granule. Aim to match
758*4882a593Smuzhiyun 	 * the CPU page size if possible, otherwise prefer smaller sizes.
759*4882a593Smuzhiyun 	 * While we're at it, restrict the block sizes to match the
760*4882a593Smuzhiyun 	 * chosen granule.
761*4882a593Smuzhiyun 	 */
762*4882a593Smuzhiyun 	if (cfg->pgsize_bitmap & PAGE_SIZE)
763*4882a593Smuzhiyun 		granule = PAGE_SIZE;
764*4882a593Smuzhiyun 	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
765*4882a593Smuzhiyun 		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
766*4882a593Smuzhiyun 	else if (cfg->pgsize_bitmap & PAGE_MASK)
767*4882a593Smuzhiyun 		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
768*4882a593Smuzhiyun 	else
769*4882a593Smuzhiyun 		granule = 0;
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun 	switch (granule) {
772*4882a593Smuzhiyun 	case SZ_4K:
773*4882a593Smuzhiyun 		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
774*4882a593Smuzhiyun 		break;
775*4882a593Smuzhiyun 	case SZ_16K:
776*4882a593Smuzhiyun 		page_sizes = (SZ_16K | SZ_32M);
777*4882a593Smuzhiyun 		break;
778*4882a593Smuzhiyun 	case SZ_64K:
779*4882a593Smuzhiyun 		max_addr_bits = 52;
780*4882a593Smuzhiyun 		page_sizes = (SZ_64K | SZ_512M);
781*4882a593Smuzhiyun 		if (cfg->oas > 48)
782*4882a593Smuzhiyun 			page_sizes |= 1ULL << 42; /* 4TB */
783*4882a593Smuzhiyun 		break;
784*4882a593Smuzhiyun 	default:
785*4882a593Smuzhiyun 		page_sizes = 0;
786*4882a593Smuzhiyun 	}
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun 	cfg->pgsize_bitmap &= page_sizes;
789*4882a593Smuzhiyun 	cfg->ias = min(cfg->ias, max_addr_bits);
790*4882a593Smuzhiyun 	cfg->oas = min(cfg->oas, max_addr_bits);
791*4882a593Smuzhiyun }
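/*
 * Example: if a driver advertises SZ_4K | SZ_64K | SZ_2M | SZ_512M and
 * the CPU PAGE_SIZE is 4KiB, the 4KiB granule is chosen and the bitmap
 * is reduced to SZ_4K | SZ_2M. Only the 64KiB granule (with oas > 48)
 * unlocks the 52-bit address range.
 */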
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun static struct arm_lpae_io_pgtable *
794*4882a593Smuzhiyun arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
795*4882a593Smuzhiyun {
796*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data;
797*4882a593Smuzhiyun 	int levels, va_bits, pg_shift;
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 	arm_lpae_restrict_pgsizes(cfg);
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
802*4882a593Smuzhiyun 		return NULL;
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun 	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
805*4882a593Smuzhiyun 		return NULL;
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
808*4882a593Smuzhiyun 		return NULL;
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun 	data = kmalloc(sizeof(*data), GFP_KERNEL);
811*4882a593Smuzhiyun 	if (!data)
812*4882a593Smuzhiyun 		return NULL;
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 	pg_shift = __ffs(cfg->pgsize_bitmap);
815*4882a593Smuzhiyun 	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 	va_bits = cfg->ias - pg_shift;
818*4882a593Smuzhiyun 	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
819*4882a593Smuzhiyun 	data->start_level = ARM_LPAE_MAX_LEVELS - levels;
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 	/* Calculate the actual size of our pgd (without concatenation) */
822*4882a593Smuzhiyun 	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 	data->iop.ops = (struct io_pgtable_ops) {
825*4882a593Smuzhiyun 		.map		= arm_lpae_map,
826*4882a593Smuzhiyun 		.map_pages	= arm_lpae_map_pages,
827*4882a593Smuzhiyun 		.unmap		= arm_lpae_unmap,
828*4882a593Smuzhiyun 		.unmap_pages	= arm_lpae_unmap_pages,
829*4882a593Smuzhiyun 		.iova_to_phys	= arm_lpae_iova_to_phys,
830*4882a593Smuzhiyun 	};
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	return data;
833*4882a593Smuzhiyun }
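/*
 * Example of the geometry above, assuming a 4KiB granule and ias == 48:
 * pg_shift == 12, bits_per_level == 9, va_bits == 36 and levels == 4,
 * so start_level == 0 and pgd_bits == 9, i.e. a single 4KiB top-level
 * table.
 */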
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun static struct io_pgtable *
836*4882a593Smuzhiyun arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
837*4882a593Smuzhiyun {
838*4882a593Smuzhiyun 	u64 reg;
839*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data;
840*4882a593Smuzhiyun 	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
841*4882a593Smuzhiyun 	bool tg1;
842*4882a593Smuzhiyun 
843*4882a593Smuzhiyun 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
844*4882a593Smuzhiyun 			    IO_PGTABLE_QUIRK_NON_STRICT |
845*4882a593Smuzhiyun 			    IO_PGTABLE_QUIRK_ARM_TTBR1))
846*4882a593Smuzhiyun 		return NULL;
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun 	data = arm_lpae_alloc_pgtable(cfg);
849*4882a593Smuzhiyun 	if (!data)
850*4882a593Smuzhiyun 		return NULL;
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	/* TCR */
853*4882a593Smuzhiyun 	if (cfg->coherent_walk) {
854*4882a593Smuzhiyun 		tcr->sh = ARM_LPAE_TCR_SH_IS;
855*4882a593Smuzhiyun 		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
856*4882a593Smuzhiyun 		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
857*4882a593Smuzhiyun 	} else {
858*4882a593Smuzhiyun 		tcr->sh = ARM_LPAE_TCR_SH_OS;
859*4882a593Smuzhiyun 		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
860*4882a593Smuzhiyun 		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
861*4882a593Smuzhiyun 	}
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
864*4882a593Smuzhiyun 	switch (ARM_LPAE_GRANULE(data)) {
865*4882a593Smuzhiyun 	case SZ_4K:
866*4882a593Smuzhiyun 		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
867*4882a593Smuzhiyun 		break;
868*4882a593Smuzhiyun 	case SZ_16K:
869*4882a593Smuzhiyun 		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
870*4882a593Smuzhiyun 		break;
871*4882a593Smuzhiyun 	case SZ_64K:
872*4882a593Smuzhiyun 		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
873*4882a593Smuzhiyun 		break;
874*4882a593Smuzhiyun 	}
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun 	switch (cfg->oas) {
877*4882a593Smuzhiyun 	case 32:
878*4882a593Smuzhiyun 		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
879*4882a593Smuzhiyun 		break;
880*4882a593Smuzhiyun 	case 36:
881*4882a593Smuzhiyun 		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
882*4882a593Smuzhiyun 		break;
883*4882a593Smuzhiyun 	case 40:
884*4882a593Smuzhiyun 		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
885*4882a593Smuzhiyun 		break;
886*4882a593Smuzhiyun 	case 42:
887*4882a593Smuzhiyun 		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
888*4882a593Smuzhiyun 		break;
889*4882a593Smuzhiyun 	case 44:
890*4882a593Smuzhiyun 		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
891*4882a593Smuzhiyun 		break;
892*4882a593Smuzhiyun 	case 48:
893*4882a593Smuzhiyun 		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
894*4882a593Smuzhiyun 		break;
895*4882a593Smuzhiyun 	case 52:
896*4882a593Smuzhiyun 		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
897*4882a593Smuzhiyun 		break;
898*4882a593Smuzhiyun 	default:
899*4882a593Smuzhiyun 		goto out_free_data;
900*4882a593Smuzhiyun 	}
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 	tcr->tsz = 64ULL - cfg->ias;
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	/* MAIRs */
905*4882a593Smuzhiyun 	reg = (ARM_LPAE_MAIR_ATTR_NC
906*4882a593Smuzhiyun 	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
907*4882a593Smuzhiyun 	      (ARM_LPAE_MAIR_ATTR_WBRWA
908*4882a593Smuzhiyun 	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
909*4882a593Smuzhiyun 	      (ARM_LPAE_MAIR_ATTR_DEVICE
910*4882a593Smuzhiyun 	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
911*4882a593Smuzhiyun 	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
912*4882a593Smuzhiyun 	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)) |
913*4882a593Smuzhiyun 	      (ARM_LPAE_MAIR_ATTR_INC_OWBRANWA
914*4882a593Smuzhiyun 	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA)) |
915*4882a593Smuzhiyun 	      (ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA
916*4882a593Smuzhiyun 	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA));
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	cfg->arm_lpae_s1_cfg.mair = reg;
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun 	/* Looking good; allocate a pgd */
921*4882a593Smuzhiyun 	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
922*4882a593Smuzhiyun 					   GFP_KERNEL, cfg);
923*4882a593Smuzhiyun 	if (!data->pgd)
924*4882a593Smuzhiyun 		goto out_free_data;
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	/* Ensure the empty pgd is visible before any actual TTBR write */
927*4882a593Smuzhiyun 	wmb();
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	/* TTBR */
930*4882a593Smuzhiyun 	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
931*4882a593Smuzhiyun 	return &data->iop;
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun out_free_data:
934*4882a593Smuzhiyun 	kfree(data);
935*4882a593Smuzhiyun 	return NULL;
936*4882a593Smuzhiyun }
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun static struct io_pgtable *
939*4882a593Smuzhiyun arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
940*4882a593Smuzhiyun {
941*4882a593Smuzhiyun 	u64 sl;
942*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data;
943*4882a593Smuzhiyun 	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
944*4882a593Smuzhiyun 
945*4882a593Smuzhiyun 	/* The NS quirk doesn't apply at stage 2 */
946*4882a593Smuzhiyun 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
947*4882a593Smuzhiyun 		return NULL;
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun 	data = arm_lpae_alloc_pgtable(cfg);
950*4882a593Smuzhiyun 	if (!data)
951*4882a593Smuzhiyun 		return NULL;
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	/*
954*4882a593Smuzhiyun 	 * Concatenate PGDs at level 1 if possible in order to reduce
955*4882a593Smuzhiyun 	 * the depth of the stage-2 walk.
956*4882a593Smuzhiyun 	 */
957*4882a593Smuzhiyun 	if (data->start_level == 0) {
958*4882a593Smuzhiyun 		unsigned long pgd_pages;
959*4882a593Smuzhiyun 
960*4882a593Smuzhiyun 		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
961*4882a593Smuzhiyun 		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
962*4882a593Smuzhiyun 			data->pgd_bits += data->bits_per_level;
963*4882a593Smuzhiyun 			data->start_level++;
964*4882a593Smuzhiyun 		}
965*4882a593Smuzhiyun 	}
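	/*
	 * Example, assuming a 4KiB granule and ias == 40: the generic
	 * geometry gives start_level == 0 with pgd_bits == 1, i.e. a
	 * 16-byte level-0 table of two entries. Since 2 is within
	 * ARM_LPAE_S2_MAX_CONCAT_PAGES, the two level-1 tables are
	 * concatenated instead: pgd_bits becomes 10 (an 8KiB PGD) and
	 * the walk starts at level 1.
	 */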
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 	/* VTCR */
968*4882a593Smuzhiyun 	if (cfg->coherent_walk) {
969*4882a593Smuzhiyun 		vtcr->sh = ARM_LPAE_TCR_SH_IS;
970*4882a593Smuzhiyun 		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
971*4882a593Smuzhiyun 		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
972*4882a593Smuzhiyun 	} else {
973*4882a593Smuzhiyun 		vtcr->sh = ARM_LPAE_TCR_SH_OS;
974*4882a593Smuzhiyun 		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
975*4882a593Smuzhiyun 		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
976*4882a593Smuzhiyun 	}
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 	sl = data->start_level;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	switch (ARM_LPAE_GRANULE(data)) {
981*4882a593Smuzhiyun 	case SZ_4K:
982*4882a593Smuzhiyun 		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
983*4882a593Smuzhiyun 		sl++; /* SL0 format is different for 4K granule size */
984*4882a593Smuzhiyun 		break;
985*4882a593Smuzhiyun 	case SZ_16K:
986*4882a593Smuzhiyun 		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
987*4882a593Smuzhiyun 		break;
988*4882a593Smuzhiyun 	case SZ_64K:
989*4882a593Smuzhiyun 		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
990*4882a593Smuzhiyun 		break;
991*4882a593Smuzhiyun 	}
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun 	switch (cfg->oas) {
994*4882a593Smuzhiyun 	case 32:
995*4882a593Smuzhiyun 		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
996*4882a593Smuzhiyun 		break;
997*4882a593Smuzhiyun 	case 36:
998*4882a593Smuzhiyun 		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
999*4882a593Smuzhiyun 		break;
1000*4882a593Smuzhiyun 	case 40:
1001*4882a593Smuzhiyun 		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
1002*4882a593Smuzhiyun 		break;
1003*4882a593Smuzhiyun 	case 42:
1004*4882a593Smuzhiyun 		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
1005*4882a593Smuzhiyun 		break;
1006*4882a593Smuzhiyun 	case 44:
1007*4882a593Smuzhiyun 		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
1008*4882a593Smuzhiyun 		break;
1009*4882a593Smuzhiyun 	case 48:
1010*4882a593Smuzhiyun 		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
1011*4882a593Smuzhiyun 		break;
1012*4882a593Smuzhiyun 	case 52:
1013*4882a593Smuzhiyun 		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
1014*4882a593Smuzhiyun 		break;
1015*4882a593Smuzhiyun 	default:
1016*4882a593Smuzhiyun 		goto out_free_data;
1017*4882a593Smuzhiyun 	}
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 	vtcr->tsz = 64ULL - cfg->ias;
1020*4882a593Smuzhiyun 	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
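	/*
	 * For a 4KiB granule the calculation above maps start_level
	 * 0/1/2 to SL0 = 2/1/0, matching the VTCR_EL2.SL0 encoding
	 * where 2 means the walk starts at level 0, 1 at level 1 and
	 * 0 at level 2.
	 */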
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun 	/* Allocate pgd pages */
1023*4882a593Smuzhiyun 	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
1024*4882a593Smuzhiyun 					   GFP_KERNEL, cfg);
1025*4882a593Smuzhiyun 	if (!data->pgd)
1026*4882a593Smuzhiyun 		goto out_free_data;
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	/* Ensure the empty pgd is visible before any actual TTBR write */
1029*4882a593Smuzhiyun 	wmb();
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 	/* VTTBR */
1032*4882a593Smuzhiyun 	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
1033*4882a593Smuzhiyun 	return &data->iop;
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun out_free_data:
1036*4882a593Smuzhiyun 	kfree(data);
1037*4882a593Smuzhiyun 	return NULL;
1038*4882a593Smuzhiyun }
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun static struct io_pgtable *
1041*4882a593Smuzhiyun arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
1042*4882a593Smuzhiyun {
1043*4882a593Smuzhiyun 	if (cfg->ias > 32 || cfg->oas > 40)
1044*4882a593Smuzhiyun 		return NULL;
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1047*4882a593Smuzhiyun 	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun static struct io_pgtable *
1051*4882a593Smuzhiyun arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
1052*4882a593Smuzhiyun {
1053*4882a593Smuzhiyun 	if (cfg->ias > 40 || cfg->oas > 40)
1054*4882a593Smuzhiyun 		return NULL;
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1057*4882a593Smuzhiyun 	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun static struct io_pgtable *
1061*4882a593Smuzhiyun arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
1062*4882a593Smuzhiyun {
1063*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data;
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	/* No quirks for Mali (hopefully) */
1066*4882a593Smuzhiyun 	if (cfg->quirks)
1067*4882a593Smuzhiyun 		return NULL;
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	if (cfg->ias > 48 || cfg->oas > 40)
1070*4882a593Smuzhiyun 		return NULL;
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	data = arm_lpae_alloc_pgtable(cfg);
1075*4882a593Smuzhiyun 	if (!data)
1076*4882a593Smuzhiyun 		return NULL;
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	/* Mali seems to need a full 4-level table regardless of IAS */
1079*4882a593Smuzhiyun 	if (data->start_level > 0) {
1080*4882a593Smuzhiyun 		data->start_level = 0;
1081*4882a593Smuzhiyun 		data->pgd_bits = 0;
1082*4882a593Smuzhiyun 	}
1083*4882a593Smuzhiyun 	/*
1084*4882a593Smuzhiyun 	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
1085*4882a593Smuzhiyun 	 * best we can do is mimic the out-of-tree driver and hope that the
1086*4882a593Smuzhiyun 	 * "implementation-defined caching policy" is good enough. Similarly,
1087*4882a593Smuzhiyun 	 * we'll use it for the sake of a valid attribute for our 'device'
1088*4882a593Smuzhiyun 	 * index, although callers should never request that in practice.
1089*4882a593Smuzhiyun 	 */
1090*4882a593Smuzhiyun 	cfg->arm_mali_lpae_cfg.memattr =
1091*4882a593Smuzhiyun 		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1092*4882a593Smuzhiyun 		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
1093*4882a593Smuzhiyun 		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
1094*4882a593Smuzhiyun 		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
1095*4882a593Smuzhiyun 		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1096*4882a593Smuzhiyun 		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
1099*4882a593Smuzhiyun 					   cfg);
1100*4882a593Smuzhiyun 	if (!data->pgd)
1101*4882a593Smuzhiyun 		goto out_free_data;
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	/* Ensure the empty pgd is visible before TRANSTAB can be written */
1104*4882a593Smuzhiyun 	wmb();
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
1107*4882a593Smuzhiyun 					  ARM_MALI_LPAE_TTBR_READ_INNER |
1108*4882a593Smuzhiyun 					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
1109*4882a593Smuzhiyun 	if (cfg->coherent_walk)
1110*4882a593Smuzhiyun 		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	return &data->iop;
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun out_free_data:
1115*4882a593Smuzhiyun 	kfree(data);
1116*4882a593Smuzhiyun 	return NULL;
1117*4882a593Smuzhiyun }
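
/*
 * Editorial sketch (not part of the original file): a minimal, hypothetical
 * example of how a GPU driver might obtain Mali LPAE page-table ops through
 * the generic io-pgtable entry point.  The cfg values and the function name
 * below are illustrative assumptions, not taken from any particular driver.
 */
static struct io_pgtable_ops * __maybe_unused
example_mali_pgtable_alloc(struct device *dev, void *cookie,
			   const struct iommu_flush_ops *flush_ops)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= 48,
		.oas		= 40,
		.coherent_walk	= true,
		.tlb		= flush_ops,
		.iommu_dev	= dev,
	};

	/* Dispatches to arm_mali_lpae_alloc_pgtable() above via the core layer */
	return alloc_io_pgtable_ops(ARM_MALI_LPAE, &cfg, cookie);
}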
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
1120*4882a593Smuzhiyun 	.alloc	= arm_64_lpae_alloc_pgtable_s1,
1121*4882a593Smuzhiyun 	.free	= arm_lpae_free_pgtable,
1122*4882a593Smuzhiyun };
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
1125*4882a593Smuzhiyun 	.alloc	= arm_64_lpae_alloc_pgtable_s2,
1126*4882a593Smuzhiyun 	.free	= arm_lpae_free_pgtable,
1127*4882a593Smuzhiyun };
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
1130*4882a593Smuzhiyun 	.alloc	= arm_32_lpae_alloc_pgtable_s1,
1131*4882a593Smuzhiyun 	.free	= arm_lpae_free_pgtable,
1132*4882a593Smuzhiyun };
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
1135*4882a593Smuzhiyun 	.alloc	= arm_32_lpae_alloc_pgtable_s2,
1136*4882a593Smuzhiyun 	.free	= arm_lpae_free_pgtable,
1137*4882a593Smuzhiyun };
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
1140*4882a593Smuzhiyun 	.alloc	= arm_mali_lpae_alloc_pgtable,
1141*4882a593Smuzhiyun 	.free	= arm_lpae_free_pgtable,
1142*4882a593Smuzhiyun };
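
/*
 * Editorial note: the core io-pgtable code (drivers/iommu/io-pgtable.c) is
 * expected to look these init_fns tables up by io_pgtable_fmt, so that
 * alloc_io_pgtable_ops() and free_io_pgtable_ops() reach the allocators
 * above and arm_lpae_free_pgtable() respectively.
 */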
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun static struct io_pgtable_cfg *cfg_cookie __initdata;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun static void __init dummy_tlb_flush_all(void *cookie)
1149*4882a593Smuzhiyun {
1150*4882a593Smuzhiyun 	WARN_ON(cookie != cfg_cookie);
1151*4882a593Smuzhiyun }
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun static void __init dummy_tlb_flush(unsigned long iova, size_t size,
1154*4882a593Smuzhiyun 				   size_t granule, void *cookie)
1155*4882a593Smuzhiyun {
1156*4882a593Smuzhiyun 	WARN_ON(cookie != cfg_cookie);
1157*4882a593Smuzhiyun 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
1158*4882a593Smuzhiyun }
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
1161*4882a593Smuzhiyun 				      unsigned long iova, size_t granule,
1162*4882a593Smuzhiyun 				      void *cookie)
1163*4882a593Smuzhiyun {
1164*4882a593Smuzhiyun 	dummy_tlb_flush(iova, granule, granule, cookie);
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
1168*4882a593Smuzhiyun 	.tlb_flush_all	= dummy_tlb_flush_all,
1169*4882a593Smuzhiyun 	.tlb_flush_walk	= dummy_tlb_flush,
1170*4882a593Smuzhiyun 	.tlb_add_page	= dummy_tlb_add_page,
1171*4882a593Smuzhiyun };
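
/*
 * Editorial note: the dummy TLB callbacks above deliberately touch no
 * hardware; they only check that the cookie passed at allocation time is
 * handed back unchanged and that every flushed size is one of the configured
 * page sizes, which is all the selftest needs.
 */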
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
1176*4882a593Smuzhiyun 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
1179*4882a593Smuzhiyun 		cfg->pgsize_bitmap, cfg->ias);
1180*4882a593Smuzhiyun 	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
1181*4882a593Smuzhiyun 		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
1182*4882a593Smuzhiyun 		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
1183*4882a593Smuzhiyun }
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun #define __FAIL(ops, i)	({						\
1186*4882a593Smuzhiyun 		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
1187*4882a593Smuzhiyun 		arm_lpae_dump_ops(ops);					\
1188*4882a593Smuzhiyun 		selftest_running = false;				\
1189*4882a593Smuzhiyun 		-EFAULT;						\
1190*4882a593Smuzhiyun })
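
/*
 * Editorial note: __FAIL() warns, dumps the page-table geometry via
 * arm_lpae_dump_ops(), clears selftest_running and evaluates to -EFAULT,
 * so callers can simply 'return __FAIL(ops, i);' at any failing check.
 */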
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	static const enum io_pgtable_fmt fmts[] __initconst = {
1195*4882a593Smuzhiyun 		ARM_64_LPAE_S1,
1196*4882a593Smuzhiyun 		ARM_64_LPAE_S2,
1197*4882a593Smuzhiyun 	};
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	int i, j;
1200*4882a593Smuzhiyun 	unsigned long iova;
1201*4882a593Smuzhiyun 	size_t size;
1202*4882a593Smuzhiyun 	struct io_pgtable_ops *ops;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	selftest_running = true;
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
1207*4882a593Smuzhiyun 		cfg_cookie = cfg;
1208*4882a593Smuzhiyun 		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
1209*4882a593Smuzhiyun 		if (!ops) {
1210*4882a593Smuzhiyun 			pr_err("selftest: failed to allocate io pgtable ops\n");
1211*4882a593Smuzhiyun 			return -ENOMEM;
1212*4882a593Smuzhiyun 		}
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 		/*
1215*4882a593Smuzhiyun 		 * Initial sanity checks.
1216*4882a593Smuzhiyun 		 * Empty page tables shouldn't provide any translations.
1217*4882a593Smuzhiyun 		 */
1218*4882a593Smuzhiyun 		if (ops->iova_to_phys(ops, 42))
1219*4882a593Smuzhiyun 			return __FAIL(ops, i);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 		if (ops->iova_to_phys(ops, SZ_1G + 42))
1222*4882a593Smuzhiyun 			return __FAIL(ops, i);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 		if (ops->iova_to_phys(ops, SZ_2G + 42))
1225*4882a593Smuzhiyun 			return __FAIL(ops, i);
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 		/*
1228*4882a593Smuzhiyun 		 * Distinct mappings of different granule sizes.
1229*4882a593Smuzhiyun 		 */
1230*4882a593Smuzhiyun 		iova = 0;
1231*4882a593Smuzhiyun 		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1232*4882a593Smuzhiyun 			size = 1UL << j;
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 			if (ops->map(ops, iova, iova, size, IOMMU_READ |
1235*4882a593Smuzhiyun 							    IOMMU_WRITE |
1236*4882a593Smuzhiyun 							    IOMMU_NOEXEC |
1237*4882a593Smuzhiyun 							    IOMMU_CACHE, GFP_KERNEL))
1238*4882a593Smuzhiyun 				return __FAIL(ops, i);
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 			/* Overlapping mappings */
1241*4882a593Smuzhiyun 			if (!ops->map(ops, iova, iova + size, size,
1242*4882a593Smuzhiyun 				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
1243*4882a593Smuzhiyun 				return __FAIL(ops, i);
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1246*4882a593Smuzhiyun 				return __FAIL(ops, i);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 			iova += SZ_1G;
1249*4882a593Smuzhiyun 		}
1250*4882a593Smuzhiyun 
1251*4882a593Smuzhiyun 		/* Partial unmap */
1252*4882a593Smuzhiyun 		size = 1UL << __ffs(cfg->pgsize_bitmap);
1253*4882a593Smuzhiyun 		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
1254*4882a593Smuzhiyun 			return __FAIL(ops, i);
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 		/* Remap of partial unmap */
1257*4882a593Smuzhiyun 		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
1258*4882a593Smuzhiyun 			return __FAIL(ops, i);
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
1261*4882a593Smuzhiyun 			return __FAIL(ops, i);
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 		/* Full unmap */
1264*4882a593Smuzhiyun 		iova = 0;
1265*4882a593Smuzhiyun 		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1266*4882a593Smuzhiyun 			size = 1UL << j;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 			if (ops->unmap(ops, iova, size, NULL) != size)
1269*4882a593Smuzhiyun 				return __FAIL(ops, i);
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 			if (ops->iova_to_phys(ops, iova + 42))
1272*4882a593Smuzhiyun 				return __FAIL(ops, i);
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 			/* Remap full block */
1275*4882a593Smuzhiyun 			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
1276*4882a593Smuzhiyun 				return __FAIL(ops, i);
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1279*4882a593Smuzhiyun 				return __FAIL(ops, i);
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 			iova += SZ_1G;
1282*4882a593Smuzhiyun 		}
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 		free_io_pgtable_ops(ops);
1285*4882a593Smuzhiyun 	}
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	selftest_running = false;
1288*4882a593Smuzhiyun 	return 0;
1289*4882a593Smuzhiyun }
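
/*
 * Editorial summary of the test sequence above (worked numbers assume the
 * 4K | 2M | 1G page-size set): lookups in an empty table must fail; each
 * supported size is then identity-mapped at 1 GiB strides and overlapping
 * maps must be rejected; a single smallest-size page (4K) is carved out of
 * the 2M block at 1 GiB + 4K and remapped to physical address 4K, so
 * iova 1 GiB + 4K + 42 must translate to 4K + 42; finally every mapping is
 * unmapped, verified gone and remapped once more.
 */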
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun static int __init arm_lpae_do_selftests(void)
1292*4882a593Smuzhiyun {
1293*4882a593Smuzhiyun 	static const unsigned long pgsize[] __initconst = {
1294*4882a593Smuzhiyun 		SZ_4K | SZ_2M | SZ_1G,
1295*4882a593Smuzhiyun 		SZ_16K | SZ_32M,
1296*4882a593Smuzhiyun 		SZ_64K | SZ_512M,
1297*4882a593Smuzhiyun 	};
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	static const unsigned int ias[] __initconst = {
1300*4882a593Smuzhiyun 		32, 36, 40, 42, 44, 48,
1301*4882a593Smuzhiyun 	};
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	int i, j, pass = 0, fail = 0;
1304*4882a593Smuzhiyun 	struct io_pgtable_cfg cfg = {
1305*4882a593Smuzhiyun 		.tlb = &dummy_tlb_ops,
1306*4882a593Smuzhiyun 		.oas = 48,
1307*4882a593Smuzhiyun 		.coherent_walk = true,
1308*4882a593Smuzhiyun 	};
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
1311*4882a593Smuzhiyun 		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
1312*4882a593Smuzhiyun 			cfg.pgsize_bitmap = pgsize[i];
1313*4882a593Smuzhiyun 			cfg.ias = ias[j];
1314*4882a593Smuzhiyun 			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
1315*4882a593Smuzhiyun 				pgsize[i], ias[j]);
1316*4882a593Smuzhiyun 			if (arm_lpae_run_tests(&cfg))
1317*4882a593Smuzhiyun 				fail++;
1318*4882a593Smuzhiyun 			else
1319*4882a593Smuzhiyun 				pass++;
1320*4882a593Smuzhiyun 		}
1321*4882a593Smuzhiyun 	}
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
1324*4882a593Smuzhiyun 	return fail ? -EFAULT : 0;
1325*4882a593Smuzhiyun }
1326*4882a593Smuzhiyun subsys_initcall(arm_lpae_do_selftests);
1327*4882a593Smuzhiyun #endif
1328