xref: /rk3399_ARM-atf/lib/gpt_rme/gpt_rme.c (revision b99926ef7b287738c4b4a87ee7ab4eaed1e4038f)
1f19dc624Sjohpow01 /*
220e2683dSAlexeiFedorov  * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
3f19dc624Sjohpow01  *
4f19dc624Sjohpow01  * SPDX-License-Identifier: BSD-3-Clause
5f19dc624Sjohpow01  */
6f19dc624Sjohpow01 
7f19dc624Sjohpow01 #include <assert.h>
8f19dc624Sjohpow01 #include <errno.h>
92461bd3aSManish Pandey #include <inttypes.h>
10f19dc624Sjohpow01 #include <limits.h>
11f19dc624Sjohpow01 #include <stdint.h>
12f19dc624Sjohpow01 
13f19dc624Sjohpow01 #include <arch.h>
1462d64652SOlivier Deprez #include <arch_features.h>
15f19dc624Sjohpow01 #include <arch_helpers.h>
16f19dc624Sjohpow01 #include <common/debug.h>
17f19dc624Sjohpow01 #include "gpt_rme_private.h"
18f19dc624Sjohpow01 #include <lib/gpt_rme/gpt_rme.h>
19f19dc624Sjohpow01 #include <lib/smccc.h>
20f19dc624Sjohpow01 #include <lib/spinlock.h>
21f19dc624Sjohpow01 #include <lib/xlat_tables/xlat_tables_v2.h>
22f19dc624Sjohpow01 
23f19dc624Sjohpow01 #if !ENABLE_RME
24*b99926efSAlexeiFedorov #error "ENABLE_RME must be enabled to use the GPT library"
25f19dc624Sjohpow01 #endif
26f19dc624Sjohpow01 
27f19dc624Sjohpow01 /*
28f19dc624Sjohpow01  * Lookup T from PPS
29f19dc624Sjohpow01  *
30f19dc624Sjohpow01  *   PPS    Size    T
31f19dc624Sjohpow01  *   0b000  4GB     32
32f19dc624Sjohpow01  *   0b001  64GB    36
33f19dc624Sjohpow01  *   0b010  1TB     40
34f19dc624Sjohpow01  *   0b011  4TB     42
35f19dc624Sjohpow01  *   0b100  16TB    44
36f19dc624Sjohpow01  *   0b101  256TB   48
37f19dc624Sjohpow01  *   0b110  4PB     52
38f19dc624Sjohpow01  *
39f19dc624Sjohpow01  * See section 15.1.27 of the RME specification.
40f19dc624Sjohpow01  */
41f19dc624Sjohpow01 static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
42f19dc624Sjohpow01 					   PPS_1TB_T, PPS_4TB_T,
43f19dc624Sjohpow01 					   PPS_16TB_T, PPS_256TB_T,
44f19dc624Sjohpow01 					   PPS_4PB_T};
45f19dc624Sjohpow01 
46f19dc624Sjohpow01 /*
47f19dc624Sjohpow01  * Lookup P from PGS
48f19dc624Sjohpow01  *
49f19dc624Sjohpow01  *   PGS    Size    P
50f19dc624Sjohpow01  *   0b00   4KB     12
51f19dc624Sjohpow01  *   0b10   16KB    14
52f19dc624Sjohpow01  *   0b01   64KB    16
53f19dc624Sjohpow01  *
54f19dc624Sjohpow01  * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
55f19dc624Sjohpow01  *
56f19dc624Sjohpow01  * See section 15.1.27 of the RME specification.
57f19dc624Sjohpow01  */
58f19dc624Sjohpow01 static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
59f19dc624Sjohpow01 
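/*
 * Illustrative lookups (values taken from the two tables above; the PPS/PGS
 * encodings here are examples only):
 *   - pps = 0b010 (1TB)  -> gpt_t_lookup[pps] = PPS_1TB_T,  i.e. T = 40,
 *     so the protected physical space spans 2^40 bytes.
 *   - pgs = 0b10  (16KB) -> gpt_p_lookup[pgs] = PGS_16KB_P, i.e. P = 14,
 *     matching the swapped 0b01/0b10 encoding noted above.
 */
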
60f19dc624Sjohpow01 /*
61*b99926efSAlexeiFedorov  * This structure contains GPT configuration data
62f19dc624Sjohpow01  */
63f19dc624Sjohpow01 typedef struct {
64f19dc624Sjohpow01 	uintptr_t plat_gpt_l0_base;
65f19dc624Sjohpow01 	gpccr_pps_e pps;
66f19dc624Sjohpow01 	gpt_t_val_e t;
67f19dc624Sjohpow01 	gpccr_pgs_e pgs;
68f19dc624Sjohpow01 	gpt_p_val_e p;
69f19dc624Sjohpow01 } gpt_config_t;
70f19dc624Sjohpow01 
71f19dc624Sjohpow01 static gpt_config_t gpt_config;
72f19dc624Sjohpow01 
73*b99926efSAlexeiFedorov /* These variables are used during initialization of the L1 tables */
74f19dc624Sjohpow01 static unsigned int gpt_next_l1_tbl_idx;
75f19dc624Sjohpow01 static uintptr_t gpt_l1_tbl;
76f19dc624Sjohpow01 
77f19dc624Sjohpow01 /*
78f19dc624Sjohpow01  * This function checks to see if a GPI value is valid.
79f19dc624Sjohpow01  *
80f19dc624Sjohpow01  * These are valid GPI values.
81f19dc624Sjohpow01  *   GPT_GPI_NO_ACCESS   U(0x0)
82f19dc624Sjohpow01  *   GPT_GPI_SECURE      U(0x8)
83f19dc624Sjohpow01  *   GPT_GPI_NS          U(0x9)
84f19dc624Sjohpow01  *   GPT_GPI_ROOT        U(0xA)
85f19dc624Sjohpow01  *   GPT_GPI_REALM       U(0xB)
86f19dc624Sjohpow01  *   GPT_GPI_ANY         U(0xF)
87f19dc624Sjohpow01  *
88f19dc624Sjohpow01  * Parameters
89f19dc624Sjohpow01  *   gpi		GPI to check for validity.
90f19dc624Sjohpow01  *
91f19dc624Sjohpow01  * Return
92f19dc624Sjohpow01  *   true for a valid GPI, false for an invalid one.
93f19dc624Sjohpow01  */
9420e2683dSAlexeiFedorov static bool is_gpi_valid(unsigned int gpi)
95f19dc624Sjohpow01 {
96f19dc624Sjohpow01 	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
97f19dc624Sjohpow01 	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
98f19dc624Sjohpow01 		return true;
99f19dc624Sjohpow01 	}
1006a00e9b0SRobert Wakim 	return false;
101f19dc624Sjohpow01 }
102f19dc624Sjohpow01 
103f19dc624Sjohpow01 /*
104f19dc624Sjohpow01  * This function checks to see if two PAS regions overlap.
105f19dc624Sjohpow01  *
106f19dc624Sjohpow01  * Parameters
107f19dc624Sjohpow01  *   base_1: base address of first PAS
108f19dc624Sjohpow01  *   size_1: size of first PAS
109f19dc624Sjohpow01  *   base_2: base address of second PAS
110f19dc624Sjohpow01  *   size_2: size of second PAS
111f19dc624Sjohpow01  *
112f19dc624Sjohpow01  * Return
113f19dc624Sjohpow01  *   True if PAS regions overlap, false if they do not.
114f19dc624Sjohpow01  */
11520e2683dSAlexeiFedorov static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
116f19dc624Sjohpow01 			      uintptr_t base_2, size_t size_2)
117f19dc624Sjohpow01 {
118f19dc624Sjohpow01 	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
119f19dc624Sjohpow01 		return true;
120f19dc624Sjohpow01 	}
1216a00e9b0SRobert Wakim 	return false;
122f19dc624Sjohpow01 }
123f19dc624Sjohpow01 
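/*
 * Worked example for check_pas_overlap() (hypothetical addresses): regions
 * [0x0, 0x1000) and [0x800, 0x1800) overlap since (0x0 + 0x1000) > 0x800 and
 * (0x800 + 0x1000) > 0x0 both hold. Adjacent regions [0x0, 0x1000) and
 * [0x1000, 0x2000) do not overlap because (0x0 + 0x1000) > 0x1000 is false.
 */
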
124f19dc624Sjohpow01 /*
125f19dc624Sjohpow01  * This helper function checks to see if any PAS region with index 0 to
126f19dc624Sjohpow01  * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
127f19dc624Sjohpow01  *
128f19dc624Sjohpow01  * Parameters
129f19dc624Sjohpow01  *   l0_idx:      Index of the L0 entry to check
130f19dc624Sjohpow01  *   pas_regions: PAS region array
131f19dc624Sjohpow01  *   pas_idx:     Upper bound of the PAS array index.
132f19dc624Sjohpow01  *
133f19dc624Sjohpow01  * Return
134f19dc624Sjohpow01  *   True if a PAS region occupies the L0 region in question, false if not.
135f19dc624Sjohpow01  */
13620e2683dSAlexeiFedorov static bool does_previous_pas_exist_here(unsigned int l0_idx,
137f19dc624Sjohpow01 					 pas_region_t *pas_regions,
138f19dc624Sjohpow01 					 unsigned int pas_idx)
139f19dc624Sjohpow01 {
140*b99926efSAlexeiFedorov 	/* Iterate over PAS regions up to pas_idx */
141f19dc624Sjohpow01 	for (unsigned int i = 0U; i < pas_idx; i++) {
14220e2683dSAlexeiFedorov 		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
143f19dc624Sjohpow01 		    GPT_L0GPTSZ_ACTUAL_SIZE,
144f19dc624Sjohpow01 		    pas_regions[i].base_pa, pas_regions[i].size)) {
145f19dc624Sjohpow01 			return true;
146f19dc624Sjohpow01 		}
147f19dc624Sjohpow01 	}
148f19dc624Sjohpow01 	return false;
149f19dc624Sjohpow01 }
150f19dc624Sjohpow01 
151f19dc624Sjohpow01 /*
152f19dc624Sjohpow01  * This function iterates over all of the PAS regions and checks them to ensure
153f19dc624Sjohpow01  * proper alignment of base and size, that the GPI is valid, and that no regions
154f19dc624Sjohpow01  * overlap. As a part of the overlap checks, this function checks existing L0
155f19dc624Sjohpow01  * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
156f19dc624Sjohpow01  * is called multiple times to place L1 tables in different areas of memory. It
157f19dc624Sjohpow01  * also counts the number of L1 tables needed and returns it on success.
158f19dc624Sjohpow01  *
159f19dc624Sjohpow01  * Parameters
160f19dc624Sjohpow01  *   *pas_regions	Pointer to array of PAS region structures.
161f19dc624Sjohpow01  *   pas_region_cnt	Total number of PAS regions in the array.
162f19dc624Sjohpow01  *
163f19dc624Sjohpow01  * Return
164f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, number of L1 tables
165f19dc624Sjohpow01  *   required when successful.
166f19dc624Sjohpow01  */
16720e2683dSAlexeiFedorov static int validate_pas_mappings(pas_region_t *pas_regions,
168f19dc624Sjohpow01 				 unsigned int pas_region_cnt)
169f19dc624Sjohpow01 {
170f19dc624Sjohpow01 	unsigned int idx;
171f19dc624Sjohpow01 	unsigned int l1_cnt = 0U;
172f19dc624Sjohpow01 	unsigned int pas_l1_cnt;
173f19dc624Sjohpow01 	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;
174f19dc624Sjohpow01 
175f19dc624Sjohpow01 	assert(pas_regions != NULL);
176f19dc624Sjohpow01 	assert(pas_region_cnt != 0U);
177f19dc624Sjohpow01 
178f19dc624Sjohpow01 	for (idx = 0U; idx < pas_region_cnt; idx++) {
179*b99926efSAlexeiFedorov 		/* Check for arithmetic overflow in region */
180f19dc624Sjohpow01 		if ((ULONG_MAX - pas_regions[idx].base_pa) <
181f19dc624Sjohpow01 		    pas_regions[idx].size) {
182*b99926efSAlexeiFedorov 			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
183f19dc624Sjohpow01 			return -EOVERFLOW;
184f19dc624Sjohpow01 		}
185f19dc624Sjohpow01 
186*b99926efSAlexeiFedorov 		/* Initial checks for PAS validity */
187f19dc624Sjohpow01 		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
188f19dc624Sjohpow01 		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
18920e2683dSAlexeiFedorov 		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
190*b99926efSAlexeiFedorov 			ERROR("GPT: PAS[%u] is invalid!\n", idx);
191f19dc624Sjohpow01 			return -EFAULT;
192f19dc624Sjohpow01 		}
193f19dc624Sjohpow01 
194f19dc624Sjohpow01 		/*
195f19dc624Sjohpow01 		 * Make sure this PAS does not overlap with another one. We
196f19dc624Sjohpow01 		 * start from idx + 1 instead of 0 since prior PAS mappings will
197f19dc624Sjohpow01 		 * have already checked themselves against this one.
198f19dc624Sjohpow01 		 */
199*b99926efSAlexeiFedorov 		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
20020e2683dSAlexeiFedorov 			if (check_pas_overlap(pas_regions[idx].base_pa,
201f19dc624Sjohpow01 			    pas_regions[idx].size,
202f19dc624Sjohpow01 			    pas_regions[i].base_pa,
203f19dc624Sjohpow01 			    pas_regions[i].size)) {
204*b99926efSAlexeiFedorov 				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
205f19dc624Sjohpow01 					i, idx);
206f19dc624Sjohpow01 				return -EFAULT;
207f19dc624Sjohpow01 			}
208f19dc624Sjohpow01 		}
209f19dc624Sjohpow01 
210f19dc624Sjohpow01 		/*
211f19dc624Sjohpow01 		 * Since this function can be called multiple times with
212f19dc624Sjohpow01 		 * separate L1 tables we need to check the existing L0 mapping
213f19dc624Sjohpow01 		 * to see if this PAS would fall into one that has already been
214f19dc624Sjohpow01 		 * initialized.
215f19dc624Sjohpow01 		 */
216f19dc624Sjohpow01 		for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
217*b99926efSAlexeiFedorov 		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
218*b99926efSAlexeiFedorov 						pas_regions[idx].size - 1UL);
219f19dc624Sjohpow01 		     i++) {
220f19dc624Sjohpow01 			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
221f19dc624Sjohpow01 			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
222*b99926efSAlexeiFedorov 				/* This descriptor is unused so continue */
223f19dc624Sjohpow01 				continue;
224f19dc624Sjohpow01 			}
225f19dc624Sjohpow01 
226f19dc624Sjohpow01 			/*
227f19dc624Sjohpow01 			 * This descriptor has been initialized in a previous
228f19dc624Sjohpow01 			 * call to this function so cannot be initialized again.
229f19dc624Sjohpow01 			 */
230*b99926efSAlexeiFedorov 			ERROR("GPT: PAS[%u] overlaps with previous L0[%d]!\n",
231f19dc624Sjohpow01 			      idx, i);
232f19dc624Sjohpow01 			return -EFAULT;
233f19dc624Sjohpow01 		}
234f19dc624Sjohpow01 
235*b99926efSAlexeiFedorov 		/* Check for block mapping (L0) type */
236f19dc624Sjohpow01 		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
237f19dc624Sjohpow01 		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
238*b99926efSAlexeiFedorov 			/* Make sure base and size are block-aligned */
239f19dc624Sjohpow01 			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
240f19dc624Sjohpow01 			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
241*b99926efSAlexeiFedorov 				ERROR("GPT: PAS[%u] is not block-aligned!\n",
242f19dc624Sjohpow01 				      idx);
243f19dc624Sjohpow01 				return -EFAULT;
244f19dc624Sjohpow01 			}
245f19dc624Sjohpow01 
246f19dc624Sjohpow01 			continue;
247f19dc624Sjohpow01 		}
248f19dc624Sjohpow01 
249*b99926efSAlexeiFedorov 		/* Check for granule mapping (L1) type */
250f19dc624Sjohpow01 		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
251f19dc624Sjohpow01 		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
252*b99926efSAlexeiFedorov 			/* Make sure base and size are granule-aligned */
253f19dc624Sjohpow01 			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
254f19dc624Sjohpow01 			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
255*b99926efSAlexeiFedorov 				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
256f19dc624Sjohpow01 				      idx);
257f19dc624Sjohpow01 				return -EFAULT;
258f19dc624Sjohpow01 			}
259f19dc624Sjohpow01 
260*b99926efSAlexeiFedorov 			/* Find how many L1 tables this PAS occupies */
261f19dc624Sjohpow01 			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
262*b99926efSAlexeiFedorov 				     pas_regions[idx].size - 1UL) -
263*b99926efSAlexeiFedorov 				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);
264f19dc624Sjohpow01 
265f19dc624Sjohpow01 			/*
266f19dc624Sjohpow01 			 * This creates a situation where, if multiple PAS
267f19dc624Sjohpow01 			 * regions occupy the same table descriptor, we can get
268f19dc624Sjohpow01 			 * an artificially high total L1 table count. The way we
269f19dc624Sjohpow01 			 * handle this is by checking each PAS against those
270f19dc624Sjohpow01 			 * before it in the array, and if they both occupy the
271f19dc624Sjohpow01 			 * same L0 region we subtract from pas_l1_cnt and only the
272f19dc624Sjohpow01 			 * first PAS in the array gets to count it.
273f19dc624Sjohpow01 			 */
274f19dc624Sjohpow01 
275f19dc624Sjohpow01 			/*
276f19dc624Sjohpow01 			 * If L1 count is greater than 1 we know the start and
277f19dc624Sjohpow01 			 * end PAs are in different L0 regions so we must check
278f19dc624Sjohpow01 			 * both for overlap against other PAS.
279f19dc624Sjohpow01 			 */
280f19dc624Sjohpow01 			if (pas_l1_cnt > 1) {
28120e2683dSAlexeiFedorov 				if (does_previous_pas_exist_here(
282f19dc624Sjohpow01 				    GPT_L0_IDX(pas_regions[idx].base_pa +
283*b99926efSAlexeiFedorov 				    pas_regions[idx].size - 1UL),
284f19dc624Sjohpow01 				    pas_regions, idx)) {
285*b99926efSAlexeiFedorov 					pas_l1_cnt--;
286f19dc624Sjohpow01 				}
287f19dc624Sjohpow01 			}
288f19dc624Sjohpow01 
28920e2683dSAlexeiFedorov 			if (does_previous_pas_exist_here(
290f19dc624Sjohpow01 			    GPT_L0_IDX(pas_regions[idx].base_pa),
291f19dc624Sjohpow01 			    pas_regions, idx)) {
292*b99926efSAlexeiFedorov 				pas_l1_cnt--;
293f19dc624Sjohpow01 			}
294f19dc624Sjohpow01 
295f19dc624Sjohpow01 			l1_cnt += pas_l1_cnt;
296f19dc624Sjohpow01 			continue;
297f19dc624Sjohpow01 		}
298f19dc624Sjohpow01 
299*b99926efSAlexeiFedorov 		/* If execution reaches this point, mapping type is invalid */
300*b99926efSAlexeiFedorov 		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
301f19dc624Sjohpow01 		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
302f19dc624Sjohpow01 		return -EINVAL;
303f19dc624Sjohpow01 	}
304f19dc624Sjohpow01 
305f19dc624Sjohpow01 	return l1_cnt;
306f19dc624Sjohpow01 }
307f19dc624Sjohpow01 
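/*
 * Worked example of the L1 table count above (a sketch assuming an L0 region
 * size, L0GPTSZ, of 1GB): a granule-mapped PAS with base_pa = 0x80000000 and
 * size = 0xC0000000 (3GB) spans L0 indices 2 to 4, so pas_l1_cnt = 3. If a
 * previously validated PAS already shares the first or last of those L0
 * regions, pas_l1_cnt is decremented so that each shared L1 table is only
 * counted once.
 */
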
308f19dc624Sjohpow01 /*
309f19dc624Sjohpow01  * This function validates L0 initialization parameters.
310f19dc624Sjohpow01  *
311f19dc624Sjohpow01  * Parameters
312f19dc624Sjohpow01  *   l0_mem_base	Base address of memory used for L0 tables.
313f19dc624Sjohpow01  *   l0_mem_size	Size of memory available for L0 tables.
314f19dc624Sjohpow01  *
315f19dc624Sjohpow01  * Return
316f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
317f19dc624Sjohpow01  */
31820e2683dSAlexeiFedorov static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
319f19dc624Sjohpow01 				size_t l0_mem_size)
320f19dc624Sjohpow01 {
321f19dc624Sjohpow01 	size_t l0_alignment;
322f19dc624Sjohpow01 
323f19dc624Sjohpow01 	/*
324f19dc624Sjohpow01 	 * Make sure PPS is valid and then store it since macros need this value
325f19dc624Sjohpow01 	 * to work.
326f19dc624Sjohpow01 	 */
327f19dc624Sjohpow01 	if (pps > GPT_PPS_MAX) {
328*b99926efSAlexeiFedorov 		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
329f19dc624Sjohpow01 		return -EINVAL;
330f19dc624Sjohpow01 	}
331f19dc624Sjohpow01 	gpt_config.pps = pps;
332f19dc624Sjohpow01 	gpt_config.t = gpt_t_lookup[pps];
333f19dc624Sjohpow01 
334*b99926efSAlexeiFedorov 	/* Alignment must be the greater of 4KB or l0 table size */
335f19dc624Sjohpow01 	l0_alignment = PAGE_SIZE_4KB;
336f19dc624Sjohpow01 	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
337f19dc624Sjohpow01 		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
338f19dc624Sjohpow01 	}
339f19dc624Sjohpow01 
340*b99926efSAlexeiFedorov 	/* Check base address */
341*b99926efSAlexeiFedorov 	if ((l0_mem_base == 0UL) ||
342*b99926efSAlexeiFedorov 	   ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
343*b99926efSAlexeiFedorov 		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
344f19dc624Sjohpow01 		return -EFAULT;
345f19dc624Sjohpow01 	}
346f19dc624Sjohpow01 
347*b99926efSAlexeiFedorov 	/* Check size */
348f19dc624Sjohpow01 	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
349*b99926efSAlexeiFedorov 		ERROR("%sL0%s", "GPT: Inadequate ", " memory\n");
350*b99926efSAlexeiFedorov 		ERROR("      Expected 0x%lx bytes, got 0x%lx bytes\n",
351f19dc624Sjohpow01 		      GPT_L0_TABLE_SIZE(gpt_config.t),
352f19dc624Sjohpow01 		      l0_mem_size);
353f19dc624Sjohpow01 		return -ENOMEM;
354f19dc624Sjohpow01 	}
355f19dc624Sjohpow01 
356f19dc624Sjohpow01 	return 0;
357f19dc624Sjohpow01 }
358f19dc624Sjohpow01 
359f19dc624Sjohpow01 /*
360f19dc624Sjohpow01  * In the event that L1 tables are needed, this function validates
361f19dc624Sjohpow01  * the L1 table generation parameters.
362f19dc624Sjohpow01  *
363f19dc624Sjohpow01  * Parameters
364f19dc624Sjohpow01  *   l1_mem_base	Base address of memory used for L1 table allocation.
365f19dc624Sjohpow01  *   l1_mem_size	Total size of memory available for L1 tables.
366f19dc624Sjohpow01  *   l1_gpt_cnt		Number of L1 tables needed.
367f19dc624Sjohpow01  *
368f19dc624Sjohpow01  * Return
369f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
370f19dc624Sjohpow01  */
37120e2683dSAlexeiFedorov static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
372f19dc624Sjohpow01 				unsigned int l1_gpt_cnt)
373f19dc624Sjohpow01 {
374f19dc624Sjohpow01 	size_t l1_gpt_mem_sz;
375f19dc624Sjohpow01 
376f19dc624Sjohpow01 	/* Check if the granularity is supported */
377f19dc624Sjohpow01 	if (!xlat_arch_is_granule_size_supported(
378f19dc624Sjohpow01 	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
379f19dc624Sjohpow01 		return -EPERM;
380f19dc624Sjohpow01 	}
381f19dc624Sjohpow01 
382*b99926efSAlexeiFedorov 	/* Make sure L1 tables are aligned to their size */
383*b99926efSAlexeiFedorov 	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
384*b99926efSAlexeiFedorov 		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
385f19dc624Sjohpow01 		      l1_mem_base);
386f19dc624Sjohpow01 		return -EFAULT;
387f19dc624Sjohpow01 	}
388f19dc624Sjohpow01 
389*b99926efSAlexeiFedorov 	/* Get total memory needed for L1 tables */
390f19dc624Sjohpow01 	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
391f19dc624Sjohpow01 
392*b99926efSAlexeiFedorov 	/* Check for overflow */
393f19dc624Sjohpow01 	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
394*b99926efSAlexeiFedorov 		ERROR("GPT: Overflow calculating L1 memory size\n");
395f19dc624Sjohpow01 		return -ENOMEM;
396f19dc624Sjohpow01 	}
397f19dc624Sjohpow01 
398*b99926efSAlexeiFedorov 	/* Make sure enough space was supplied */
399f19dc624Sjohpow01 	if (l1_mem_size < l1_gpt_mem_sz) {
400*b99926efSAlexeiFedorov 		ERROR("%sL1 GPTs%s", "GPT: Inadequate ", " memory\n");
401*b99926efSAlexeiFedorov 		ERROR("      Expected 0x%lx bytes, got 0x%lx bytes\n",
402f19dc624Sjohpow01 		      l1_gpt_mem_sz, l1_mem_size);
403f19dc624Sjohpow01 		return -ENOMEM;
404f19dc624Sjohpow01 	}
405f19dc624Sjohpow01 
406*b99926efSAlexeiFedorov 	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
407f19dc624Sjohpow01 	return 0;
408f19dc624Sjohpow01 }
409f19dc624Sjohpow01 
410f19dc624Sjohpow01 /*
411f19dc624Sjohpow01  * This function initializes L0 block descriptors (regions that cannot be
412f19dc624Sjohpow01  * transitioned at the granule level) according to the provided PAS.
413f19dc624Sjohpow01  *
414f19dc624Sjohpow01  * Parameters
415f19dc624Sjohpow01  *   *pas		Pointer to the structure defining the PAS region to
416f19dc624Sjohpow01  *			initialize.
417f19dc624Sjohpow01  */
41820e2683dSAlexeiFedorov static void generate_l0_blk_desc(pas_region_t *pas)
419f19dc624Sjohpow01 {
420f19dc624Sjohpow01 	uint64_t gpt_desc;
421f19dc624Sjohpow01 	unsigned int end_idx;
422f19dc624Sjohpow01 	unsigned int idx;
423f19dc624Sjohpow01 	uint64_t *l0_gpt_arr;
424f19dc624Sjohpow01 
425f19dc624Sjohpow01 	assert(gpt_config.plat_gpt_l0_base != 0U);
426f19dc624Sjohpow01 	assert(pas != NULL);
427f19dc624Sjohpow01 
428f19dc624Sjohpow01 	/*
429f19dc624Sjohpow01 	 * Checking of PAS parameters has already been done in
43020e2683dSAlexeiFedorov 	 * validate_pas_mappings so no need to check the same things again.
431f19dc624Sjohpow01 	 */
432f19dc624Sjohpow01 
433f19dc624Sjohpow01 	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;
434f19dc624Sjohpow01 
435f19dc624Sjohpow01 	/* Create the GPT Block descriptor for this PAS region */
436f19dc624Sjohpow01 	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
437f19dc624Sjohpow01 
438f19dc624Sjohpow01 	/* Start index of this region in L0 GPTs */
4396a00e9b0SRobert Wakim 	idx = GPT_L0_IDX(pas->base_pa);
440f19dc624Sjohpow01 
441f19dc624Sjohpow01 	/*
442f19dc624Sjohpow01 	 * Determine number of L0 GPT descriptors covered by
443f19dc624Sjohpow01 	 * this PAS region and use the count to populate these
444f19dc624Sjohpow01 	 * descriptors.
445f19dc624Sjohpow01 	 */
4466a00e9b0SRobert Wakim 	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
447f19dc624Sjohpow01 
448*b99926efSAlexeiFedorov 	/* Generate the needed block descriptors */
449f19dc624Sjohpow01 	for (; idx < end_idx; idx++) {
450f19dc624Sjohpow01 		l0_gpt_arr[idx] = gpt_desc;
451*b99926efSAlexeiFedorov 		VERBOSE("GPT: L0 entry (BLOCK) index %u [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
452f19dc624Sjohpow01 			idx, &l0_gpt_arr[idx],
453f19dc624Sjohpow01 			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
454f19dc624Sjohpow01 			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
455f19dc624Sjohpow01 	}
456f19dc624Sjohpow01 }
457f19dc624Sjohpow01 
458f19dc624Sjohpow01 /*
459f19dc624Sjohpow01  * Helper function to determine if the end physical address lies in the same L0
460f19dc624Sjohpow01  * region as the current physical address. If true, the end physical address is
461f19dc624Sjohpow01  * returned; otherwise, the start address of the next L0 region is returned.
462f19dc624Sjohpow01  *
463f19dc624Sjohpow01  * Parameters
464f19dc624Sjohpow01  *   cur_pa		Physical address of the current PA in the loop through
465f19dc624Sjohpow01  *			the range.
466f19dc624Sjohpow01  *   end_pa		Physical address of the end PA in a PAS range.
467f19dc624Sjohpow01  *
468f19dc624Sjohpow01  * Return
469f19dc624Sjohpow01  *   The PA of the end of the current range.
470f19dc624Sjohpow01  */
47120e2683dSAlexeiFedorov static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
472f19dc624Sjohpow01 {
473f19dc624Sjohpow01 	uintptr_t cur_idx;
474f19dc624Sjohpow01 	uintptr_t end_idx;
475f19dc624Sjohpow01 
4766a00e9b0SRobert Wakim 	cur_idx = GPT_L0_IDX(cur_pa);
4776a00e9b0SRobert Wakim 	end_idx = GPT_L0_IDX(end_pa);
478f19dc624Sjohpow01 
479f19dc624Sjohpow01 	assert(cur_idx <= end_idx);
480f19dc624Sjohpow01 
481f19dc624Sjohpow01 	if (cur_idx == end_idx) {
482f19dc624Sjohpow01 		return end_pa;
483f19dc624Sjohpow01 	}
484f19dc624Sjohpow01 
485f19dc624Sjohpow01 	return (cur_idx + 1U) << GPT_L0_IDX_SHIFT;
486f19dc624Sjohpow01 }
487f19dc624Sjohpow01 
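/*
 * Example for get_l1_end_pa() (a sketch assuming an L0 region size, L0GPTSZ,
 * of 1GB): with cur_pa = 0x40001000 and end_pa = 0x80005000 the addresses
 * fall in different L0 regions, so 0x80000000 (the start of the next L0
 * region) is returned. If end_pa were 0x7FFFF000 instead, both would share
 * L0 index 1 and end_pa itself would be returned.
 */
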
488f19dc624Sjohpow01 /*
489f19dc624Sjohpow01  * Helper function to fill out GPI entries in a single L1 table. This function
490f19dc624Sjohpow01  * fills out entire L1 descriptors at a time to save memory writes.
491f19dc624Sjohpow01  *
492f19dc624Sjohpow01  * Parameters
493f19dc624Sjohpow01  *   gpi		GPI to set this range to
494f19dc624Sjohpow01  *   l1			Pointer to L1 table to fill out
495f19dc624Sjohpow01  *   first		Address of first granule in range.
496f19dc624Sjohpow01  *   last		Address of last granule in range (inclusive).
497f19dc624Sjohpow01  */
49820e2683dSAlexeiFedorov static void fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
499f19dc624Sjohpow01 			    uintptr_t last)
500f19dc624Sjohpow01 {
501f19dc624Sjohpow01 	uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
502*b99926efSAlexeiFedorov 	uint64_t gpi_mask = ULONG_MAX;
503f19dc624Sjohpow01 
504f19dc624Sjohpow01 	assert(first <= last);
505f19dc624Sjohpow01 	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
506f19dc624Sjohpow01 	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
507f19dc624Sjohpow01 	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
508f19dc624Sjohpow01 	assert(l1 != NULL);
509f19dc624Sjohpow01 
510*b99926efSAlexeiFedorov 	/* Shift the mask if we're starting in the middle of an L1 entry */
511f19dc624Sjohpow01 	gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);
512f19dc624Sjohpow01 
513*b99926efSAlexeiFedorov 	/* Fill out each L1 entry for this region */
514f19dc624Sjohpow01 	for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
515f19dc624Sjohpow01 	     i <= GPT_L1_IDX(gpt_config.p, last); i++) {
516*b99926efSAlexeiFedorov 		/* Account for stopping in the middle of an L1 entry */
517f19dc624Sjohpow01 		if (i == GPT_L1_IDX(gpt_config.p, last)) {
518*b99926efSAlexeiFedorov 			gpi_mask &= (gpi_mask >> ((15U -
519f19dc624Sjohpow01 				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
520f19dc624Sjohpow01 		}
521f19dc624Sjohpow01 
522*b99926efSAlexeiFedorov 		/* Write GPI values */
523f19dc624Sjohpow01 		assert((l1[i] & gpi_mask) ==
524f19dc624Sjohpow01 		       (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
525f19dc624Sjohpow01 		l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);
526f19dc624Sjohpow01 
527*b99926efSAlexeiFedorov 		/* Reset mask */
528*b99926efSAlexeiFedorov 		gpi_mask = ULONG_MAX;
529f19dc624Sjohpow01 	}
530f19dc624Sjohpow01 }
531f19dc624Sjohpow01 
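/*
 * Worked example of the masking in fill_l1_tbl() (each 64-bit L1 descriptor
 * packs 16 GPI fields of 4 bits each; the GPI indices below are hypothetical):
 *   - If "first" begins at GPI index 3 of its descriptor, the initial mask is
 *     ULONG_MAX << (3 << 2) = 0xFFFFFFFFFFFFF000, leaving GPIs 0-2 untouched.
 *   - On the final descriptor, if "last" ends at GPI index 5, the mask is
 *     ANDed with (gpi_mask >> ((15 - 5) << 2)), clearing GPIs 6-15 so only
 *     GPIs up to and including index 5 are written.
 *   - Every descriptor in between is written in full with the repeated GPI.
 */
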
532f19dc624Sjohpow01 /*
533f19dc624Sjohpow01  * This function finds the next available unused L1 table and initializes all
534f19dc624Sjohpow01  * granule descriptor entries to GPI_ANY. This ensures that there are no chunks
535f19dc624Sjohpow01  * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
536f19dc624Sjohpow01  * event that a PAS region stops midway through an L1 table, thus guaranteeing
537f19dc624Sjohpow01  * that all memory not explicitly assigned is GPI_ANY. This function does not
538f19dc624Sjohpow01  * check for overflow conditions; that should be done by the caller.
539f19dc624Sjohpow01  *
540f19dc624Sjohpow01  * Return
541f19dc624Sjohpow01  *   Pointer to the next available L1 table.
542f19dc624Sjohpow01  */
54320e2683dSAlexeiFedorov static uint64_t *get_new_l1_tbl(void)
544f19dc624Sjohpow01 {
545*b99926efSAlexeiFedorov 	/* Retrieve the next L1 table */
546f19dc624Sjohpow01 	uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
547f19dc624Sjohpow01 		       (GPT_L1_TABLE_SIZE(gpt_config.p) *
548f19dc624Sjohpow01 		       gpt_next_l1_tbl_idx));
549f19dc624Sjohpow01 
550*b99926efSAlexeiFedorov 	/* Increment L1 counter */
551f19dc624Sjohpow01 	gpt_next_l1_tbl_idx++;
552f19dc624Sjohpow01 
553f19dc624Sjohpow01 	/* Initialize all GPIs to GPT_GPI_ANY */
554f19dc624Sjohpow01 	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
555f19dc624Sjohpow01 		l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY);
556f19dc624Sjohpow01 	}
557f19dc624Sjohpow01 
558f19dc624Sjohpow01 	return l1;
559f19dc624Sjohpow01 }
560f19dc624Sjohpow01 
561f19dc624Sjohpow01 /*
562f19dc624Sjohpow01  * When L1 tables are needed, this function creates the necessary L0 table
563f19dc624Sjohpow01  * descriptors and fills out the L1 table entries according to the supplied
564f19dc624Sjohpow01  * PAS range.
565f19dc624Sjohpow01  *
566f19dc624Sjohpow01  * Parameters
567f19dc624Sjohpow01  *   *pas		Pointer to the structure defining the PAS region.
568f19dc624Sjohpow01  */
56920e2683dSAlexeiFedorov static void generate_l0_tbl_desc(pas_region_t *pas)
570f19dc624Sjohpow01 {
571f19dc624Sjohpow01 	uintptr_t end_pa;
572f19dc624Sjohpow01 	uintptr_t cur_pa;
573f19dc624Sjohpow01 	uintptr_t last_gran_pa;
574f19dc624Sjohpow01 	uint64_t *l0_gpt_base;
575f19dc624Sjohpow01 	uint64_t *l1_gpt_arr;
576f19dc624Sjohpow01 	unsigned int l0_idx;
577f19dc624Sjohpow01 
578f19dc624Sjohpow01 	assert(gpt_config.plat_gpt_l0_base != 0U);
579f19dc624Sjohpow01 	assert(pas != NULL);
580f19dc624Sjohpow01 
581f19dc624Sjohpow01 	/*
582f19dc624Sjohpow01 	 * Checking of PAS parameters has already been done in
58320e2683dSAlexeiFedorov 	 * validate_pas_mappings so no need to check the same things again.
584f19dc624Sjohpow01 	 */
585f19dc624Sjohpow01 
586f19dc624Sjohpow01 	end_pa = pas->base_pa + pas->size;
587f19dc624Sjohpow01 	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
588f19dc624Sjohpow01 
589f19dc624Sjohpow01 	/* We start working from the granule at base PA */
590f19dc624Sjohpow01 	cur_pa = pas->base_pa;
591f19dc624Sjohpow01 
592*b99926efSAlexeiFedorov 	/* Iterate over each L0 region in this memory range */
593f19dc624Sjohpow01 	for (l0_idx = GPT_L0_IDX(pas->base_pa);
594f19dc624Sjohpow01 	     l0_idx <= GPT_L0_IDX(end_pa - 1U);
595f19dc624Sjohpow01 	     l0_idx++) {
596f19dc624Sjohpow01 
597f19dc624Sjohpow01 		/*
598f19dc624Sjohpow01 		 * See if the L0 entry is already a table descriptor or if we
599f19dc624Sjohpow01 		 * need to create one.
600f19dc624Sjohpow01 		 */
601f19dc624Sjohpow01 		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
602*b99926efSAlexeiFedorov 			/* Get the L1 array from the L0 entry */
603f19dc624Sjohpow01 			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
604f19dc624Sjohpow01 		} else {
605*b99926efSAlexeiFedorov 			/* Get a new L1 table from the L1 memory space */
60620e2683dSAlexeiFedorov 			l1_gpt_arr = get_new_l1_tbl();
607f19dc624Sjohpow01 
608*b99926efSAlexeiFedorov 			/* Fill out the L0 descriptor and flush it */
609f19dc624Sjohpow01 			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
610f19dc624Sjohpow01 		}
611f19dc624Sjohpow01 
612*b99926efSAlexeiFedorov 		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
613*b99926efSAlexeiFedorov 			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);
614f19dc624Sjohpow01 
615f19dc624Sjohpow01 		/*
616f19dc624Sjohpow01 		 * Determine the PA of the last granule in this L0 descriptor.
617f19dc624Sjohpow01 		 */
61820e2683dSAlexeiFedorov 		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
619f19dc624Sjohpow01 			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);
620f19dc624Sjohpow01 
621f19dc624Sjohpow01 		/*
622f19dc624Sjohpow01 		 * Fill up L1 GPT entries between these two addresses. This
623f19dc624Sjohpow01 		 * function needs the addresses of the first granule and last
624f19dc624Sjohpow01 		 * granule in the range.
625f19dc624Sjohpow01 		 */
62620e2683dSAlexeiFedorov 		fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
627f19dc624Sjohpow01 				cur_pa, last_gran_pa);
628f19dc624Sjohpow01 
629*b99926efSAlexeiFedorov 		/* Advance cur_pa to first granule in next L0 region */
63020e2683dSAlexeiFedorov 		cur_pa = get_l1_end_pa(cur_pa, end_pa);
631f19dc624Sjohpow01 	}
632f19dc624Sjohpow01 }
633f19dc624Sjohpow01 
634f19dc624Sjohpow01 /*
635f19dc624Sjohpow01  * This function flushes a range of L0 descriptors used by a given PAS region
636f19dc624Sjohpow01  * array. There is a chance that some unmodified L0 descriptors would be flushed
637f19dc624Sjohpow01  * in the case that there are "holes" in an array of PAS regions but overall
638f19dc624Sjohpow01  * this should be faster than individually flushing each modified L0 descriptor
639f19dc624Sjohpow01  * as they are created.
640f19dc624Sjohpow01  *
641f19dc624Sjohpow01  * Parameters
642f19dc624Sjohpow01  *   *pas		Pointer to an array of PAS regions.
643f19dc624Sjohpow01  *   pas_count		Number of entries in the PAS array.
644f19dc624Sjohpow01  */
645f19dc624Sjohpow01 static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
646f19dc624Sjohpow01 {
647f19dc624Sjohpow01 	unsigned int idx;
648f19dc624Sjohpow01 	unsigned int start_idx;
649f19dc624Sjohpow01 	unsigned int end_idx;
650f19dc624Sjohpow01 	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
651f19dc624Sjohpow01 
652f19dc624Sjohpow01 	assert(pas != NULL);
653*b99926efSAlexeiFedorov 	assert(pas_count != 0U);
654f19dc624Sjohpow01 
655*b99926efSAlexeiFedorov 	/* Initial start and end values */
656f19dc624Sjohpow01 	start_idx = GPT_L0_IDX(pas[0].base_pa);
657*b99926efSAlexeiFedorov 	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);
658f19dc624Sjohpow01 
659*b99926efSAlexeiFedorov 	/* Find lowest and highest L0 indices used in this PAS array */
660*b99926efSAlexeiFedorov 	for (idx = 1U; idx < pas_count; idx++) {
661f19dc624Sjohpow01 		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
662f19dc624Sjohpow01 			start_idx = GPT_L0_IDX(pas[idx].base_pa);
663f19dc624Sjohpow01 		}
664*b99926efSAlexeiFedorov 		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
665*b99926efSAlexeiFedorov 			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
666f19dc624Sjohpow01 		}
667f19dc624Sjohpow01 	}
668f19dc624Sjohpow01 
669f19dc624Sjohpow01 	/*
670f19dc624Sjohpow01 	 * Flush all covered L0 descriptors, add 1 because we need to include
671f19dc624Sjohpow01 	 * the end index value.
672f19dc624Sjohpow01 	 */
673f19dc624Sjohpow01 	flush_dcache_range((uintptr_t)&l0[start_idx],
674*b99926efSAlexeiFedorov 			   ((end_idx + 1U) - start_idx) * sizeof(uint64_t));
675f19dc624Sjohpow01 }
676f19dc624Sjohpow01 
677f19dc624Sjohpow01 /*
678f19dc624Sjohpow01  * Public API to enable granule protection checks once the tables have all been
679f19dc624Sjohpow01  * initialized. This function is called at first initialization and then again
680f19dc624Sjohpow01  * later during warm boots of CPU cores.
681f19dc624Sjohpow01  *
682f19dc624Sjohpow01  * Return
683f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
684f19dc624Sjohpow01  */
685f19dc624Sjohpow01 int gpt_enable(void)
686f19dc624Sjohpow01 {
687f19dc624Sjohpow01 	u_register_t gpccr_el3;
688f19dc624Sjohpow01 
689f19dc624Sjohpow01 	/*
690f19dc624Sjohpow01 	 * Granule tables must be initialised before enabling
691f19dc624Sjohpow01 	 * granule protection.
692f19dc624Sjohpow01 	 */
693*b99926efSAlexeiFedorov 	if (gpt_config.plat_gpt_l0_base == 0UL) {
694*b99926efSAlexeiFedorov 		ERROR("GPT: Tables have not been initialized!\n");
695f19dc624Sjohpow01 		return -EPERM;
696f19dc624Sjohpow01 	}
697f19dc624Sjohpow01 
698f19dc624Sjohpow01 	/* Write the base address of the L0 tables into GPTBR */
699f19dc624Sjohpow01 	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
700f19dc624Sjohpow01 			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);
701f19dc624Sjohpow01 
702f19dc624Sjohpow01 	/* GPCCR_EL3.PPS */
703f19dc624Sjohpow01 	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);
704f19dc624Sjohpow01 
705f19dc624Sjohpow01 	/* GPCCR_EL3.PGS */
706f19dc624Sjohpow01 	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);
707f19dc624Sjohpow01 
70877612b90SSoby Mathew 	/*
70977612b90SSoby Mathew 	 * Since EL3 maps the L1 region as Inner shareable, use the same
71077612b90SSoby Mathew 	 * shareability attribute for GPC as well so that
71177612b90SSoby Mathew 	 * GPC fetches are visible to PEs
71277612b90SSoby Mathew 	 */
71377612b90SSoby Mathew 	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);
714f19dc624Sjohpow01 
715*b99926efSAlexeiFedorov 	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
716f19dc624Sjohpow01 	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
717f19dc624Sjohpow01 	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
718f19dc624Sjohpow01 
71914cddd7aSKathleen Capella 	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
72014cddd7aSKathleen Capella 	write_gpccr_el3(gpccr_el3);
72114cddd7aSKathleen Capella 	isb();
72214cddd7aSKathleen Capella 
72314cddd7aSKathleen Capella 	/* Invalidate any stale TLB entries and any cached register fields */
72414cddd7aSKathleen Capella 	tlbipaallos();
72514cddd7aSKathleen Capella 	dsb();
72614cddd7aSKathleen Capella 	isb();
72714cddd7aSKathleen Capella 
728f19dc624Sjohpow01 	/* Enable GPT */
729f19dc624Sjohpow01 	gpccr_el3 |= GPCCR_GPC_BIT;
730f19dc624Sjohpow01 
731*b99926efSAlexeiFedorov 	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
732f19dc624Sjohpow01 	write_gpccr_el3(gpccr_el3);
73377612b90SSoby Mathew 	isb();
734f19dc624Sjohpow01 	tlbipaallos();
735f19dc624Sjohpow01 	dsb();
736f19dc624Sjohpow01 	isb();
737f19dc624Sjohpow01 
738f19dc624Sjohpow01 	return 0;
739f19dc624Sjohpow01 }
740f19dc624Sjohpow01 
741f19dc624Sjohpow01 /*
742f19dc624Sjohpow01  * Public API to disable granule protection checks.
743f19dc624Sjohpow01  */
744f19dc624Sjohpow01 void gpt_disable(void)
745f19dc624Sjohpow01 {
746f19dc624Sjohpow01 	u_register_t gpccr_el3 = read_gpccr_el3();
747f19dc624Sjohpow01 
748f19dc624Sjohpow01 	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
749f19dc624Sjohpow01 	dsbsy();
750f19dc624Sjohpow01 	isb();
751f19dc624Sjohpow01 }
752f19dc624Sjohpow01 
753f19dc624Sjohpow01 /*
754f19dc624Sjohpow01  * Public API that initializes the entire protected space to GPT_GPI_ANY using
755f19dc624Sjohpow01  * the L0 tables (block descriptors). Ideally, this function is invoked prior
756f19dc624Sjohpow01  * to DDR discovery and initialization. The MMU must be initialized before
757f19dc624Sjohpow01  * calling this function.
758f19dc624Sjohpow01  *
759f19dc624Sjohpow01  * Parameters
760f19dc624Sjohpow01  *   pps		PPS value to use for table generation
761f19dc624Sjohpow01  *   l0_mem_base	Base address of L0 tables in memory.
762f19dc624Sjohpow01  *   l0_mem_size	Total size of memory available for L0 tables.
763f19dc624Sjohpow01  *
764f19dc624Sjohpow01  * Return
765f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
766f19dc624Sjohpow01  */
767a0d5147bSAlexeiFedorov int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
768f19dc624Sjohpow01 		       size_t l0_mem_size)
769f19dc624Sjohpow01 {
770f19dc624Sjohpow01 	int ret;
771f19dc624Sjohpow01 	uint64_t gpt_desc;
772f19dc624Sjohpow01 
773*b99926efSAlexeiFedorov 	/* Ensure that MMU and Data caches are enabled */
774f19dc624Sjohpow01 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
775f19dc624Sjohpow01 
776*b99926efSAlexeiFedorov 	/* Validate other parameters */
77720e2683dSAlexeiFedorov 	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
7786a00e9b0SRobert Wakim 	if (ret != 0) {
779f19dc624Sjohpow01 		return ret;
780f19dc624Sjohpow01 	}
781f19dc624Sjohpow01 
782*b99926efSAlexeiFedorov 	/* Create the descriptor to initialize L0 entries with */
783f19dc624Sjohpow01 	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
784f19dc624Sjohpow01 
785f19dc624Sjohpow01 	/* Iterate through all L0 entries */
786f19dc624Sjohpow01 	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
787f19dc624Sjohpow01 		((uint64_t *)l0_mem_base)[i] = gpt_desc;
788f19dc624Sjohpow01 	}
789f19dc624Sjohpow01 
790*b99926efSAlexeiFedorov 	/* Flush updated L0 tables to memory */
791f19dc624Sjohpow01 	flush_dcache_range((uintptr_t)l0_mem_base,
792f19dc624Sjohpow01 			   (size_t)GPT_L0_TABLE_SIZE(gpt_config.t));
793f19dc624Sjohpow01 
794*b99926efSAlexeiFedorov 	/* Stash the L0 base address once initial setup is complete */
795f19dc624Sjohpow01 	gpt_config.plat_gpt_l0_base = l0_mem_base;
796f19dc624Sjohpow01 
797f19dc624Sjohpow01 	return 0;
798f19dc624Sjohpow01 }
799f19dc624Sjohpow01 
800f19dc624Sjohpow01 /*
801f19dc624Sjohpow01  * Public API that carves out PAS regions from the L0 tables and builds any L1
802f19dc624Sjohpow01  * tables that are needed. This function ideally is run after DDR discovery and
803f19dc624Sjohpow01  * initialization. The L0 tables must have already been initialized to GPI_ANY
804f19dc624Sjohpow01  * when this function is called.
805f19dc624Sjohpow01  *
806f19dc624Sjohpow01  * This function can be called multiple times with different L1 memory ranges
807f19dc624Sjohpow01  * and PAS regions if it is desirable to place L1 tables in different locations
808f19dc624Sjohpow01  * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
809f19dc624Sjohpow01  * in the DDR bank that they control)
810f19dc624Sjohpow01  *
811f19dc624Sjohpow01  * Parameters
812f19dc624Sjohpow01  *   pgs		PGS value to use for table generation.
813f19dc624Sjohpow01  *   l1_mem_base	Base address of memory used for L1 tables.
814f19dc624Sjohpow01  *   l1_mem_size	Total size of memory available for L1 tables.
815f19dc624Sjohpow01  *   *pas_regions	Pointer to PAS regions structure array.
816f19dc624Sjohpow01  *   pas_count		Total number of PAS regions.
817f19dc624Sjohpow01  *
818f19dc624Sjohpow01  * Return
819f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
820f19dc624Sjohpow01  */
821f19dc624Sjohpow01 int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
822f19dc624Sjohpow01 			   size_t l1_mem_size, pas_region_t *pas_regions,
823f19dc624Sjohpow01 			   unsigned int pas_count)
824f19dc624Sjohpow01 {
825f19dc624Sjohpow01 	int ret;
826f19dc624Sjohpow01 	int l1_gpt_cnt;
827f19dc624Sjohpow01 
828*b99926efSAlexeiFedorov 	/* Ensure that MMU and Data caches are enabled */
829f19dc624Sjohpow01 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
830f19dc624Sjohpow01 
831*b99926efSAlexeiFedorov 	/* PGS is needed for validate_pas_mappings so check it now */
832f19dc624Sjohpow01 	if (pgs > GPT_PGS_MAX) {
833*b99926efSAlexeiFedorov 		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
834f19dc624Sjohpow01 		return -EINVAL;
835f19dc624Sjohpow01 	}
836f19dc624Sjohpow01 	gpt_config.pgs = pgs;
837f19dc624Sjohpow01 	gpt_config.p = gpt_p_lookup[pgs];
838f19dc624Sjohpow01 
839*b99926efSAlexeiFedorov 	/* Make sure L0 tables have been initialized */
840f19dc624Sjohpow01 	if (gpt_config.plat_gpt_l0_base == 0U) {
841*b99926efSAlexeiFedorov 		ERROR("GPT: L0 tables must be initialized first!\n");
842f19dc624Sjohpow01 		return -EPERM;
843f19dc624Sjohpow01 	}
844f19dc624Sjohpow01 
845*b99926efSAlexeiFedorov 	/* Check if L1 GPTs are required and how many */
84620e2683dSAlexeiFedorov 	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
847f19dc624Sjohpow01 	if (l1_gpt_cnt < 0) {
848f19dc624Sjohpow01 		return l1_gpt_cnt;
849f19dc624Sjohpow01 	}
850f19dc624Sjohpow01 
851*b99926efSAlexeiFedorov 	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);
852f19dc624Sjohpow01 
853*b99926efSAlexeiFedorov 	/* If L1 tables are needed then validate the L1 parameters */
854f19dc624Sjohpow01 	if (l1_gpt_cnt > 0) {
85520e2683dSAlexeiFedorov 		ret = validate_l1_params(l1_mem_base, l1_mem_size,
856*b99926efSAlexeiFedorov 					(unsigned int)l1_gpt_cnt);
8576a00e9b0SRobert Wakim 		if (ret != 0) {
858f19dc624Sjohpow01 			return ret;
859f19dc624Sjohpow01 		}
860f19dc624Sjohpow01 
861*b99926efSAlexeiFedorov 		/* Set up parameters for L1 table generation */
862f19dc624Sjohpow01 		gpt_l1_tbl = l1_mem_base;
863f19dc624Sjohpow01 		gpt_next_l1_tbl_idx = 0U;
864f19dc624Sjohpow01 	}
865f19dc624Sjohpow01 
866*b99926efSAlexeiFedorov 	INFO("GPT: Boot Configuration\n");
867f19dc624Sjohpow01 	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
868f19dc624Sjohpow01 	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
869f19dc624Sjohpow01 	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
870*b99926efSAlexeiFedorov 	INFO("  PAS count: %u\n", pas_count);
871*b99926efSAlexeiFedorov 	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
872f19dc624Sjohpow01 
873*b99926efSAlexeiFedorov 	/* Generate the tables in memory */
874f19dc624Sjohpow01 	for (unsigned int idx = 0U; idx < pas_count; idx++) {
875*b99926efSAlexeiFedorov 		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
876f19dc624Sjohpow01 			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
877f19dc624Sjohpow01 			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
878f19dc624Sjohpow01 			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
879f19dc624Sjohpow01 
880f19dc624Sjohpow01 		/* Check if a block or table descriptor is required */
881f19dc624Sjohpow01 		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
882f19dc624Sjohpow01 		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
88320e2683dSAlexeiFedorov 			generate_l0_blk_desc(&pas_regions[idx]);
884f19dc624Sjohpow01 
885f19dc624Sjohpow01 		} else {
88620e2683dSAlexeiFedorov 			generate_l0_tbl_desc(&pas_regions[idx]);
887f19dc624Sjohpow01 		}
888f19dc624Sjohpow01 	}
889f19dc624Sjohpow01 
890*b99926efSAlexeiFedorov 	/* Flush modified L0 tables */
891f19dc624Sjohpow01 	flush_l0_for_pas_array(pas_regions, pas_count);
892f19dc624Sjohpow01 
893*b99926efSAlexeiFedorov 	/* Flush L1 tables if needed */
894f19dc624Sjohpow01 	if (l1_gpt_cnt > 0) {
895f19dc624Sjohpow01 		flush_dcache_range(l1_mem_base,
896f19dc624Sjohpow01 				   GPT_L1_TABLE_SIZE(gpt_config.p) *
897f19dc624Sjohpow01 				   l1_gpt_cnt);
898f19dc624Sjohpow01 	}
899f19dc624Sjohpow01 
900*b99926efSAlexeiFedorov 	/* Make sure that all the entries are written to the memory */
901f19dc624Sjohpow01 	dsbishst();
90277612b90SSoby Mathew 	tlbipaallos();
90377612b90SSoby Mathew 	dsb();
90477612b90SSoby Mathew 	isb();
905f19dc624Sjohpow01 
906f19dc624Sjohpow01 	return 0;
907f19dc624Sjohpow01 }
908f19dc624Sjohpow01 
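/*
 * Illustrative cold-boot call sequence for this library (a sketch only; the
 * pps/pgs values and the plat_* symbols below are hypothetical placeholders
 * that must come from the platform's memory map):
 *
 *   gpccr_pps_e pps = ...;   (PPS covering the platform's physical space)
 *   gpccr_pgs_e pgs = ...;   (PGS selecting the granule size for L1 tables)
 *
 *   ret = gpt_init_l0_tables(pps, plat_l0_base, plat_l0_size);
 *   if (ret == 0) {
 *       ret = gpt_init_pas_l1_tables(pgs, plat_l1_base, plat_l1_size,
 *                                    pas_regions, pas_count);
 *   }
 *   if (ret == 0) {
 *       ret = gpt_enable();
 *   }
 */
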
909f19dc624Sjohpow01 /*
910f19dc624Sjohpow01  * Public API to initialize the runtime gpt_config structure based on the values
911f19dc624Sjohpow01  * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
912f19dc624Sjohpow01  * typically happens in a bootloader stage prior to setting up the EL3 runtime
913f19dc624Sjohpow01  * environment for the granule transition service so this function detects the
914f19dc624Sjohpow01  * initialization from a previous stage. Granule protection checks must be
915f19dc624Sjohpow01  * enabled already or this function will return an error.
916f19dc624Sjohpow01  *
917f19dc624Sjohpow01  * Return
918f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
919f19dc624Sjohpow01  */
920f19dc624Sjohpow01 int gpt_runtime_init(void)
921f19dc624Sjohpow01 {
922f19dc624Sjohpow01 	u_register_t reg;
923f19dc624Sjohpow01 
924*b99926efSAlexeiFedorov 	/* Ensure that MMU and Data caches are enabled */
925f19dc624Sjohpow01 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
926f19dc624Sjohpow01 
927*b99926efSAlexeiFedorov 	/* Ensure GPC are already enabled */
928f19dc624Sjohpow01 	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
929*b99926efSAlexeiFedorov 		ERROR("GPT: Granule protection checks are not enabled!\n");
930f19dc624Sjohpow01 		return -EPERM;
931f19dc624Sjohpow01 	}
932f19dc624Sjohpow01 
933f19dc624Sjohpow01 	/*
934f19dc624Sjohpow01 	 * Read the L0 table address from GPTBR, we don't need the L1 base
935f19dc624Sjohpow01 	 * address since those are included in the L0 tables as needed.
936f19dc624Sjohpow01 	 */
937f19dc624Sjohpow01 	reg = read_gptbr_el3();
938f19dc624Sjohpow01 	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
939f19dc624Sjohpow01 				      GPTBR_BADDR_MASK) <<
940f19dc624Sjohpow01 				      GPTBR_BADDR_VAL_SHIFT;
941f19dc624Sjohpow01 
942*b99926efSAlexeiFedorov 	/* Read GPCCR to get PGS and PPS values */
943f19dc624Sjohpow01 	reg = read_gpccr_el3();
944f19dc624Sjohpow01 	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
945f19dc624Sjohpow01 	gpt_config.t = gpt_t_lookup[gpt_config.pps];
946f19dc624Sjohpow01 	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
947f19dc624Sjohpow01 	gpt_config.p = gpt_p_lookup[gpt_config.pgs];
948f19dc624Sjohpow01 
949*b99926efSAlexeiFedorov 	VERBOSE("GPT: Runtime Configuration\n");
950f19dc624Sjohpow01 	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
951f19dc624Sjohpow01 	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
952f19dc624Sjohpow01 	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
953*b99926efSAlexeiFedorov 	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
954f19dc624Sjohpow01 
955f19dc624Sjohpow01 	return 0;
956f19dc624Sjohpow01 }
957f19dc624Sjohpow01 
958f19dc624Sjohpow01 /*
959f19dc624Sjohpow01  * The L1 descriptors are protected by a spinlock to ensure that multiple
960f19dc624Sjohpow01  * CPUs do not attempt to change the descriptors at once. In the future it
961f19dc624Sjohpow01  * would be better to have separate spinlocks for each L1 descriptor.
962f19dc624Sjohpow01  */
963f19dc624Sjohpow01 static spinlock_t gpt_lock;
964f19dc624Sjohpow01 
965f19dc624Sjohpow01 /*
9666a00e9b0SRobert Wakim  * Helper function to write the GPI value (target_pas << gpi_shift) into the
967*b99926efSAlexeiFedorov  * L1 descriptor at index idx of the gpt_l1_addr array.
9686a00e9b0SRobert Wakim  */
9696a00e9b0SRobert Wakim static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
9706a00e9b0SRobert Wakim 			     unsigned int gpi_shift, unsigned int idx,
9716a00e9b0SRobert Wakim 			     unsigned int target_pas)
9726a00e9b0SRobert Wakim {
9736a00e9b0SRobert Wakim 	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
9746a00e9b0SRobert Wakim 	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
9756a00e9b0SRobert Wakim 	gpt_l1_addr[idx] = *gpt_l1_desc;
9766a00e9b0SRobert Wakim }
9776a00e9b0SRobert Wakim 
9786a00e9b0SRobert Wakim /*
9796a00e9b0SRobert Wakim  * Helper function to retrieve the gpt_l1_* information for the given base
980*b99926efSAlexeiFedorov  * address and return it in gpi_info.
9816a00e9b0SRobert Wakim  */
9826a00e9b0SRobert Wakim static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
9836a00e9b0SRobert Wakim {
9846a00e9b0SRobert Wakim 	uint64_t gpt_l0_desc, *gpt_l0_base;
9856a00e9b0SRobert Wakim 
9866a00e9b0SRobert Wakim 	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
9876a00e9b0SRobert Wakim 	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
9886a00e9b0SRobert Wakim 	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
989*b99926efSAlexeiFedorov 		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
9906a00e9b0SRobert Wakim 		VERBOSE("      Base=0x%"PRIx64"\n", base);
9916a00e9b0SRobert Wakim 		return -EINVAL;
9926a00e9b0SRobert Wakim 	}
9936a00e9b0SRobert Wakim 
994*b99926efSAlexeiFedorov 	/* Get the table index and GPI shift from PA */
9956a00e9b0SRobert Wakim 	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
9966a00e9b0SRobert Wakim 	gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
9976a00e9b0SRobert Wakim 	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
9986a00e9b0SRobert Wakim 
9996a00e9b0SRobert Wakim 	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
10006a00e9b0SRobert Wakim 	gpi_info->gpi = (gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
10016a00e9b0SRobert Wakim 		GPT_L1_GRAN_DESC_GPI_MASK;
10026a00e9b0SRobert Wakim 	return 0;
10036a00e9b0SRobert Wakim }
10046a00e9b0SRobert Wakim 
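/*
 * Example of the GPI addressing used above (a sketch assuming 4KB granules,
 * i.e. p = 12, where GPT_L1_GPI_IDX() reduces to (PA >> 12) & 0xF, and a
 * hypothetical PA of 0x40005000): the selected nibble is
 * (0x40005000 >> 12) & 0xF = 5, so gpi_shift = 5 << 2 = 20 and the granule's
 * GPI occupies bits [23:20] of the 64-bit L1 descriptor located by
 * GPT_L1_IDX(). write_gpt() clears and rewrites exactly that 4-bit field.
 */
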
10056a00e9b0SRobert Wakim /*
10066a00e9b0SRobert Wakim  * This function is the granule transition delegate service. When a granule
10076a00e9b0SRobert Wakim  * transition request occurs it is routed to this function to have the request,
10086a00e9b0SRobert Wakim  * if valid, fulfilled following section A1.1.1 (Delegate) of the RME supplement.
1009f19dc624Sjohpow01  *
10106a00e9b0SRobert Wakim  * TODO: implement support for transitioning multiple granules at once.
1011f19dc624Sjohpow01  *
1012f19dc624Sjohpow01  * Parameters
10136a00e9b0SRobert Wakim  *   base		Base address of the region to transition, must be
10146a00e9b0SRobert Wakim  *			aligned to granule size.
10156a00e9b0SRobert Wakim  *   size		Size of region to transition, must be aligned to granule
10166a00e9b0SRobert Wakim  *			size.
1017f19dc624Sjohpow01  *   src_sec_state	Security state of the caller.
1018f19dc624Sjohpow01  *
1019f19dc624Sjohpow01  * Return
1020f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1021f19dc624Sjohpow01  */
10226a00e9b0SRobert Wakim int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
1023f19dc624Sjohpow01 {
10246a00e9b0SRobert Wakim 	gpi_info_t gpi_info;
10256a00e9b0SRobert Wakim 	uint64_t nse;
10266a00e9b0SRobert Wakim 	int res;
10276a00e9b0SRobert Wakim 	unsigned int target_pas;
1028f19dc624Sjohpow01 
1029*b99926efSAlexeiFedorov 	/* Ensure that the tables have been set up before taking requests */
10306a00e9b0SRobert Wakim 	assert(gpt_config.plat_gpt_l0_base != 0UL);
10316a00e9b0SRobert Wakim 
1032*b99926efSAlexeiFedorov 	/* Ensure that caches are enabled */
10336a00e9b0SRobert Wakim 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
10346a00e9b0SRobert Wakim 
10356a00e9b0SRobert Wakim 	/* Delegate request can only come from REALM or SECURE */
10366a00e9b0SRobert Wakim 	assert(src_sec_state == SMC_FROM_REALM ||
10376a00e9b0SRobert Wakim 	       src_sec_state == SMC_FROM_SECURE);
10386a00e9b0SRobert Wakim 
1039*b99926efSAlexeiFedorov 	/* See if this is a single granule transition or a range of granules */
10406a00e9b0SRobert Wakim 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
1041f19dc624Sjohpow01 		return -EINVAL;
1042f19dc624Sjohpow01 	}
1043f19dc624Sjohpow01 
10446a00e9b0SRobert Wakim 	/* Check that base + size does not overflow */
10456a00e9b0SRobert Wakim 	if ((ULONG_MAX - base) < size) {
1046*b99926efSAlexeiFedorov 		VERBOSE("GPT: Transition request address overflow!\n");
10476a00e9b0SRobert Wakim 		VERBOSE("      Base=0x%"PRIx64"\n", base);
10486a00e9b0SRobert Wakim 		VERBOSE("      Size=0x%lx\n", size);
10496a00e9b0SRobert Wakim 		return -EINVAL;
10506a00e9b0SRobert Wakim 	}
10516a00e9b0SRobert Wakim 
1052*b99926efSAlexeiFedorov 	/* Make sure base and size are granule-aligned, non-zero and within PPS */
1053*b99926efSAlexeiFedorov 	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
1054*b99926efSAlexeiFedorov 	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
10556a00e9b0SRobert Wakim 	    (size == 0UL) ||
10566a00e9b0SRobert Wakim 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
1057*b99926efSAlexeiFedorov 		VERBOSE("GPT: Invalid granule transition address range!\n");
10586a00e9b0SRobert Wakim 		VERBOSE("      Base=0x%"PRIx64"\n", base);
10596a00e9b0SRobert Wakim 		VERBOSE("      Size=0x%lx\n", size);
10606a00e9b0SRobert Wakim 		return -EINVAL;
10616a00e9b0SRobert Wakim 	}
10626a00e9b0SRobert Wakim 
10636a00e9b0SRobert Wakim 	target_pas = GPT_GPI_REALM;
10646a00e9b0SRobert Wakim 	if (src_sec_state == SMC_FROM_SECURE) {
10656a00e9b0SRobert Wakim 		target_pas = GPT_GPI_SECURE;
10666a00e9b0SRobert Wakim 	}
10676a00e9b0SRobert Wakim 
10686a00e9b0SRobert Wakim 	/*
10696a00e9b0SRobert Wakim 	 * Access to L1 tables is controlled by a global lock to ensure
10706a00e9b0SRobert Wakim 	 * that no more than one CPU is allowed to make changes at any
10716a00e9b0SRobert Wakim 	 * given time.
10726a00e9b0SRobert Wakim 	 */
10736a00e9b0SRobert Wakim 	spin_lock(&gpt_lock);
10746a00e9b0SRobert Wakim 	res = get_gpi_params(base, &gpi_info);
10756a00e9b0SRobert Wakim 	if (res != 0) {
10766a00e9b0SRobert Wakim 		spin_unlock(&gpt_lock);
10776a00e9b0SRobert Wakim 		return res;
10786a00e9b0SRobert Wakim 	}
10796a00e9b0SRobert Wakim 
10806a00e9b0SRobert Wakim 	/* Check that the current address is in NS state */
10816a00e9b0SRobert Wakim 	if (gpi_info.gpi != GPT_GPI_NS) {
1082*b99926efSAlexeiFedorov 		VERBOSE("GPT: Only a granule in NS state can be delegated.\n");
10836a00e9b0SRobert Wakim 		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
10846a00e9b0SRobert Wakim 			gpi_info.gpi);
10856a00e9b0SRobert Wakim 		spin_unlock(&gpt_lock);
1086e50fedbcSJavier Almansa Sobrino 		return -EPERM;
10876a00e9b0SRobert Wakim 	}
10886a00e9b0SRobert Wakim 
10896a00e9b0SRobert Wakim 	if (src_sec_state == SMC_FROM_SECURE) {
10906a00e9b0SRobert Wakim 		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
1091f19dc624Sjohpow01 	} else {
10926a00e9b0SRobert Wakim 		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
1093f19dc624Sjohpow01 	}
1094f19dc624Sjohpow01 
10956a00e9b0SRobert Wakim 	/*
10966a00e9b0SRobert Wakim 	 * In order to maintain mutual distrust between Realm and Secure
10976a00e9b0SRobert Wakim 	 * states, remove any data speculatively fetched into the target
1098*b99926efSAlexeiFedorov 	 * physical address space. Issue DC CIPAPA over address range.
10996a00e9b0SRobert Wakim 	 */
110062d64652SOlivier Deprez 	if (is_feat_mte2_supported()) {
110162d64652SOlivier Deprez 		flush_dcache_to_popa_range_mte2(nse | base,
110262d64652SOlivier Deprez 					GPT_PGS_ACTUAL_SIZE(gpt_config.p));
110362d64652SOlivier Deprez 	} else {
11046a00e9b0SRobert Wakim 		flush_dcache_to_popa_range(nse | base,
11056a00e9b0SRobert Wakim 					   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
110662d64652SOlivier Deprez 	}
11076a00e9b0SRobert Wakim 
11086a00e9b0SRobert Wakim 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
11096a00e9b0SRobert Wakim 		  gpi_info.gpi_shift, gpi_info.idx, target_pas);
11106a00e9b0SRobert Wakim 	dsboshst();
11116a00e9b0SRobert Wakim 
11126a00e9b0SRobert Wakim 	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
11136a00e9b0SRobert Wakim 	dsbosh();
11146a00e9b0SRobert Wakim 
11156a00e9b0SRobert Wakim 	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
11166a00e9b0SRobert Wakim 
111762d64652SOlivier Deprez 	if (is_feat_mte2_supported()) {
111862d64652SOlivier Deprez 		flush_dcache_to_popa_range_mte2(nse | base,
111962d64652SOlivier Deprez 					   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
112062d64652SOlivier Deprez 	} else {
11216a00e9b0SRobert Wakim 		flush_dcache_to_popa_range(nse | base,
11226a00e9b0SRobert Wakim 					   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
112362d64652SOlivier Deprez 	}
11246a00e9b0SRobert Wakim 
1125*b99926efSAlexeiFedorov 	/* Unlock access to the L1 tables */
11266a00e9b0SRobert Wakim 	spin_unlock(&gpt_lock);
11276a00e9b0SRobert Wakim 
11286a00e9b0SRobert Wakim 	/*
11296a00e9b0SRobert Wakim 	 * The isb() will be done as part of context
1130*b99926efSAlexeiFedorov 	 * synchronization when returning to a lower EL.
11316a00e9b0SRobert Wakim 	 */
1132*b99926efSAlexeiFedorov 	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
11336a00e9b0SRobert Wakim 		base, gpi_info.gpi, target_pas);
1134f19dc624Sjohpow01 
1135f19dc624Sjohpow01 	return 0;
1136f19dc624Sjohpow01 }
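
/*
 * Illustrative sketch, not part of this file: a hypothetical EL3 SMC
 * handler forwarding a single-granule delegate request from the Realm
 * world might wrap gpt_delegate_pas() as shown below. The handler name
 * and the mapping of the return code to SMC_OK/SMC_UNK are assumptions
 * for illustration only; the size must equal the configured granule
 * size (a 4KB PGS is assumed here).
 *
 *	static uint64_t example_gtsi_delegate(uint64_t base)
 *	{
 *		if (gpt_delegate_pas(base, PAGE_SIZE_4KB,
 *				     SMC_FROM_REALM) != 0) {
 *			return SMC_UNK;
 *		}
 *		return SMC_OK;
 *	}
 */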
1137f19dc624Sjohpow01 
1138f19dc624Sjohpow01 /*
11396a00e9b0SRobert Wakim  * This function is the granule transition undelegate service. When a granule
1140f19dc624Sjohpow01  * transition request occurs, it is routed to this function, where the request
1141f19dc624Sjohpow01  * is validated and then fulfilled if possible.
1142f19dc624Sjohpow01  *
1143f19dc624Sjohpow01  * TODO: implement support for transitioning multiple granules at once.
1144f19dc624Sjohpow01  *
1145f19dc624Sjohpow01  * Parameters
1146f19dc624Sjohpow01  *   base		Base address of the region to transition, must be
1147f19dc624Sjohpow01  *			aligned to granule size.
1148f19dc624Sjohpow01  *   size		Size of region to transition, must be aligned to granule
1149f19dc624Sjohpow01  *			size.
1150f19dc624Sjohpow01  *   src_sec_state	Security state of the caller.
1151f19dc624Sjohpow01  *
1152f19dc624Sjohpow01  * Return
1153f19dc624Sjohpow01  *    Negative Linux error code in the event of a failure, 0 for success.
1154f19dc624Sjohpow01  */
11556a00e9b0SRobert Wakim int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
1156f19dc624Sjohpow01 {
11576a00e9b0SRobert Wakim 	gpi_info_t gpi_info;
11586a00e9b0SRobert Wakim 	uint64_t nse;
11596a00e9b0SRobert Wakim 	int res;
1160f19dc624Sjohpow01 
1161*b99926efSAlexeiFedorov 	/* Ensure that the tables have been set up before taking requests */
11626a00e9b0SRobert Wakim 	assert(gpt_config.plat_gpt_l0_base != 0UL);
1163f19dc624Sjohpow01 
1164*b99926efSAlexeiFedorov 	/* Ensure that MMU and caches are enabled */
11656a00e9b0SRobert Wakim 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
116677612b90SSoby Mathew 
11676a00e9b0SRobert Wakim 	/* Undelegate request can only come from REALM or SECURE */
11686a00e9b0SRobert Wakim 	assert(src_sec_state == SMC_FROM_REALM ||
11696a00e9b0SRobert Wakim 	       src_sec_state == SMC_FROM_SECURE);
11706a00e9b0SRobert Wakim 
1171*b99926efSAlexeiFedorov 	/* Check for a single granule transition; ranges are not yet supported */
11726a00e9b0SRobert Wakim 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
11736a00e9b0SRobert Wakim 		return -EINVAL;
11746a00e9b0SRobert Wakim 	}
11756a00e9b0SRobert Wakim 
11766a00e9b0SRobert Wakim 	/* Check that base + size does not overflow */
1177f19dc624Sjohpow01 	if ((ULONG_MAX - base) < size) {
1178*b99926efSAlexeiFedorov 		VERBOSE("GPT: Transition request address overflow!\n");
11792461bd3aSManish Pandey 		VERBOSE("      Base=0x%"PRIx64"\n", base);
1180f19dc624Sjohpow01 		VERBOSE("      Size=0x%lx\n", size);
1181f19dc624Sjohpow01 		return -EINVAL;
1182f19dc624Sjohpow01 	}
1183f19dc624Sjohpow01 
1184*b99926efSAlexeiFedorov 	/* Check that base and size are granule-aligned, non-zero and within the PPS */
1185*b99926efSAlexeiFedorov 	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
1186*b99926efSAlexeiFedorov 	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
11876a00e9b0SRobert Wakim 	    (size == 0UL) ||
1188f19dc624Sjohpow01 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
1189*b99926efSAlexeiFedorov 		VERBOSE("GPT: Invalid granule transition address range!\n");
11902461bd3aSManish Pandey 		VERBOSE("      Base=0x%"PRIx64"\n", base);
1191f19dc624Sjohpow01 		VERBOSE("      Size=0x%lx\n", size);
1192f19dc624Sjohpow01 		return -EINVAL;
1193f19dc624Sjohpow01 	}
1194f19dc624Sjohpow01 
1195f19dc624Sjohpow01 	/*
1196f19dc624Sjohpow01 	 * Access to L1 tables is controlled by a global lock to ensure
1197f19dc624Sjohpow01 	 * that no more than one CPU is allowed to make changes at any
1198f19dc624Sjohpow01 	 * given time.
1199f19dc624Sjohpow01 	 */
1200f19dc624Sjohpow01 	spin_lock(&gpt_lock);
1201f19dc624Sjohpow01 
12026a00e9b0SRobert Wakim 	res = get_gpi_params(base, &gpi_info);
12036a00e9b0SRobert Wakim 	if (res != 0) {
1204f19dc624Sjohpow01 		spin_unlock(&gpt_lock);
12056a00e9b0SRobert Wakim 		return res;
1206f19dc624Sjohpow01 	}
1207f19dc624Sjohpow01 
12086a00e9b0SRobert Wakim 	/* Check that the current address is in the delegated state */
12096a00e9b0SRobert Wakim 	if ((src_sec_state == SMC_FROM_REALM  &&
12106a00e9b0SRobert Wakim 	     gpi_info.gpi != GPT_GPI_REALM) ||
12116a00e9b0SRobert Wakim 	    (src_sec_state == SMC_FROM_SECURE &&
12126a00e9b0SRobert Wakim 	     gpi_info.gpi != GPT_GPI_SECURE)) {
1213*b99926efSAlexeiFedorov 		VERBOSE("GPT: Only a granule in REALM or SECURE state can be undelegated.\n");
1214*b99926efSAlexeiFedorov 		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
12156a00e9b0SRobert Wakim 			gpi_info.gpi);
12166a00e9b0SRobert Wakim 		spin_unlock(&gpt_lock);
1217e50fedbcSJavier Almansa Sobrino 		return -EPERM;
12186a00e9b0SRobert Wakim 	}
1219f19dc624Sjohpow01 
12206a00e9b0SRobert Wakim 	/*
12216a00e9b0SRobert Wakim 	 * In order to maintain mutual distrust between Realm and Secure
12226a00e9b0SRobert Wakim 	 * states, remove access now to guarantee that writes to the
12236a00e9b0SRobert Wakim 	 * currently-accessible physical address space will not later
12246a00e9b0SRobert Wakim 	 * become observable.
12256a00e9b0SRobert Wakim 	 */
12266a00e9b0SRobert Wakim 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
12276a00e9b0SRobert Wakim 		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);
12286a00e9b0SRobert Wakim 	dsboshst();
12296a00e9b0SRobert Wakim 
12306a00e9b0SRobert Wakim 	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
12316a00e9b0SRobert Wakim 	dsbosh();
12326a00e9b0SRobert Wakim 
12336a00e9b0SRobert Wakim 	if (src_sec_state == SMC_FROM_SECURE) {
12346a00e9b0SRobert Wakim 		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
12356a00e9b0SRobert Wakim 	} else {
12366a00e9b0SRobert Wakim 		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
12376a00e9b0SRobert Wakim 	}
12386a00e9b0SRobert Wakim 
12396a00e9b0SRobert Wakim 	/* Ensure that the scrubbed data has made it past the PoPA */
124062d64652SOlivier Deprez 	if (is_feat_mte2_supported()) {
124162d64652SOlivier Deprez 		flush_dcache_to_popa_range_mte2(nse | base,
124262d64652SOlivier Deprez 					   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
124362d64652SOlivier Deprez 	} else {
12446a00e9b0SRobert Wakim 		flush_dcache_to_popa_range(nse | base,
12456a00e9b0SRobert Wakim 					   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
124662d64652SOlivier Deprez 	}
12476a00e9b0SRobert Wakim 
12486a00e9b0SRobert Wakim 	/*
12496a00e9b0SRobert Wakim 	 * Remove any data speculatively loaded into the NS physical
12506a00e9b0SRobert Wakim 	 * address space before the scrubbing completed.
12516a00e9b0SRobert Wakim 	 */
12526a00e9b0SRobert Wakim 	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
12536a00e9b0SRobert Wakim 
125462d64652SOlivier Deprez 	if (is_feat_mte2_supported()) {
125562d64652SOlivier Deprez 		flush_dcache_to_popa_range_mte2(nse | base,
125662d64652SOlivier Deprez 					   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
125762d64652SOlivier Deprez 	} else {
12586a00e9b0SRobert Wakim 		flush_dcache_to_popa_range(nse | base,
12596a00e9b0SRobert Wakim 					   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
126062d64652SOlivier Deprez 	}
12616a00e9b0SRobert Wakim 
12626a00e9b0SRobert Wakim 	/* Clear existing GPI encoding and transition granule. */
12636a00e9b0SRobert Wakim 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
12646a00e9b0SRobert Wakim 		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);
12656a00e9b0SRobert Wakim 	dsboshst();
12666a00e9b0SRobert Wakim 
12676a00e9b0SRobert Wakim 	/* Ensure that all agents observe the new NS configuration */
12686a00e9b0SRobert Wakim 	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
12696a00e9b0SRobert Wakim 	dsbosh();
1270f19dc624Sjohpow01 
1271f19dc624Sjohpow01 	/* Unlock access to the L1 tables. */
1272f19dc624Sjohpow01 	spin_unlock(&gpt_lock);
1273f19dc624Sjohpow01 
127477612b90SSoby Mathew 	/*
127577612b90SSoby Mathew 	 * The isb() will be done as part of context
1276*b99926efSAlexeiFedorov 	 * synchronization when returning to a lower EL.
127777612b90SSoby Mathew 	 */
1278*b99926efSAlexeiFedorov 	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
12796a00e9b0SRobert Wakim 		base, gpi_info.gpi, GPT_GPI_NS);
1280f19dc624Sjohpow01 
1281f19dc624Sjohpow01 	return 0;
1282f19dc624Sjohpow01 }
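
/*
 * Illustrative sketch, not part of this file: a hypothetical handler for
 * an undelegate request from the Secure world might wrap
 * gpt_undelegate_pas() as shown below. The handler name and the
 * SMC_OK/SMC_UNK mapping are assumptions for illustration only; the size
 * must equal the configured granule size (a 4KB PGS is assumed here).
 *
 *	static uint64_t example_gtsi_undelegate(uint64_t base)
 *	{
 *		if (gpt_undelegate_pas(base, PAGE_SIZE_4KB,
 *				       SMC_FROM_SECURE) != 0) {
 *			return SMC_UNK;
 *		}
 *		return SMC_OK;
 *	}
 */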
1283