xref: /rk3399_ARM-atf/lib/gpt_rme/gpt_rme.c (revision ec0088bbab9335c5273e57a84b81adf2201a51db)
1f19dc624Sjohpow01 /*
220e2683dSAlexeiFedorov  * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
3f19dc624Sjohpow01  *
4f19dc624Sjohpow01  * SPDX-License-Identifier: BSD-3-Clause
5f19dc624Sjohpow01  */
6f19dc624Sjohpow01 
7f19dc624Sjohpow01 #include <assert.h>
8f19dc624Sjohpow01 #include <errno.h>
92461bd3aSManish Pandey #include <inttypes.h>
10f19dc624Sjohpow01 #include <limits.h>
11f19dc624Sjohpow01 #include <stdint.h>
12f19dc624Sjohpow01 
13f19dc624Sjohpow01 #include <arch.h>
1462d64652SOlivier Deprez #include <arch_features.h>
15f19dc624Sjohpow01 #include <arch_helpers.h>
16f19dc624Sjohpow01 #include <common/debug.h>
17f19dc624Sjohpow01 #include "gpt_rme_private.h"
18f19dc624Sjohpow01 #include <lib/gpt_rme/gpt_rme.h>
19f19dc624Sjohpow01 #include <lib/smccc.h>
20f19dc624Sjohpow01 #include <lib/spinlock.h>
21f19dc624Sjohpow01 #include <lib/xlat_tables/xlat_tables_v2.h>
22f19dc624Sjohpow01 
23f19dc624Sjohpow01 #if !ENABLE_RME
24b99926efSAlexeiFedorov #error "ENABLE_RME must be enabled to use the GPT library"
25f19dc624Sjohpow01 #endif
26f19dc624Sjohpow01 
27f19dc624Sjohpow01 /*
28f19dc624Sjohpow01  * Lookup T from PPS
29f19dc624Sjohpow01  *
30f19dc624Sjohpow01  *   PPS    Size    T
31f19dc624Sjohpow01  *   0b000  4GB     32
32f19dc624Sjohpow01  *   0b001  64GB    36
33f19dc624Sjohpow01  *   0b010  1TB     40
34f19dc624Sjohpow01  *   0b011  4TB     42
35f19dc624Sjohpow01  *   0b100  16TB    44
36f19dc624Sjohpow01  *   0b101  256TB   48
37f19dc624Sjohpow01  *   0b110  4PB     52
38f19dc624Sjohpow01  *
39f19dc624Sjohpow01  * See section 15.1.27 of the RME specification.
40f19dc624Sjohpow01  */
/* Indexed by gpccr_pps_e (PPS); see the T lookup table in the comment above */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};
45f19dc624Sjohpow01 
46f19dc624Sjohpow01 /*
47f19dc624Sjohpow01  * Lookup P from PGS
48f19dc624Sjohpow01  *
49f19dc624Sjohpow01  *   PGS    Size    P
50f19dc624Sjohpow01  *   0b00   4KB     12
51f19dc624Sjohpow01  *   0b10   16KB    14
52f19dc624Sjohpow01  *   0b01   64KB    16
53f19dc624Sjohpow01  *
54f19dc624Sjohpow01  * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
55f19dc624Sjohpow01  *
56f19dc624Sjohpow01  * See section 15.1.27 of the RME specification.
57f19dc624Sjohpow01  */
58f19dc624Sjohpow01 static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
59f19dc624Sjohpow01 
/*
 * Forward declarations of the helpers that split ("shatter") a Contiguous
 * descriptor covering a 2MB/32MB/512MB range into finer-grained L1 entries.
 */
static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc);
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc);
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc);
66*ec0088bbSAlexeiFedorov 
67f19dc624Sjohpow01 /*
68b99926efSAlexeiFedorov  * This structure contains GPT configuration data
69f19dc624Sjohpow01  */
typedef struct {
	/* Base address of the memory holding the L0 table */
	uintptr_t plat_gpt_l0_base;
	/* Protected Physical Size (GPCCR_EL3.PPS field value) */
	gpccr_pps_e pps;
	/* T value derived from PPS via gpt_t_lookup */
	gpt_t_val_e t;
	/* Physical Granule Size (GPCCR_EL3.PGS field value) */
	gpccr_pgs_e pgs;
	/* P value (granule width, see gpt_p_lookup table) matching PGS */
	gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;
79f19dc624Sjohpow01 
/*
 * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
 * +-------+------------+
 * |  PGS  | L1 entries |
 * +-------+------------+
 * |  4KB  |     32     |
 * +-------+------------+
 * |  16KB |     8      |
 * +-------+------------+
 * |  64KB |     2      |
 * +-------+------------+
 * (Assigned outside this chunk, presumably during GPT runtime
 *  initialisation — TODO confirm.)
 */
static unsigned int gpt_l1_cnt_2mb;

/*
 * Mask for the L1 index field, depending on
 * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
 * +---------+-------------------------------+
 * |         |             PGS               |
 * +---------+----------+----------+---------+
 * | L0GPTSZ |   4KB    |   16KB   |   64KB  |
 * +---------+----------+----------+---------+
 * |  1GB    |  0x3FFF  |  0xFFF   |  0x3FF  |
 * +---------+----------+----------+---------+
 * |  16GB   | 0x3FFFF  |  0xFFFF  | 0x3FFF  |
 * +---------+----------+----------+---------+
 * |  64GB   | 0xFFFFF  | 0x3FFFF  | 0xFFFF  |
 * +---------+----------+----------+---------+
 * |  512GB  | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
 * +---------+----------+----------+---------+
 * (Assigned outside this chunk — TODO confirm.)
 */
static uint64_t gpt_l1_index_mask;

/* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
#define L1_QWORDS_2MB	(gpt_l1_cnt_2mb / 2U)
#define L1_QWORDS_32MB	(L1_QWORDS_2MB * 16U)
#define L1_QWORDS_512MB	(L1_QWORDS_32MB * 16U)

/* Size in bytes of L1 entries in 2MB, 32MB */
#define L1_BYTES_2MB	(gpt_l1_cnt_2mb * sizeof(uint64_t))
#define L1_BYTES_32MB	(L1_BYTES_2MB * 16U)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_INDEX(_pa)	\
	(((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)

/* This variable is used during initialization of the L1 tables */
static uintptr_t gpt_l1_tbl;

/* This variable is used during runtime */

/* Bitlock base address for each 512 MB block of PPS */
static bitlock_t *gpt_bitlock_base;
133*ec0088bbSAlexeiFedorov 
134*ec0088bbSAlexeiFedorov static void tlbi_page_dsbosh(uintptr_t base)
135*ec0088bbSAlexeiFedorov {
136*ec0088bbSAlexeiFedorov 	/* Look-up table for invalidation TLBs for 4KB, 16KB and 64KB pages */
137*ec0088bbSAlexeiFedorov 	static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
138*ec0088bbSAlexeiFedorov 		{ tlbirpalos_4k, ~(SZ_4K - 1UL) },
139*ec0088bbSAlexeiFedorov 		{ tlbirpalos_64k, ~(SZ_64K - 1UL) },
140*ec0088bbSAlexeiFedorov 		{ tlbirpalos_16k, ~(SZ_16K - 1UL) }
141*ec0088bbSAlexeiFedorov 	};
142*ec0088bbSAlexeiFedorov 
143*ec0088bbSAlexeiFedorov 	tlbi_page_lookup[gpt_config.pgs].function(
144*ec0088bbSAlexeiFedorov 			base & tlbi_page_lookup[gpt_config.pgs].mask);
145*ec0088bbSAlexeiFedorov 	dsbosh();
146*ec0088bbSAlexeiFedorov }
147*ec0088bbSAlexeiFedorov 
148*ec0088bbSAlexeiFedorov /*
149*ec0088bbSAlexeiFedorov  * Helper function to fill out GPI entries in a single L1 table
150*ec0088bbSAlexeiFedorov  * with Granules or Contiguous descriptor.
151*ec0088bbSAlexeiFedorov  *
152*ec0088bbSAlexeiFedorov  * Parameters
153*ec0088bbSAlexeiFedorov  *   l1			Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
154*ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules or Contiguous descriptor set this range to
155*ec0088bbSAlexeiFedorov  *   cnt		Number of double 128-bit L1 entries to fill
156*ec0088bbSAlexeiFedorov  *
157*ec0088bbSAlexeiFedorov  */
158*ec0088bbSAlexeiFedorov static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
159*ec0088bbSAlexeiFedorov {
160*ec0088bbSAlexeiFedorov 	uint128_t *l1_quad = (uint128_t *)l1;
161*ec0088bbSAlexeiFedorov 	uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);
162*ec0088bbSAlexeiFedorov 
163*ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);
164*ec0088bbSAlexeiFedorov 
165*ec0088bbSAlexeiFedorov 	for (unsigned int i = 0U; i < cnt; i++) {
166*ec0088bbSAlexeiFedorov 		*l1_quad++ = l1_quad_desc;
167*ec0088bbSAlexeiFedorov 	}
168*ec0088bbSAlexeiFedorov }
169*ec0088bbSAlexeiFedorov 
170*ec0088bbSAlexeiFedorov static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
171*ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
172*ec0088bbSAlexeiFedorov {
173*ec0088bbSAlexeiFedorov 	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
174*ec0088bbSAlexeiFedorov 
175*ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
176*ec0088bbSAlexeiFedorov 				__func__, base, l1_desc);
177*ec0088bbSAlexeiFedorov 
178*ec0088bbSAlexeiFedorov 	/* Convert 2MB Contiguous block to Granules */
179*ec0088bbSAlexeiFedorov 	fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
180*ec0088bbSAlexeiFedorov }
181*ec0088bbSAlexeiFedorov 
182*ec0088bbSAlexeiFedorov static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
183*ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
184*ec0088bbSAlexeiFedorov {
185*ec0088bbSAlexeiFedorov 	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
186*ec0088bbSAlexeiFedorov 	const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
187*ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
188*ec0088bbSAlexeiFedorov 	uint64_t *l1;
189*ec0088bbSAlexeiFedorov 
190*ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
191*ec0088bbSAlexeiFedorov 				__func__, base, l1_desc);
192*ec0088bbSAlexeiFedorov 
193*ec0088bbSAlexeiFedorov 	/* Get index corresponding to 32MB aligned address */
194*ec0088bbSAlexeiFedorov 	idx = GPT_L1_INDEX(ALIGN_32MB(base));
195*ec0088bbSAlexeiFedorov 	l1 = &gpi_info->gpt_l1_addr[idx];
196*ec0088bbSAlexeiFedorov 
197*ec0088bbSAlexeiFedorov 	/* 16 x 2MB blocks in 32MB */
198*ec0088bbSAlexeiFedorov 	for (unsigned int i = 0U; i < 16U; i++) {
199*ec0088bbSAlexeiFedorov 		/* Fill with Granules or Contiguous descriptors */
200*ec0088bbSAlexeiFedorov 		fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
201*ec0088bbSAlexeiFedorov 							L1_QWORDS_2MB);
202*ec0088bbSAlexeiFedorov 		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
203*ec0088bbSAlexeiFedorov 	}
204*ec0088bbSAlexeiFedorov }
205*ec0088bbSAlexeiFedorov 
206*ec0088bbSAlexeiFedorov static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
207*ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
208*ec0088bbSAlexeiFedorov {
209*ec0088bbSAlexeiFedorov 	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
210*ec0088bbSAlexeiFedorov 	const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
211*ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
212*ec0088bbSAlexeiFedorov 	uint64_t *l1;
213*ec0088bbSAlexeiFedorov 
214*ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
215*ec0088bbSAlexeiFedorov 				__func__, base, l1_desc);
216*ec0088bbSAlexeiFedorov 
217*ec0088bbSAlexeiFedorov 	/* Get index corresponding to 512MB aligned address */
218*ec0088bbSAlexeiFedorov 	idx = GPT_L1_INDEX(ALIGN_512MB(base));
219*ec0088bbSAlexeiFedorov 	l1 = &gpi_info->gpt_l1_addr[idx];
220*ec0088bbSAlexeiFedorov 
221*ec0088bbSAlexeiFedorov 	/* 16 x 32MB blocks in 512MB */
222*ec0088bbSAlexeiFedorov 	for (unsigned int i = 0U; i < 16U; i++) {
223*ec0088bbSAlexeiFedorov 		if (l1 == l1_32mb) {
224*ec0088bbSAlexeiFedorov 			/* Shatter this 32MB block */
225*ec0088bbSAlexeiFedorov 			shatter_32mb(base, gpi_info, l1_desc);
226*ec0088bbSAlexeiFedorov 		} else {
227*ec0088bbSAlexeiFedorov 			/* Fill 32MB with Contiguous descriptors */
228*ec0088bbSAlexeiFedorov 			fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
229*ec0088bbSAlexeiFedorov 		}
230*ec0088bbSAlexeiFedorov 
231*ec0088bbSAlexeiFedorov 		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
232*ec0088bbSAlexeiFedorov 	}
233*ec0088bbSAlexeiFedorov }
234*ec0088bbSAlexeiFedorov 
235f19dc624Sjohpow01 /*
236f19dc624Sjohpow01  * This function checks to see if a GPI value is valid.
237f19dc624Sjohpow01  *
238f19dc624Sjohpow01  * These are valid GPI values.
239f19dc624Sjohpow01  *   GPT_GPI_NO_ACCESS   U(0x0)
240f19dc624Sjohpow01  *   GPT_GPI_SECURE      U(0x8)
241f19dc624Sjohpow01  *   GPT_GPI_NS          U(0x9)
242f19dc624Sjohpow01  *   GPT_GPI_ROOT        U(0xA)
243f19dc624Sjohpow01  *   GPT_GPI_REALM       U(0xB)
244f19dc624Sjohpow01  *   GPT_GPI_ANY         U(0xF)
245f19dc624Sjohpow01  *
246f19dc624Sjohpow01  * Parameters
247f19dc624Sjohpow01  *   gpi		GPI to check for validity.
248f19dc624Sjohpow01  *
249f19dc624Sjohpow01  * Return
250f19dc624Sjohpow01  *   true for a valid GPI, false for an invalid one.
251f19dc624Sjohpow01  */
25220e2683dSAlexeiFedorov static bool is_gpi_valid(unsigned int gpi)
253f19dc624Sjohpow01 {
254f19dc624Sjohpow01 	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
255f19dc624Sjohpow01 	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
256f19dc624Sjohpow01 		return true;
257f19dc624Sjohpow01 	}
2586a00e9b0SRobert Wakim 	return false;
259f19dc624Sjohpow01 }
260f19dc624Sjohpow01 
261f19dc624Sjohpow01 /*
262f19dc624Sjohpow01  * This function checks to see if two PAS regions overlap.
263f19dc624Sjohpow01  *
264f19dc624Sjohpow01  * Parameters
265f19dc624Sjohpow01  *   base_1: base address of first PAS
266f19dc624Sjohpow01  *   size_1: size of first PAS
267f19dc624Sjohpow01  *   base_2: base address of second PAS
268f19dc624Sjohpow01  *   size_2: size of second PAS
269f19dc624Sjohpow01  *
270f19dc624Sjohpow01  * Return
271f19dc624Sjohpow01  *   True if PAS regions overlap, false if they do not.
272f19dc624Sjohpow01  */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	/* Half-open intervals [base, base + size) intersect iff each
	 * region's end lies strictly past the other region's start. */
	uintptr_t end_1 = base_1 + size_1;
	uintptr_t end_2 = base_2 + size_2;

	return (end_1 > base_2) && (end_2 > base_1);
}
281f19dc624Sjohpow01 
282f19dc624Sjohpow01 /*
283f19dc624Sjohpow01  * This helper function checks to see if a PAS region from index 0 to
284f19dc624Sjohpow01  * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
285f19dc624Sjohpow01  *
286f19dc624Sjohpow01  * Parameters
287f19dc624Sjohpow01  *   l0_idx:      Index of the L0 entry to check
288f19dc624Sjohpow01  *   pas_regions: PAS region array
289f19dc624Sjohpow01  *   pas_idx:     Upper bound of the PAS array index.
290f19dc624Sjohpow01  *
291f19dc624Sjohpow01  * Return
292f19dc624Sjohpow01  *   True if a PAS region occupies the L0 region in question, false if not.
293f19dc624Sjohpow01  */
29420e2683dSAlexeiFedorov static bool does_previous_pas_exist_here(unsigned int l0_idx,
295f19dc624Sjohpow01 					 pas_region_t *pas_regions,
296f19dc624Sjohpow01 					 unsigned int pas_idx)
297f19dc624Sjohpow01 {
298b99926efSAlexeiFedorov 	/* Iterate over PAS regions up to pas_idx */
299f19dc624Sjohpow01 	for (unsigned int i = 0U; i < pas_idx; i++) {
30020e2683dSAlexeiFedorov 		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
301f19dc624Sjohpow01 		    GPT_L0GPTSZ_ACTUAL_SIZE,
302f19dc624Sjohpow01 		    pas_regions[i].base_pa, pas_regions[i].size)) {
303f19dc624Sjohpow01 			return true;
304f19dc624Sjohpow01 		}
305f19dc624Sjohpow01 	}
306f19dc624Sjohpow01 	return false;
307f19dc624Sjohpow01 }
308f19dc624Sjohpow01 
309f19dc624Sjohpow01 /*
310f19dc624Sjohpow01  * This function iterates over all of the PAS regions and checks them to ensure
311f19dc624Sjohpow01  * proper alignment of base and size, that the GPI is valid, and that no regions
312f19dc624Sjohpow01  * overlap. As a part of the overlap checks, this function checks existing L0
313f19dc624Sjohpow01  * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
314f19dc624Sjohpow01  * is called multiple times to place L1 tables in different areas of memory. It
315f19dc624Sjohpow01  * also counts the number of L1 tables needed and returns it on success.
316f19dc624Sjohpow01  *
317f19dc624Sjohpow01  * Parameters
318f19dc624Sjohpow01  *   *pas_regions	Pointer to array of PAS region structures.
319f19dc624Sjohpow01  *   pas_region_cnt	Total number of PAS regions in the array.
320f19dc624Sjohpow01  *
321f19dc624Sjohpow01  * Return
322f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, number of L1 regions
323f19dc624Sjohpow01  *   required when successful.
324f19dc624Sjohpow01  */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	/* Running total of L1 tables required across all PAS regions */
	unsigned int l1_cnt = 0U;
	/* L1 tables required by the PAS region currently being checked */
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity */
		/* Region must fit inside PPS and carry a valid GPI encoding */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		/* Walk every L0 entry this PAS touches, inclusive of the last */
		for (unsigned int i =
			(unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
			i <= GPT_L0_IDX(pas_regions[idx].base_pa +
					pas_regions[idx].size - 1UL);
			i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Block mappings need no L1 tables */
			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same PAS we subtract from pas_l1_cnt and only the
			 * first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	/* Success: return the number of L1 tables needed (non-negative) */
	return l1_cnt;
}
466f19dc624Sjohpow01 
467f19dc624Sjohpow01 /*
468f19dc624Sjohpow01  * This function validates L0 initialization parameters.
469f19dc624Sjohpow01  *
 * Parameters
 *   pps		Protected Physical Size value to configure.
 *   l0_mem_base	Base address of memory used for L0 tables.
 *   l0_mem_size	Size of memory available for L0 tables.
473f19dc624Sjohpow01  *
474f19dc624Sjohpow01  * Return
475f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
476f19dc624Sjohpow01  */
47720e2683dSAlexeiFedorov static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
478f19dc624Sjohpow01 				size_t l0_mem_size)
479f19dc624Sjohpow01 {
480*ec0088bbSAlexeiFedorov 	size_t l0_alignment, locks_size;
481f19dc624Sjohpow01 
482f19dc624Sjohpow01 	/*
483f19dc624Sjohpow01 	 * Make sure PPS is valid and then store it since macros need this value
484f19dc624Sjohpow01 	 * to work.
485f19dc624Sjohpow01 	 */
486f19dc624Sjohpow01 	if (pps > GPT_PPS_MAX) {
487b99926efSAlexeiFedorov 		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
488f19dc624Sjohpow01 		return -EINVAL;
489f19dc624Sjohpow01 	}
490f19dc624Sjohpow01 	gpt_config.pps = pps;
491f19dc624Sjohpow01 	gpt_config.t = gpt_t_lookup[pps];
492f19dc624Sjohpow01 
493b99926efSAlexeiFedorov 	/* Alignment must be the greater of 4KB or l0 table size */
494f19dc624Sjohpow01 	l0_alignment = PAGE_SIZE_4KB;
495f19dc624Sjohpow01 	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
496f19dc624Sjohpow01 		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
497f19dc624Sjohpow01 	}
498f19dc624Sjohpow01 
499b99926efSAlexeiFedorov 	/* Check base address */
500b99926efSAlexeiFedorov 	if ((l0_mem_base == 0UL) ||
501b99926efSAlexeiFedorov 	   ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
502b99926efSAlexeiFedorov 		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
503f19dc624Sjohpow01 		return -EFAULT;
504f19dc624Sjohpow01 	}
505f19dc624Sjohpow01 
506b99926efSAlexeiFedorov 	/* Check size */
507f19dc624Sjohpow01 	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
508*ec0088bbSAlexeiFedorov 		ERROR("%sL0%s\n", (const char *)"GPT: Inadequate ",
509*ec0088bbSAlexeiFedorov 			(const char *)" memory\n");
510b99926efSAlexeiFedorov 		ERROR("      Expected 0x%lx bytes, got 0x%lx bytes\n",
511*ec0088bbSAlexeiFedorov 			GPT_L0_TABLE_SIZE(gpt_config.t), l0_mem_size);
512*ec0088bbSAlexeiFedorov 		return -ENOMEM;
513*ec0088bbSAlexeiFedorov 	}
514*ec0088bbSAlexeiFedorov 
515*ec0088bbSAlexeiFedorov 	/*
516*ec0088bbSAlexeiFedorov 	 * Size of bitlocks in bytes for the protected address space
517*ec0088bbSAlexeiFedorov 	 * with 512MB per bitlock.
518*ec0088bbSAlexeiFedorov 	 */
519*ec0088bbSAlexeiFedorov 	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) / (SZ_512M * 8U);
520*ec0088bbSAlexeiFedorov 
521*ec0088bbSAlexeiFedorov 	/* Check space for bitlocks */
522*ec0088bbSAlexeiFedorov 	if (locks_size > (l0_mem_size - GPT_L0_TABLE_SIZE(gpt_config.t))) {
523*ec0088bbSAlexeiFedorov 		ERROR("%sbitlock%s", (const char *)"GPT: Inadequate ",
524*ec0088bbSAlexeiFedorov 			(const char *)" memory\n");
525*ec0088bbSAlexeiFedorov 		ERROR("      Expected 0x%lx bytes, got 0x%lx bytes\n",
526*ec0088bbSAlexeiFedorov 			locks_size,
527*ec0088bbSAlexeiFedorov 			l0_mem_size - GPT_L0_TABLE_SIZE(gpt_config.t));
528f19dc624Sjohpow01 		return -ENOMEM;
529f19dc624Sjohpow01 	}
530f19dc624Sjohpow01 
531f19dc624Sjohpow01 	return 0;
532f19dc624Sjohpow01 }
533f19dc624Sjohpow01 
534f19dc624Sjohpow01 /*
535f19dc624Sjohpow01  * In the event that L1 tables are needed, this function validates
536f19dc624Sjohpow01  * the L1 table generation parameters.
537f19dc624Sjohpow01  *
538f19dc624Sjohpow01  * Parameters
539f19dc624Sjohpow01  *   l1_mem_base	Base address of memory used for L1 table allocation.
540f19dc624Sjohpow01  *   l1_mem_size	Total size of memory available for L1 tables.
541f19dc624Sjohpow01  *   l1_gpt_cnt		Number of L1 tables needed.
542f19dc624Sjohpow01  *
543f19dc624Sjohpow01  * Return
544f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
545f19dc624Sjohpow01  */
54620e2683dSAlexeiFedorov static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
547f19dc624Sjohpow01 				unsigned int l1_gpt_cnt)
548f19dc624Sjohpow01 {
549f19dc624Sjohpow01 	size_t l1_gpt_mem_sz;
550f19dc624Sjohpow01 
551f19dc624Sjohpow01 	/* Check if the granularity is supported */
552f19dc624Sjohpow01 	if (!xlat_arch_is_granule_size_supported(
553f19dc624Sjohpow01 	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
554f19dc624Sjohpow01 		return -EPERM;
555f19dc624Sjohpow01 	}
556f19dc624Sjohpow01 
557b99926efSAlexeiFedorov 	/* Make sure L1 tables are aligned to their size */
558b99926efSAlexeiFedorov 	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
559b99926efSAlexeiFedorov 		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
560f19dc624Sjohpow01 		      l1_mem_base);
561f19dc624Sjohpow01 		return -EFAULT;
562f19dc624Sjohpow01 	}
563f19dc624Sjohpow01 
564b99926efSAlexeiFedorov 	/* Get total memory needed for L1 tables */
565f19dc624Sjohpow01 	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
566f19dc624Sjohpow01 
567b99926efSAlexeiFedorov 	/* Check for overflow */
568f19dc624Sjohpow01 	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
569b99926efSAlexeiFedorov 		ERROR("GPT: Overflow calculating L1 memory size\n");
570f19dc624Sjohpow01 		return -ENOMEM;
571f19dc624Sjohpow01 	}
572f19dc624Sjohpow01 
573b99926efSAlexeiFedorov 	/* Make sure enough space was supplied */
574f19dc624Sjohpow01 	if (l1_mem_size < l1_gpt_mem_sz) {
575*ec0088bbSAlexeiFedorov 		ERROR("%sL1 GPTs%s", (const char *)"GPT: Inadequate ",
576*ec0088bbSAlexeiFedorov 			(const char *)" memory\n");
577b99926efSAlexeiFedorov 		ERROR("      Expected 0x%lx bytes, got 0x%lx bytes\n",
578f19dc624Sjohpow01 			l1_gpt_mem_sz, l1_mem_size);
579f19dc624Sjohpow01 		return -ENOMEM;
580f19dc624Sjohpow01 	}
581f19dc624Sjohpow01 
582b99926efSAlexeiFedorov 	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
583f19dc624Sjohpow01 	return 0;
584f19dc624Sjohpow01 }
585f19dc624Sjohpow01 
586f19dc624Sjohpow01 /*
587f19dc624Sjohpow01  * This function initializes L0 block descriptors (regions that cannot be
588f19dc624Sjohpow01  * transitioned at the granule level) according to the provided PAS.
589f19dc624Sjohpow01  *
590f19dc624Sjohpow01  * Parameters
591f19dc624Sjohpow01  *   *pas		Pointer to the structure defining the PAS region to
592f19dc624Sjohpow01  *			initialize.
593f19dc624Sjohpow01  */
59420e2683dSAlexeiFedorov static void generate_l0_blk_desc(pas_region_t *pas)
595f19dc624Sjohpow01 {
596f19dc624Sjohpow01 	uint64_t gpt_desc;
597*ec0088bbSAlexeiFedorov 	unsigned long idx, end_idx;
598f19dc624Sjohpow01 	uint64_t *l0_gpt_arr;
599f19dc624Sjohpow01 
600f19dc624Sjohpow01 	assert(gpt_config.plat_gpt_l0_base != 0U);
601f19dc624Sjohpow01 	assert(pas != NULL);
602f19dc624Sjohpow01 
603f19dc624Sjohpow01 	/*
604f19dc624Sjohpow01 	 * Checking of PAS parameters has already been done in
60520e2683dSAlexeiFedorov 	 * validate_pas_mappings so no need to check the same things again.
606f19dc624Sjohpow01 	 */
607f19dc624Sjohpow01 
608f19dc624Sjohpow01 	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;
609f19dc624Sjohpow01 
610f19dc624Sjohpow01 	/* Create the GPT Block descriptor for this PAS region */
611f19dc624Sjohpow01 	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
612f19dc624Sjohpow01 
613f19dc624Sjohpow01 	/* Start index of this region in L0 GPTs */
6146a00e9b0SRobert Wakim 	idx = GPT_L0_IDX(pas->base_pa);
615f19dc624Sjohpow01 
616f19dc624Sjohpow01 	/*
617f19dc624Sjohpow01 	 * Determine number of L0 GPT descriptors covered by
618f19dc624Sjohpow01 	 * this PAS region and use the count to populate these
619f19dc624Sjohpow01 	 * descriptors.
620f19dc624Sjohpow01 	 */
6216a00e9b0SRobert Wakim 	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
622f19dc624Sjohpow01 
623b99926efSAlexeiFedorov 	/* Generate the needed block descriptors */
624f19dc624Sjohpow01 	for (; idx < end_idx; idx++) {
625f19dc624Sjohpow01 		l0_gpt_arr[idx] = gpt_desc;
626*ec0088bbSAlexeiFedorov 		VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
627f19dc624Sjohpow01 			idx, &l0_gpt_arr[idx],
628f19dc624Sjohpow01 			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
629f19dc624Sjohpow01 			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
630f19dc624Sjohpow01 	}
631f19dc624Sjohpow01 }
632f19dc624Sjohpow01 
633f19dc624Sjohpow01 /*
634f19dc624Sjohpow01  * Helper function to determine if the end physical address lies in the same L0
635f19dc624Sjohpow01  * region as the current physical address. If true, the end physical address is
636f19dc624Sjohpow01  * returned else, the start address of the next region is returned.
637f19dc624Sjohpow01  *
638f19dc624Sjohpow01  * Parameters
639f19dc624Sjohpow01  *   cur_pa		Physical address of the current PA in the loop through
640f19dc624Sjohpow01  *			the range.
641f19dc624Sjohpow01  *   end_pa		Physical address of the end PA in a PAS range.
642f19dc624Sjohpow01  *
643f19dc624Sjohpow01  * Return
644f19dc624Sjohpow01  *   The PA of the end of the current range.
645f19dc624Sjohpow01  */
64620e2683dSAlexeiFedorov static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
647f19dc624Sjohpow01 {
648f19dc624Sjohpow01 	uintptr_t cur_idx;
649f19dc624Sjohpow01 	uintptr_t end_idx;
650f19dc624Sjohpow01 
6516a00e9b0SRobert Wakim 	cur_idx = GPT_L0_IDX(cur_pa);
6526a00e9b0SRobert Wakim 	end_idx = GPT_L0_IDX(end_pa);
653f19dc624Sjohpow01 
654f19dc624Sjohpow01 	assert(cur_idx <= end_idx);
655f19dc624Sjohpow01 
656f19dc624Sjohpow01 	if (cur_idx == end_idx) {
657f19dc624Sjohpow01 		return end_pa;
658f19dc624Sjohpow01 	}
659f19dc624Sjohpow01 
660*ec0088bbSAlexeiFedorov 	return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
661f19dc624Sjohpow01 }
662f19dc624Sjohpow01 
663f19dc624Sjohpow01 /*
664*ec0088bbSAlexeiFedorov  * Helper function to fill out GPI entries from 'first' granule address of
665*ec0088bbSAlexeiFedorov  * the specified 'length' in a single L1 table with 'l1_desc' Contiguous
666*ec0088bbSAlexeiFedorov  * descriptor.
667f19dc624Sjohpow01  *
668f19dc624Sjohpow01  * Parameters
669f19dc624Sjohpow01  *   l1			Pointer to L1 table to fill out
670*ec0088bbSAlexeiFedorov  *   first		Address of first granule in range
671*ec0088bbSAlexeiFedorov  *   length		Length of the range in bytes
672*ec0088bbSAlexeiFedorov  *   gpi		GPI set this range to
673*ec0088bbSAlexeiFedorov  *
674*ec0088bbSAlexeiFedorov  * Return
675*ec0088bbSAlexeiFedorov  *   Address of next granule in range.
676f19dc624Sjohpow01  */
677*ec0088bbSAlexeiFedorov static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
678*ec0088bbSAlexeiFedorov 				   size_t length, unsigned int gpi)
679f19dc624Sjohpow01 {
680*ec0088bbSAlexeiFedorov 	/*
681*ec0088bbSAlexeiFedorov 	 * Look up table for contiguous blocks and descriptors.
682*ec0088bbSAlexeiFedorov 	 * Entries should be defined in descending block sizes:
683*ec0088bbSAlexeiFedorov 	 * 512MB, 32MB and 2MB.
684*ec0088bbSAlexeiFedorov 	 */
685*ec0088bbSAlexeiFedorov 	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
686*ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK == 512)
687*ec0088bbSAlexeiFedorov 		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
688*ec0088bbSAlexeiFedorov #endif
689*ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK >= 32)
690*ec0088bbSAlexeiFedorov 		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
691*ec0088bbSAlexeiFedorov #endif
692*ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK != 0)
693*ec0088bbSAlexeiFedorov 		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
694*ec0088bbSAlexeiFedorov #endif
695*ec0088bbSAlexeiFedorov 	};
696f19dc624Sjohpow01 
697*ec0088bbSAlexeiFedorov 	/*
698*ec0088bbSAlexeiFedorov 	 * Iterate through all block sizes (512MB, 32MB and 2MB)
699*ec0088bbSAlexeiFedorov 	 * starting with maximum supported.
700*ec0088bbSAlexeiFedorov 	 */
701*ec0088bbSAlexeiFedorov 	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
702*ec0088bbSAlexeiFedorov 		/* Calculate index */
703*ec0088bbSAlexeiFedorov 		unsigned long idx = GPT_L1_INDEX(first);
704*ec0088bbSAlexeiFedorov 
705*ec0088bbSAlexeiFedorov 		/* Contiguous block size */
706*ec0088bbSAlexeiFedorov 		size_t cont_size = gpt_fill_lookup[i].size;
707*ec0088bbSAlexeiFedorov 
708*ec0088bbSAlexeiFedorov 		if (GPT_REGION_IS_CONT(length, first, cont_size)) {
709*ec0088bbSAlexeiFedorov 
710*ec0088bbSAlexeiFedorov 			/* Generate Contiguous descriptor */
711*ec0088bbSAlexeiFedorov 			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
712*ec0088bbSAlexeiFedorov 						gpt_fill_lookup[i].desc);
713*ec0088bbSAlexeiFedorov 
714*ec0088bbSAlexeiFedorov 			/* Number of 128-bit L1 entries in block */
715*ec0088bbSAlexeiFedorov 			unsigned int cnt;
716*ec0088bbSAlexeiFedorov 
717*ec0088bbSAlexeiFedorov 			switch (cont_size) {
718*ec0088bbSAlexeiFedorov 			case SZ_512M:
719*ec0088bbSAlexeiFedorov 				cnt = L1_QWORDS_512MB;
720*ec0088bbSAlexeiFedorov 				break;
721*ec0088bbSAlexeiFedorov 			case SZ_32M:
722*ec0088bbSAlexeiFedorov 				cnt = L1_QWORDS_32MB;
723*ec0088bbSAlexeiFedorov 				break;
724*ec0088bbSAlexeiFedorov 			default:			/* SZ_2MB */
725*ec0088bbSAlexeiFedorov 				cnt = L1_QWORDS_2MB;
726*ec0088bbSAlexeiFedorov 			}
727*ec0088bbSAlexeiFedorov 
728*ec0088bbSAlexeiFedorov 			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
729*ec0088bbSAlexeiFedorov 				first, cont_size / SZ_1M);
730*ec0088bbSAlexeiFedorov 
731*ec0088bbSAlexeiFedorov 			/* Fill Contiguous descriptors */
732*ec0088bbSAlexeiFedorov 			fill_desc(&l1[idx], l1_desc, cnt);
733*ec0088bbSAlexeiFedorov 			first += cont_size;
734*ec0088bbSAlexeiFedorov 			length -= cont_size;
735*ec0088bbSAlexeiFedorov 
736*ec0088bbSAlexeiFedorov 			if (length == 0UL) {
737*ec0088bbSAlexeiFedorov 				break;
738*ec0088bbSAlexeiFedorov 			}
739*ec0088bbSAlexeiFedorov 		}
740*ec0088bbSAlexeiFedorov 	}
741*ec0088bbSAlexeiFedorov 
742*ec0088bbSAlexeiFedorov 	return first;
743*ec0088bbSAlexeiFedorov }
744*ec0088bbSAlexeiFedorov 
745*ec0088bbSAlexeiFedorov /* Build Granules descriptor with the same 'gpi' for every GPI entry */
746*ec0088bbSAlexeiFedorov static uint64_t build_l1_desc(unsigned int gpi)
747*ec0088bbSAlexeiFedorov {
748*ec0088bbSAlexeiFedorov 	uint64_t l1_desc = (uint64_t)gpi | ((uint64_t)gpi << 4);
749*ec0088bbSAlexeiFedorov 
750*ec0088bbSAlexeiFedorov 	l1_desc |= (l1_desc << 8);
751*ec0088bbSAlexeiFedorov 	l1_desc |= (l1_desc << 16);
752*ec0088bbSAlexeiFedorov 	return (l1_desc | (l1_desc << 32));
753*ec0088bbSAlexeiFedorov }
754*ec0088bbSAlexeiFedorov 
755*ec0088bbSAlexeiFedorov /*
756*ec0088bbSAlexeiFedorov  * Helper function to fill out GPI entries from 'first' to 'last' granule
757*ec0088bbSAlexeiFedorov  * address in a single L1 table with 'l1_desc' Granules descriptor.
758*ec0088bbSAlexeiFedorov  *
759*ec0088bbSAlexeiFedorov  * Parameters
760*ec0088bbSAlexeiFedorov  *   l1			Pointer to L1 table to fill out
761*ec0088bbSAlexeiFedorov  *   first		Address of first granule in range
762*ec0088bbSAlexeiFedorov  *   last		Address of last granule in range (inclusive)
763*ec0088bbSAlexeiFedorov  *   gpi		GPI set this range to
764*ec0088bbSAlexeiFedorov  *
765*ec0088bbSAlexeiFedorov  * Return
766*ec0088bbSAlexeiFedorov  *   Address of next granule in range.
767*ec0088bbSAlexeiFedorov  */
768*ec0088bbSAlexeiFedorov static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
769*ec0088bbSAlexeiFedorov 				   uintptr_t last, unsigned int gpi)
770*ec0088bbSAlexeiFedorov {
771*ec0088bbSAlexeiFedorov 	uint64_t gpi_mask;
772*ec0088bbSAlexeiFedorov 	unsigned long i;
773*ec0088bbSAlexeiFedorov 
774*ec0088bbSAlexeiFedorov 	/* Generate Granules descriptor */
775*ec0088bbSAlexeiFedorov 	uint64_t l1_desc = build_l1_desc(gpi);
776f19dc624Sjohpow01 
777b99926efSAlexeiFedorov 	/* Shift the mask if we're starting in the middle of an L1 entry */
778*ec0088bbSAlexeiFedorov 	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);
779f19dc624Sjohpow01 
780b99926efSAlexeiFedorov 	/* Fill out each L1 entry for this region */
781*ec0088bbSAlexeiFedorov 	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {
782*ec0088bbSAlexeiFedorov 
783b99926efSAlexeiFedorov 		/* Account for stopping in the middle of an L1 entry */
784*ec0088bbSAlexeiFedorov 		if (i == GPT_L1_INDEX(last)) {
785b99926efSAlexeiFedorov 			gpi_mask &= (gpi_mask >> ((15U -
786f19dc624Sjohpow01 				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
787f19dc624Sjohpow01 		}
788f19dc624Sjohpow01 
789*ec0088bbSAlexeiFedorov 		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));
790*ec0088bbSAlexeiFedorov 
791b99926efSAlexeiFedorov 		/* Write GPI values */
792*ec0088bbSAlexeiFedorov 		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);
793f19dc624Sjohpow01 
794b99926efSAlexeiFedorov 		/* Reset mask */
795b99926efSAlexeiFedorov 		gpi_mask = ULONG_MAX;
796f19dc624Sjohpow01 	}
797*ec0088bbSAlexeiFedorov 
798*ec0088bbSAlexeiFedorov 	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
799*ec0088bbSAlexeiFedorov }
800*ec0088bbSAlexeiFedorov 
801*ec0088bbSAlexeiFedorov /*
802*ec0088bbSAlexeiFedorov  * Helper function to fill out GPI entries in a single L1 table.
803*ec0088bbSAlexeiFedorov  * This function fills out an entire L1 table with either Contiguous
804*ec0088bbSAlexeiFedorov  * or Granules descriptors depending on region length and alignment.
805*ec0088bbSAlexeiFedorov  *
806*ec0088bbSAlexeiFedorov  * Parameters
807*ec0088bbSAlexeiFedorov  *   l1			Pointer to L1 table to fill out
808*ec0088bbSAlexeiFedorov  *   first		Address of first granule in range
809*ec0088bbSAlexeiFedorov  *   last		Address of last granule in range (inclusive)
810*ec0088bbSAlexeiFedorov  *   gpi		GPI set this range to
811*ec0088bbSAlexeiFedorov  */
812*ec0088bbSAlexeiFedorov static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
813*ec0088bbSAlexeiFedorov 			unsigned int gpi)
814*ec0088bbSAlexeiFedorov {
815*ec0088bbSAlexeiFedorov 	assert(l1 != NULL);
816*ec0088bbSAlexeiFedorov 	assert(first <= last);
817*ec0088bbSAlexeiFedorov 	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
818*ec0088bbSAlexeiFedorov 	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
819*ec0088bbSAlexeiFedorov 	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
820*ec0088bbSAlexeiFedorov 
821*ec0088bbSAlexeiFedorov 	while (first < last) {
822*ec0088bbSAlexeiFedorov 		/* Region length */
823*ec0088bbSAlexeiFedorov 		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
824*ec0088bbSAlexeiFedorov 
825*ec0088bbSAlexeiFedorov 		if (length < SZ_2M) {
826*ec0088bbSAlexeiFedorov 			/*
827*ec0088bbSAlexeiFedorov 			 * Fill with Granule descriptor in case of
828*ec0088bbSAlexeiFedorov 			 * region length < 2MB.
829*ec0088bbSAlexeiFedorov 			 */
830*ec0088bbSAlexeiFedorov 			first = fill_l1_gran_desc(l1, first, last, gpi);
831*ec0088bbSAlexeiFedorov 
832*ec0088bbSAlexeiFedorov 		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
833*ec0088bbSAlexeiFedorov 			/*
834*ec0088bbSAlexeiFedorov 			 * For region length >= 2MB and at least 2MB aligned
835*ec0088bbSAlexeiFedorov 			 * call to fill_l1_cont_desc will iterate through
836*ec0088bbSAlexeiFedorov 			 * all block sizes (512MB, 32MB and 2MB) supported and
837*ec0088bbSAlexeiFedorov 			 * fill corresponding Contiguous descriptors.
838*ec0088bbSAlexeiFedorov 			 */
839*ec0088bbSAlexeiFedorov 			first = fill_l1_cont_desc(l1, first, length, gpi);
840*ec0088bbSAlexeiFedorov 		} else {
841*ec0088bbSAlexeiFedorov 			/*
842*ec0088bbSAlexeiFedorov 			 * For not aligned region >= 2MB fill with Granules
843*ec0088bbSAlexeiFedorov 			 * descriptors up to the next 2MB aligned address.
844*ec0088bbSAlexeiFedorov 			 */
845*ec0088bbSAlexeiFedorov 			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
846*ec0088bbSAlexeiFedorov 					GPT_PGS_ACTUAL_SIZE(gpt_config.p);
847*ec0088bbSAlexeiFedorov 
848*ec0088bbSAlexeiFedorov 			first = fill_l1_gran_desc(l1, first, new_last, gpi);
849*ec0088bbSAlexeiFedorov 		}
850*ec0088bbSAlexeiFedorov 	}
851*ec0088bbSAlexeiFedorov 
852*ec0088bbSAlexeiFedorov 	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
853f19dc624Sjohpow01 }
854f19dc624Sjohpow01 
855f19dc624Sjohpow01 /*
856f19dc624Sjohpow01  * This function finds the next available unused L1 table and initializes all
857f19dc624Sjohpow01  * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
858f19dc624Sjohpow01  * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
859f19dc624Sjohpow01  * event that a PAS region stops midway through an L1 table, thus guaranteeing
860f19dc624Sjohpow01  * that all memory not explicitly assigned is GPI_ANY. This function does not
861f19dc624Sjohpow01  * check for overflow conditions, that should be done by the caller.
862f19dc624Sjohpow01  *
863f19dc624Sjohpow01  * Return
864f19dc624Sjohpow01  *   Pointer to the next available L1 table.
865f19dc624Sjohpow01  */
86620e2683dSAlexeiFedorov static uint64_t *get_new_l1_tbl(void)
867f19dc624Sjohpow01 {
868b99926efSAlexeiFedorov 	/* Retrieve the next L1 table */
869*ec0088bbSAlexeiFedorov 	uint64_t *l1 = (uint64_t *)gpt_l1_tbl;
870f19dc624Sjohpow01 
871*ec0088bbSAlexeiFedorov 	/* Increment L1 GPT address */
872*ec0088bbSAlexeiFedorov 	gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);
873f19dc624Sjohpow01 
874f19dc624Sjohpow01 	/* Initialize all GPIs to GPT_GPI_ANY */
875f19dc624Sjohpow01 	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
876*ec0088bbSAlexeiFedorov 		l1[i] = GPT_L1_ANY_DESC;
877f19dc624Sjohpow01 	}
878f19dc624Sjohpow01 
879f19dc624Sjohpow01 	return l1;
880f19dc624Sjohpow01 }
881f19dc624Sjohpow01 
882f19dc624Sjohpow01 /*
883f19dc624Sjohpow01  * When L1 tables are needed, this function creates the necessary L0 table
884f19dc624Sjohpow01  * descriptors and fills out the L1 table entries according to the supplied
885f19dc624Sjohpow01  * PAS range.
886f19dc624Sjohpow01  *
887f19dc624Sjohpow01  * Parameters
888f19dc624Sjohpow01  *   *pas		Pointer to the structure defining the PAS region.
889f19dc624Sjohpow01  */
89020e2683dSAlexeiFedorov static void generate_l0_tbl_desc(pas_region_t *pas)
891f19dc624Sjohpow01 {
892f19dc624Sjohpow01 	uintptr_t end_pa;
893f19dc624Sjohpow01 	uintptr_t cur_pa;
894f19dc624Sjohpow01 	uintptr_t last_gran_pa;
895f19dc624Sjohpow01 	uint64_t *l0_gpt_base;
896f19dc624Sjohpow01 	uint64_t *l1_gpt_arr;
897*ec0088bbSAlexeiFedorov 	unsigned int l0_idx, gpi;
898f19dc624Sjohpow01 
899f19dc624Sjohpow01 	assert(gpt_config.plat_gpt_l0_base != 0U);
900f19dc624Sjohpow01 	assert(pas != NULL);
901f19dc624Sjohpow01 
902f19dc624Sjohpow01 	/*
903f19dc624Sjohpow01 	 * Checking of PAS parameters has already been done in
90420e2683dSAlexeiFedorov 	 * validate_pas_mappings so no need to check the same things again.
905f19dc624Sjohpow01 	 */
906f19dc624Sjohpow01 	end_pa = pas->base_pa + pas->size;
907f19dc624Sjohpow01 	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
908f19dc624Sjohpow01 
909f19dc624Sjohpow01 	/* We start working from the granule at base PA */
910f19dc624Sjohpow01 	cur_pa = pas->base_pa;
911f19dc624Sjohpow01 
912*ec0088bbSAlexeiFedorov 	/* Get GPI */
913*ec0088bbSAlexeiFedorov 	gpi = GPT_PAS_ATTR_GPI(pas->attrs);
914f19dc624Sjohpow01 
915*ec0088bbSAlexeiFedorov 	/* Iterate over each L0 region in this memory range */
916*ec0088bbSAlexeiFedorov 	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
917*ec0088bbSAlexeiFedorov 	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
918*ec0088bbSAlexeiFedorov 	     l0_idx++) {
919f19dc624Sjohpow01 		/*
920f19dc624Sjohpow01 		 * See if the L0 entry is already a table descriptor or if we
921f19dc624Sjohpow01 		 * need to create one.
922f19dc624Sjohpow01 		 */
923f19dc624Sjohpow01 		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
924b99926efSAlexeiFedorov 			/* Get the L1 array from the L0 entry */
925f19dc624Sjohpow01 			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
926f19dc624Sjohpow01 		} else {
927b99926efSAlexeiFedorov 			/* Get a new L1 table from the L1 memory space */
92820e2683dSAlexeiFedorov 			l1_gpt_arr = get_new_l1_tbl();
929f19dc624Sjohpow01 
930b99926efSAlexeiFedorov 			/* Fill out the L0 descriptor and flush it */
931f19dc624Sjohpow01 			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
932f19dc624Sjohpow01 		}
933f19dc624Sjohpow01 
934b99926efSAlexeiFedorov 		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
935b99926efSAlexeiFedorov 			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);
936f19dc624Sjohpow01 
937f19dc624Sjohpow01 		/*
938f19dc624Sjohpow01 		 * Determine the PA of the last granule in this L0 descriptor.
939f19dc624Sjohpow01 		 */
94020e2683dSAlexeiFedorov 		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
941f19dc624Sjohpow01 			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);
942f19dc624Sjohpow01 
943f19dc624Sjohpow01 		/*
944f19dc624Sjohpow01 		 * Fill up L1 GPT entries between these two addresses. This
945f19dc624Sjohpow01 		 * function needs the addresses of the first granule and last
946f19dc624Sjohpow01 		 * granule in the range.
947f19dc624Sjohpow01 		 */
948*ec0088bbSAlexeiFedorov 		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);
949f19dc624Sjohpow01 
950b99926efSAlexeiFedorov 		/* Advance cur_pa to first granule in next L0 region */
95120e2683dSAlexeiFedorov 		cur_pa = get_l1_end_pa(cur_pa, end_pa);
952f19dc624Sjohpow01 	}
953f19dc624Sjohpow01 }
954f19dc624Sjohpow01 
955f19dc624Sjohpow01 /*
956f19dc624Sjohpow01  * This function flushes a range of L0 descriptors used by a given PAS region
957f19dc624Sjohpow01  * array. There is a chance that some unmodified L0 descriptors would be flushed
958f19dc624Sjohpow01  * in the case that there are "holes" in an array of PAS regions but overall
959f19dc624Sjohpow01  * this should be faster than individually flushing each modified L0 descriptor
960f19dc624Sjohpow01  * as they are created.
961f19dc624Sjohpow01  *
962f19dc624Sjohpow01  * Parameters
963f19dc624Sjohpow01  *   *pas		Pointer to an array of PAS regions.
964f19dc624Sjohpow01  *   pas_count		Number of entries in the PAS array.
965f19dc624Sjohpow01  */
966f19dc624Sjohpow01 static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
967f19dc624Sjohpow01 {
968*ec0088bbSAlexeiFedorov 	unsigned long idx;
969*ec0088bbSAlexeiFedorov 	unsigned long start_idx;
970*ec0088bbSAlexeiFedorov 	unsigned long end_idx;
971f19dc624Sjohpow01 	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
972f19dc624Sjohpow01 
973f19dc624Sjohpow01 	assert(pas != NULL);
974b99926efSAlexeiFedorov 	assert(pas_count != 0U);
975f19dc624Sjohpow01 
976b99926efSAlexeiFedorov 	/* Initial start and end values */
977f19dc624Sjohpow01 	start_idx = GPT_L0_IDX(pas[0].base_pa);
978b99926efSAlexeiFedorov 	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);
979f19dc624Sjohpow01 
980b99926efSAlexeiFedorov 	/* Find lowest and highest L0 indices used in this PAS array */
981*ec0088bbSAlexeiFedorov 	for (idx = 1UL; idx < pas_count; idx++) {
982f19dc624Sjohpow01 		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
983f19dc624Sjohpow01 			start_idx = GPT_L0_IDX(pas[idx].base_pa);
984f19dc624Sjohpow01 		}
985b99926efSAlexeiFedorov 		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
986b99926efSAlexeiFedorov 			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
987f19dc624Sjohpow01 		}
988f19dc624Sjohpow01 	}
989f19dc624Sjohpow01 
990f19dc624Sjohpow01 	/*
991f19dc624Sjohpow01 	 * Flush all covered L0 descriptors, add 1 because we need to include
992f19dc624Sjohpow01 	 * the end index value.
993f19dc624Sjohpow01 	 */
994f19dc624Sjohpow01 	flush_dcache_range((uintptr_t)&l0[start_idx],
995*ec0088bbSAlexeiFedorov 			   ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
996f19dc624Sjohpow01 }
997f19dc624Sjohpow01 
998f19dc624Sjohpow01 /*
999f19dc624Sjohpow01  * Public API to enable granule protection checks once the tables have all been
1000f19dc624Sjohpow01  * initialized. This function is called at first initialization and then again
1001f19dc624Sjohpow01  * later during warm boots of CPU cores.
1002f19dc624Sjohpow01  *
1003f19dc624Sjohpow01  * Return
1004f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1005f19dc624Sjohpow01  */
1006f19dc624Sjohpow01 int gpt_enable(void)
1007f19dc624Sjohpow01 {
1008f19dc624Sjohpow01 	u_register_t gpccr_el3;
1009f19dc624Sjohpow01 
1010f19dc624Sjohpow01 	/*
1011f19dc624Sjohpow01 	 * Granule tables must be initialised before enabling
1012f19dc624Sjohpow01 	 * granule protection.
1013f19dc624Sjohpow01 	 */
1014b99926efSAlexeiFedorov 	if (gpt_config.plat_gpt_l0_base == 0UL) {
1015b99926efSAlexeiFedorov 		ERROR("GPT: Tables have not been initialized!\n");
1016f19dc624Sjohpow01 		return -EPERM;
1017f19dc624Sjohpow01 	}
1018f19dc624Sjohpow01 
1019f19dc624Sjohpow01 	/* Write the base address of the L0 tables into GPTBR */
1020f19dc624Sjohpow01 	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
1021f19dc624Sjohpow01 			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);
1022f19dc624Sjohpow01 
1023f19dc624Sjohpow01 	/* GPCCR_EL3.PPS */
1024f19dc624Sjohpow01 	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);
1025f19dc624Sjohpow01 
1026f19dc624Sjohpow01 	/* GPCCR_EL3.PGS */
1027f19dc624Sjohpow01 	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);
1028f19dc624Sjohpow01 
102977612b90SSoby Mathew 	/*
103077612b90SSoby Mathew 	 * Since EL3 maps the L1 region as Inner shareable, use the same
103177612b90SSoby Mathew 	 * shareability attribute for GPC as well so that
103277612b90SSoby Mathew 	 * GPC fetches are visible to PEs
103377612b90SSoby Mathew 	 */
103477612b90SSoby Mathew 	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);
1035f19dc624Sjohpow01 
1036b99926efSAlexeiFedorov 	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
1037f19dc624Sjohpow01 	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
1038f19dc624Sjohpow01 	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
1039f19dc624Sjohpow01 
104014cddd7aSKathleen Capella 	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
104114cddd7aSKathleen Capella 	write_gpccr_el3(gpccr_el3);
104214cddd7aSKathleen Capella 	isb();
104314cddd7aSKathleen Capella 
104414cddd7aSKathleen Capella 	/* Invalidate any stale TLB entries and any cached register fields */
104514cddd7aSKathleen Capella 	tlbipaallos();
104614cddd7aSKathleen Capella 	dsb();
104714cddd7aSKathleen Capella 	isb();
104814cddd7aSKathleen Capella 
1049f19dc624Sjohpow01 	/* Enable GPT */
1050f19dc624Sjohpow01 	gpccr_el3 |= GPCCR_GPC_BIT;
1051f19dc624Sjohpow01 
1052b99926efSAlexeiFedorov 	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
1053f19dc624Sjohpow01 	write_gpccr_el3(gpccr_el3);
105477612b90SSoby Mathew 	isb();
1055f19dc624Sjohpow01 	tlbipaallos();
1056f19dc624Sjohpow01 	dsb();
1057f19dc624Sjohpow01 	isb();
1058f19dc624Sjohpow01 
1059f19dc624Sjohpow01 	return 0;
1060f19dc624Sjohpow01 }
1061f19dc624Sjohpow01 
1062f19dc624Sjohpow01 /*
1063f19dc624Sjohpow01  * Public API to disable granule protection checks.
1064f19dc624Sjohpow01  */
1065f19dc624Sjohpow01 void gpt_disable(void)
1066f19dc624Sjohpow01 {
1067f19dc624Sjohpow01 	u_register_t gpccr_el3 = read_gpccr_el3();
1068f19dc624Sjohpow01 
1069f19dc624Sjohpow01 	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
1070f19dc624Sjohpow01 	dsbsy();
1071f19dc624Sjohpow01 	isb();
1072f19dc624Sjohpow01 }
1073f19dc624Sjohpow01 
1074f19dc624Sjohpow01 /*
1075f19dc624Sjohpow01  * Public API that initializes the entire protected space to GPT_GPI_ANY using
1076f19dc624Sjohpow01  * the L0 tables (block descriptors). Ideally, this function is invoked prior
1077f19dc624Sjohpow01  * to DDR discovery and initialization. The MMU must be initialized before
1078f19dc624Sjohpow01  * calling this function.
1079f19dc624Sjohpow01  *
1080f19dc624Sjohpow01  * Parameters
1081f19dc624Sjohpow01  *   pps		PPS value to use for table generation
1082f19dc624Sjohpow01  *   l0_mem_base	Base address of L0 tables in memory.
1083f19dc624Sjohpow01  *   l0_mem_size	Total size of memory available for L0 tables.
1084f19dc624Sjohpow01  *
1085f19dc624Sjohpow01  * Return
1086f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1087f19dc624Sjohpow01  */
1088a0d5147bSAlexeiFedorov int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
1089f19dc624Sjohpow01 		       size_t l0_mem_size)
1090f19dc624Sjohpow01 {
1091f19dc624Sjohpow01 	uint64_t gpt_desc;
1092*ec0088bbSAlexeiFedorov 	size_t locks_size;
1093*ec0088bbSAlexeiFedorov 	bitlock_t *bit_locks;
1094*ec0088bbSAlexeiFedorov 	int ret;
1095f19dc624Sjohpow01 
1096b99926efSAlexeiFedorov 	/* Ensure that MMU and Data caches are enabled */
1097f19dc624Sjohpow01 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
1098f19dc624Sjohpow01 
1099b99926efSAlexeiFedorov 	/* Validate other parameters */
110020e2683dSAlexeiFedorov 	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
11016a00e9b0SRobert Wakim 	if (ret != 0) {
1102f19dc624Sjohpow01 		return ret;
1103f19dc624Sjohpow01 	}
1104f19dc624Sjohpow01 
1105b99926efSAlexeiFedorov 	/* Create the descriptor to initialize L0 entries with */
1106f19dc624Sjohpow01 	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
1107f19dc624Sjohpow01 
1108f19dc624Sjohpow01 	/* Iterate through all L0 entries */
1109f19dc624Sjohpow01 	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
1110f19dc624Sjohpow01 		((uint64_t *)l0_mem_base)[i] = gpt_desc;
1111f19dc624Sjohpow01 	}
1112f19dc624Sjohpow01 
1113*ec0088bbSAlexeiFedorov 	/* Initialise bitlocks at the end of L0 table */
1114*ec0088bbSAlexeiFedorov 	bit_locks = (bitlock_t *)(l0_mem_base +
1115*ec0088bbSAlexeiFedorov 					GPT_L0_TABLE_SIZE(gpt_config.t));
1116*ec0088bbSAlexeiFedorov 
1117*ec0088bbSAlexeiFedorov 	/* Size of bitlocks in bytes */
1118*ec0088bbSAlexeiFedorov 	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) / (SZ_512M * 8U);
1119*ec0088bbSAlexeiFedorov 
1120*ec0088bbSAlexeiFedorov 	for (size_t i = 0UL; i < (locks_size/LOCK_SIZE); i++) {
1121*ec0088bbSAlexeiFedorov 		bit_locks[i].lock = 0U;
1122*ec0088bbSAlexeiFedorov 	}
1123*ec0088bbSAlexeiFedorov 
1124*ec0088bbSAlexeiFedorov 	/* Flush updated L0 tables and bitlocks to memory */
1125f19dc624Sjohpow01 	flush_dcache_range((uintptr_t)l0_mem_base,
1126*ec0088bbSAlexeiFedorov 				GPT_L0_TABLE_SIZE(gpt_config.t) + locks_size);
1127f19dc624Sjohpow01 
1128b99926efSAlexeiFedorov 	/* Stash the L0 base address once initial setup is complete */
1129f19dc624Sjohpow01 	gpt_config.plat_gpt_l0_base = l0_mem_base;
1130f19dc624Sjohpow01 
1131f19dc624Sjohpow01 	return 0;
1132f19dc624Sjohpow01 }
1133f19dc624Sjohpow01 
1134f19dc624Sjohpow01 /*
1135f19dc624Sjohpow01  * Public API that carves out PAS regions from the L0 tables and builds any L1
1136f19dc624Sjohpow01  * tables that are needed. This function ideally is run after DDR discovery and
1137f19dc624Sjohpow01  * initialization. The L0 tables must have already been initialized to GPI_ANY
1138f19dc624Sjohpow01  * when this function is called.
1139f19dc624Sjohpow01  *
1140f19dc624Sjohpow01  * This function can be called multiple times with different L1 memory ranges
1141f19dc624Sjohpow01  * and PAS regions if it is desirable to place L1 tables in different locations
1142f19dc624Sjohpow01  * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
1143*ec0088bbSAlexeiFedorov  * in the DDR bank that they control).
1144f19dc624Sjohpow01  *
1145f19dc624Sjohpow01  * Parameters
1146f19dc624Sjohpow01  *   pgs		PGS value to use for table generation.
1147f19dc624Sjohpow01  *   l1_mem_base	Base address of memory used for L1 tables.
1148f19dc624Sjohpow01  *   l1_mem_size	Total size of memory available for L1 tables.
1149f19dc624Sjohpow01  *   *pas_regions	Pointer to PAS regions structure array.
1150f19dc624Sjohpow01  *   pas_count		Total number of PAS regions.
1151f19dc624Sjohpow01  *
1152f19dc624Sjohpow01  * Return
1153f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1154f19dc624Sjohpow01  */
1155f19dc624Sjohpow01 int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
1156f19dc624Sjohpow01 			   size_t l1_mem_size, pas_region_t *pas_regions,
1157f19dc624Sjohpow01 			   unsigned int pas_count)
1158f19dc624Sjohpow01 {
1159*ec0088bbSAlexeiFedorov 	int l1_gpt_cnt, ret;
1160f19dc624Sjohpow01 
1161b99926efSAlexeiFedorov 	/* Ensure that MMU and Data caches are enabled */
1162f19dc624Sjohpow01 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
1163f19dc624Sjohpow01 
1164b99926efSAlexeiFedorov 	/* PGS is needed for validate_pas_mappings so check it now */
1165f19dc624Sjohpow01 	if (pgs > GPT_PGS_MAX) {
1166b99926efSAlexeiFedorov 		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
1167f19dc624Sjohpow01 		return -EINVAL;
1168f19dc624Sjohpow01 	}
1169f19dc624Sjohpow01 	gpt_config.pgs = pgs;
1170f19dc624Sjohpow01 	gpt_config.p = gpt_p_lookup[pgs];
1171f19dc624Sjohpow01 
1172b99926efSAlexeiFedorov 	/* Make sure L0 tables have been initialized */
1173f19dc624Sjohpow01 	if (gpt_config.plat_gpt_l0_base == 0U) {
1174b99926efSAlexeiFedorov 		ERROR("GPT: L0 tables must be initialized first!\n");
1175f19dc624Sjohpow01 		return -EPERM;
1176f19dc624Sjohpow01 	}
1177f19dc624Sjohpow01 
1178b99926efSAlexeiFedorov 	/* Check if L1 GPTs are required and how many */
117920e2683dSAlexeiFedorov 	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
1180f19dc624Sjohpow01 	if (l1_gpt_cnt < 0) {
1181f19dc624Sjohpow01 		return l1_gpt_cnt;
1182f19dc624Sjohpow01 	}
1183f19dc624Sjohpow01 
1184b99926efSAlexeiFedorov 	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);
1185f19dc624Sjohpow01 
1186b99926efSAlexeiFedorov 	/* If L1 tables are needed then validate the L1 parameters */
1187f19dc624Sjohpow01 	if (l1_gpt_cnt > 0) {
118820e2683dSAlexeiFedorov 		ret = validate_l1_params(l1_mem_base, l1_mem_size,
1189b99926efSAlexeiFedorov 					(unsigned int)l1_gpt_cnt);
11906a00e9b0SRobert Wakim 		if (ret != 0) {
1191f19dc624Sjohpow01 			return ret;
1192f19dc624Sjohpow01 		}
1193f19dc624Sjohpow01 
1194b99926efSAlexeiFedorov 		/* Set up parameters for L1 table generation */
1195f19dc624Sjohpow01 		gpt_l1_tbl = l1_mem_base;
1196f19dc624Sjohpow01 	}
1197f19dc624Sjohpow01 
1198*ec0088bbSAlexeiFedorov 	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
1199*ec0088bbSAlexeiFedorov 	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);
1200*ec0088bbSAlexeiFedorov 
1201*ec0088bbSAlexeiFedorov 	/* Mask for the L1 index field */
1202*ec0088bbSAlexeiFedorov 	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);
1203*ec0088bbSAlexeiFedorov 
1204b99926efSAlexeiFedorov 	INFO("GPT: Boot Configuration\n");
1205f19dc624Sjohpow01 	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
1206f19dc624Sjohpow01 	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
1207f19dc624Sjohpow01 	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
1208b99926efSAlexeiFedorov 	INFO("  PAS count: %u\n", pas_count);
1209b99926efSAlexeiFedorov 	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
1210f19dc624Sjohpow01 
1211b99926efSAlexeiFedorov 	/* Generate the tables in memory */
1212f19dc624Sjohpow01 	for (unsigned int idx = 0U; idx < pas_count; idx++) {
1213b99926efSAlexeiFedorov 		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
1214f19dc624Sjohpow01 			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
1215f19dc624Sjohpow01 			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
1216f19dc624Sjohpow01 			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
1217f19dc624Sjohpow01 
1218f19dc624Sjohpow01 		/* Check if a block or table descriptor is required */
1219f19dc624Sjohpow01 		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
1220f19dc624Sjohpow01 		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
122120e2683dSAlexeiFedorov 			generate_l0_blk_desc(&pas_regions[idx]);
1222f19dc624Sjohpow01 
1223f19dc624Sjohpow01 		} else {
122420e2683dSAlexeiFedorov 			generate_l0_tbl_desc(&pas_regions[idx]);
1225f19dc624Sjohpow01 		}
1226f19dc624Sjohpow01 	}
1227f19dc624Sjohpow01 
1228b99926efSAlexeiFedorov 	/* Flush modified L0 tables */
1229f19dc624Sjohpow01 	flush_l0_for_pas_array(pas_regions, pas_count);
1230f19dc624Sjohpow01 
1231b99926efSAlexeiFedorov 	/* Flush L1 tables if needed */
1232f19dc624Sjohpow01 	if (l1_gpt_cnt > 0) {
1233f19dc624Sjohpow01 		flush_dcache_range(l1_mem_base,
1234f19dc624Sjohpow01 				   GPT_L1_TABLE_SIZE(gpt_config.p) *
1235*ec0088bbSAlexeiFedorov 				   (size_t)l1_gpt_cnt);
1236f19dc624Sjohpow01 	}
1237f19dc624Sjohpow01 
1238b99926efSAlexeiFedorov 	/* Make sure that all the entries are written to the memory */
1239f19dc624Sjohpow01 	dsbishst();
124077612b90SSoby Mathew 	tlbipaallos();
124177612b90SSoby Mathew 	dsb();
124277612b90SSoby Mathew 	isb();
1243f19dc624Sjohpow01 
1244f19dc624Sjohpow01 	return 0;
1245f19dc624Sjohpow01 }
1246f19dc624Sjohpow01 
1247f19dc624Sjohpow01 /*
1248f19dc624Sjohpow01  * Public API to initialize the runtime gpt_config structure based on the values
1249f19dc624Sjohpow01  * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
1250f19dc624Sjohpow01  * typically happens in a bootloader stage prior to setting up the EL3 runtime
1251f19dc624Sjohpow01  * environment for the granule transition service so this function detects the
1252f19dc624Sjohpow01  * initialization from a previous stage. Granule protection checks must be
1253f19dc624Sjohpow01  * enabled already or this function will return an error.
1254f19dc624Sjohpow01  *
1255f19dc624Sjohpow01  * Return
1256f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1257f19dc624Sjohpow01  */
1258f19dc624Sjohpow01 int gpt_runtime_init(void)
1259f19dc624Sjohpow01 {
1260f19dc624Sjohpow01 	u_register_t reg;
1261f19dc624Sjohpow01 
1262b99926efSAlexeiFedorov 	/* Ensure that MMU and Data caches are enabled */
1263f19dc624Sjohpow01 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
1264f19dc624Sjohpow01 
1265b99926efSAlexeiFedorov 	/* Ensure GPC are already enabled */
1266f19dc624Sjohpow01 	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
1267b99926efSAlexeiFedorov 		ERROR("GPT: Granule protection checks are not enabled!\n");
1268f19dc624Sjohpow01 		return -EPERM;
1269f19dc624Sjohpow01 	}
1270f19dc624Sjohpow01 
1271f19dc624Sjohpow01 	/*
1272f19dc624Sjohpow01 	 * Read the L0 table address from GPTBR, we don't need the L1 base
1273f19dc624Sjohpow01 	 * address since those are included in the L0 tables as needed.
1274f19dc624Sjohpow01 	 */
1275f19dc624Sjohpow01 	reg = read_gptbr_el3();
1276f19dc624Sjohpow01 	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
1277f19dc624Sjohpow01 				      GPTBR_BADDR_MASK) <<
1278f19dc624Sjohpow01 				      GPTBR_BADDR_VAL_SHIFT;
1279f19dc624Sjohpow01 
1280b99926efSAlexeiFedorov 	/* Read GPCCR to get PGS and PPS values */
1281f19dc624Sjohpow01 	reg = read_gpccr_el3();
1282f19dc624Sjohpow01 	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
1283f19dc624Sjohpow01 	gpt_config.t = gpt_t_lookup[gpt_config.pps];
1284f19dc624Sjohpow01 	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
1285f19dc624Sjohpow01 	gpt_config.p = gpt_p_lookup[gpt_config.pgs];
1286f19dc624Sjohpow01 
1287*ec0088bbSAlexeiFedorov 	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
1288*ec0088bbSAlexeiFedorov 	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);
1289*ec0088bbSAlexeiFedorov 
1290*ec0088bbSAlexeiFedorov 	/* Mask for the L1 index field */
1291*ec0088bbSAlexeiFedorov 	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);
1292*ec0088bbSAlexeiFedorov 
1293*ec0088bbSAlexeiFedorov 	/* Bitlocks at the end of L0 table */
1294*ec0088bbSAlexeiFedorov 	gpt_bitlock_base = (bitlock_t *)(gpt_config.plat_gpt_l0_base +
1295*ec0088bbSAlexeiFedorov 					GPT_L0_TABLE_SIZE(gpt_config.t));
1296*ec0088bbSAlexeiFedorov 
1297b99926efSAlexeiFedorov 	VERBOSE("GPT: Runtime Configuration\n");
1298f19dc624Sjohpow01 	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
1299f19dc624Sjohpow01 	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
1300f19dc624Sjohpow01 	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
1301b99926efSAlexeiFedorov 	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
1302*ec0088bbSAlexeiFedorov 	VERBOSE("  Bitlocks:  0x%"PRIxPTR"\n", (uintptr_t)gpt_bitlock_base);
1303f19dc624Sjohpow01 
1304f19dc624Sjohpow01 	return 0;
1305f19dc624Sjohpow01 }
1306f19dc624Sjohpow01 
1307f19dc624Sjohpow01 /*
13086a00e9b0SRobert Wakim  * A helper to write the value (target_pas << gpi_shift) to the index of
1309b99926efSAlexeiFedorov  * the gpt_l1_addr.
13106a00e9b0SRobert Wakim  */
13116a00e9b0SRobert Wakim static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
13126a00e9b0SRobert Wakim 			     unsigned int gpi_shift, unsigned int idx,
13136a00e9b0SRobert Wakim 			     unsigned int target_pas)
13146a00e9b0SRobert Wakim {
13156a00e9b0SRobert Wakim 	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
13166a00e9b0SRobert Wakim 	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
13176a00e9b0SRobert Wakim 	gpt_l1_addr[idx] = *gpt_l1_desc;
1318*ec0088bbSAlexeiFedorov 
1319*ec0088bbSAlexeiFedorov 	dsboshst();
13206a00e9b0SRobert Wakim }
13216a00e9b0SRobert Wakim 
13226a00e9b0SRobert Wakim /*
13236a00e9b0SRobert Wakim  * Helper to retrieve the gpt_l1_* information from the base address
1324b99926efSAlexeiFedorov  * returned in gpi_info.
13256a00e9b0SRobert Wakim  */
13266a00e9b0SRobert Wakim static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
13276a00e9b0SRobert Wakim {
13286a00e9b0SRobert Wakim 	uint64_t gpt_l0_desc, *gpt_l0_base;
1329*ec0088bbSAlexeiFedorov 	unsigned int idx_512;
13306a00e9b0SRobert Wakim 
13316a00e9b0SRobert Wakim 	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
13326a00e9b0SRobert Wakim 	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
13336a00e9b0SRobert Wakim 	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
1334b99926efSAlexeiFedorov 		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
13356a00e9b0SRobert Wakim 		VERBOSE("      Base=0x%"PRIx64"\n", base);
13366a00e9b0SRobert Wakim 		return -EINVAL;
13376a00e9b0SRobert Wakim 	}
13386a00e9b0SRobert Wakim 
1339b99926efSAlexeiFedorov 	/* Get the table index and GPI shift from PA */
13406a00e9b0SRobert Wakim 	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
1341*ec0088bbSAlexeiFedorov 	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
13426a00e9b0SRobert Wakim 	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
13436a00e9b0SRobert Wakim 
1344*ec0088bbSAlexeiFedorov 	/* 512MB block index */
1345*ec0088bbSAlexeiFedorov 	idx_512 = (unsigned int)(base / SZ_512M);
1346*ec0088bbSAlexeiFedorov 
1347*ec0088bbSAlexeiFedorov 	/* Bitlock address and mask */
1348*ec0088bbSAlexeiFedorov 	gpi_info->lock = &gpt_bitlock_base[idx_512 / LOCK_BITS];
1349*ec0088bbSAlexeiFedorov 	gpi_info->mask = 1U << (idx_512 & (LOCK_BITS - 1U));
1350*ec0088bbSAlexeiFedorov 
13516a00e9b0SRobert Wakim 	return 0;
13526a00e9b0SRobert Wakim }
13536a00e9b0SRobert Wakim 
13546a00e9b0SRobert Wakim /*
1355*ec0088bbSAlexeiFedorov  * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
1356*ec0088bbSAlexeiFedorov  * This function is called with bitlock acquired.
1357*ec0088bbSAlexeiFedorov  */
1358*ec0088bbSAlexeiFedorov static void read_gpi(gpi_info_t *gpi_info)
1359*ec0088bbSAlexeiFedorov {
1360*ec0088bbSAlexeiFedorov 	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
1361*ec0088bbSAlexeiFedorov 
1362*ec0088bbSAlexeiFedorov 	if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
1363*ec0088bbSAlexeiFedorov 				 GPT_L1_TYPE_CONT_DESC) {
1364*ec0088bbSAlexeiFedorov 		/* Read GPI from Contiguous descriptor */
1365*ec0088bbSAlexeiFedorov 		gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
1366*ec0088bbSAlexeiFedorov 	} else {
1367*ec0088bbSAlexeiFedorov 		/* Read GPI from Granules descriptor */
1368*ec0088bbSAlexeiFedorov 		gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
1369*ec0088bbSAlexeiFedorov 						GPT_L1_GRAN_DESC_GPI_MASK);
1370*ec0088bbSAlexeiFedorov 	}
1371*ec0088bbSAlexeiFedorov }
1372*ec0088bbSAlexeiFedorov 
1373*ec0088bbSAlexeiFedorov static void flush_page_to_popa(uintptr_t addr)
1374*ec0088bbSAlexeiFedorov {
1375*ec0088bbSAlexeiFedorov 	size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);
1376*ec0088bbSAlexeiFedorov 
1377*ec0088bbSAlexeiFedorov 	if (is_feat_mte2_supported()) {
1378*ec0088bbSAlexeiFedorov 		flush_dcache_to_popa_range_mte2(addr, size);
1379*ec0088bbSAlexeiFedorov 	} else {
1380*ec0088bbSAlexeiFedorov 		flush_dcache_to_popa_range(addr, size);
1381*ec0088bbSAlexeiFedorov 	}
1382*ec0088bbSAlexeiFedorov }
1383*ec0088bbSAlexeiFedorov 
1384*ec0088bbSAlexeiFedorov /*
1385*ec0088bbSAlexeiFedorov  * Helper function to check if all L1 entries in 2MB block have
1386*ec0088bbSAlexeiFedorov  * the same Granules descriptor value.
1387*ec0088bbSAlexeiFedorov  *
1388*ec0088bbSAlexeiFedorov  * Parameters
1389*ec0088bbSAlexeiFedorov  *   base		Base address of the region to be checked
1390*ec0088bbSAlexeiFedorov  *   gpi_info		Pointer to 'gpt_config_t' structure
1391*ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules descriptor with all entries
1392*ec0088bbSAlexeiFedorov  *			set to the same GPI.
1393*ec0088bbSAlexeiFedorov  *
1394*ec0088bbSAlexeiFedorov  * Return
1395*ec0088bbSAlexeiFedorov  *   true if L1 all entries have the same descriptor value, false otherwise.
1396*ec0088bbSAlexeiFedorov  */
1397*ec0088bbSAlexeiFedorov __unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1398*ec0088bbSAlexeiFedorov 					uint64_t l1_desc)
1399*ec0088bbSAlexeiFedorov {
1400*ec0088bbSAlexeiFedorov 	/* Last L1 entry index in 2MB block */
1401*ec0088bbSAlexeiFedorov 	unsigned int long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
1402*ec0088bbSAlexeiFedorov 						gpt_l1_cnt_2mb - 1UL;
1403*ec0088bbSAlexeiFedorov 
1404*ec0088bbSAlexeiFedorov 	/* Number of L1 entries in 2MB block */
1405*ec0088bbSAlexeiFedorov 	unsigned int cnt = gpt_l1_cnt_2mb;
1406*ec0088bbSAlexeiFedorov 
1407*ec0088bbSAlexeiFedorov 	/*
1408*ec0088bbSAlexeiFedorov 	 * Start check from the last L1 entry and continue until the first
1409*ec0088bbSAlexeiFedorov 	 * non-matching to the passed Granules descriptor value is found.
1410*ec0088bbSAlexeiFedorov 	 */
1411*ec0088bbSAlexeiFedorov 	while (cnt-- != 0U) {
1412*ec0088bbSAlexeiFedorov 		if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
1413*ec0088bbSAlexeiFedorov 			/* Non-matching L1 entry found */
1414*ec0088bbSAlexeiFedorov 			return false;
1415*ec0088bbSAlexeiFedorov 		}
1416*ec0088bbSAlexeiFedorov 	}
1417*ec0088bbSAlexeiFedorov 
1418*ec0088bbSAlexeiFedorov 	return true;
1419*ec0088bbSAlexeiFedorov }
1420*ec0088bbSAlexeiFedorov 
1421*ec0088bbSAlexeiFedorov __unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1422*ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
1423*ec0088bbSAlexeiFedorov {
1424*ec0088bbSAlexeiFedorov 	/* L1 entry index of the start of 2MB block */
1425*ec0088bbSAlexeiFedorov 	unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));
1426*ec0088bbSAlexeiFedorov 
1427*ec0088bbSAlexeiFedorov 	/* 2MB Contiguous descriptor */
1428*ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
1429*ec0088bbSAlexeiFedorov 
1430*ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1431*ec0088bbSAlexeiFedorov 
1432*ec0088bbSAlexeiFedorov 	fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
1433*ec0088bbSAlexeiFedorov }
1434*ec0088bbSAlexeiFedorov 
1435*ec0088bbSAlexeiFedorov /*
1436*ec0088bbSAlexeiFedorov  * Helper function to check if all 1st L1 entries of 2MB blocks
1437*ec0088bbSAlexeiFedorov  * in 32MB have the same 2MB Contiguous descriptor value.
1438*ec0088bbSAlexeiFedorov  *
1439*ec0088bbSAlexeiFedorov  * Parameters
1440*ec0088bbSAlexeiFedorov  *   base		Base address of the region to be checked
1441*ec0088bbSAlexeiFedorov  *   gpi_info		Pointer to 'gpt_config_t' structure
1442*ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules descriptor.
1443*ec0088bbSAlexeiFedorov  *
1444*ec0088bbSAlexeiFedorov  * Return
1445*ec0088bbSAlexeiFedorov  *   true if all L1 entries have the same descriptor value, false otherwise.
1446*ec0088bbSAlexeiFedorov  */
1447*ec0088bbSAlexeiFedorov __unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
1448*ec0088bbSAlexeiFedorov 					uint64_t l1_desc)
1449*ec0088bbSAlexeiFedorov {
1450*ec0088bbSAlexeiFedorov 	/* The 1st L1 entry index of the last 2MB block in 32MB */
1451*ec0088bbSAlexeiFedorov 	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
1452*ec0088bbSAlexeiFedorov 					(15UL * gpt_l1_cnt_2mb);
1453*ec0088bbSAlexeiFedorov 
1454*ec0088bbSAlexeiFedorov 	/* 2MB Contiguous descriptor */
1455*ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
1456*ec0088bbSAlexeiFedorov 
1457*ec0088bbSAlexeiFedorov 	/* Number of 2MB blocks in 32MB */
1458*ec0088bbSAlexeiFedorov 	unsigned int cnt = 16U;
1459*ec0088bbSAlexeiFedorov 
1460*ec0088bbSAlexeiFedorov 	/* Set the first L1 entry to 2MB Contiguous descriptor */
1461*ec0088bbSAlexeiFedorov 	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;
1462*ec0088bbSAlexeiFedorov 
1463*ec0088bbSAlexeiFedorov 	/*
1464*ec0088bbSAlexeiFedorov 	 * Start check from the 1st L1 entry of the last 2MB block and
1465*ec0088bbSAlexeiFedorov 	 * continue until the first non-matching to 2MB Contiguous descriptor
1466*ec0088bbSAlexeiFedorov 	 * value is found.
1467*ec0088bbSAlexeiFedorov 	 */
1468*ec0088bbSAlexeiFedorov 	while (cnt-- != 0U) {
1469*ec0088bbSAlexeiFedorov 		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
1470*ec0088bbSAlexeiFedorov 			/* Non-matching L1 entry found */
1471*ec0088bbSAlexeiFedorov 			return false;
1472*ec0088bbSAlexeiFedorov 		}
1473*ec0088bbSAlexeiFedorov 		idx -= gpt_l1_cnt_2mb;
1474*ec0088bbSAlexeiFedorov 	}
1475*ec0088bbSAlexeiFedorov 
1476*ec0088bbSAlexeiFedorov 	return true;
1477*ec0088bbSAlexeiFedorov }
1478*ec0088bbSAlexeiFedorov 
1479*ec0088bbSAlexeiFedorov __unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
1480*ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
1481*ec0088bbSAlexeiFedorov {
1482*ec0088bbSAlexeiFedorov 	/* L1 entry index of the start of 32MB block */
1483*ec0088bbSAlexeiFedorov 	unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));
1484*ec0088bbSAlexeiFedorov 
1485*ec0088bbSAlexeiFedorov 	/* 32MB Contiguous descriptor */
1486*ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
1487*ec0088bbSAlexeiFedorov 
1488*ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1489*ec0088bbSAlexeiFedorov 
1490*ec0088bbSAlexeiFedorov 	fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
1491*ec0088bbSAlexeiFedorov }
1492*ec0088bbSAlexeiFedorov 
1493*ec0088bbSAlexeiFedorov /*
1494*ec0088bbSAlexeiFedorov  * Helper function to check if all 1st L1 entries of 32MB blocks
1495*ec0088bbSAlexeiFedorov  * in 512MB have the same 32MB Contiguous descriptor value.
1496*ec0088bbSAlexeiFedorov  *
1497*ec0088bbSAlexeiFedorov  * Parameters
1498*ec0088bbSAlexeiFedorov  *   base		Base address of the region to be checked
1499*ec0088bbSAlexeiFedorov  *   gpi_info		Pointer to 'gpt_config_t' structure
1500*ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules descriptor.
1501*ec0088bbSAlexeiFedorov  *
1502*ec0088bbSAlexeiFedorov  * Return
1503*ec0088bbSAlexeiFedorov  *   true if all L1 entries have the same descriptor value, false otherwise.
1504*ec0088bbSAlexeiFedorov  */
1505*ec0088bbSAlexeiFedorov __unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
1506*ec0088bbSAlexeiFedorov 					uint64_t l1_desc)
1507*ec0088bbSAlexeiFedorov {
1508*ec0088bbSAlexeiFedorov 	/* The 1st L1 entry index of the last 32MB block in 512MB */
1509*ec0088bbSAlexeiFedorov 	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
1510*ec0088bbSAlexeiFedorov 					(15UL * 16UL * gpt_l1_cnt_2mb);
1511*ec0088bbSAlexeiFedorov 
1512*ec0088bbSAlexeiFedorov 	/* 32MB Contiguous descriptor */
1513*ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
1514*ec0088bbSAlexeiFedorov 
1515*ec0088bbSAlexeiFedorov 	/* Number of 32MB blocks in 512MB */
1516*ec0088bbSAlexeiFedorov 	unsigned int cnt = 16U;
1517*ec0088bbSAlexeiFedorov 
1518*ec0088bbSAlexeiFedorov 	/* Set the first L1 entry to 2MB Contiguous descriptor */
1519*ec0088bbSAlexeiFedorov 	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;
1520*ec0088bbSAlexeiFedorov 
1521*ec0088bbSAlexeiFedorov 	/*
1522*ec0088bbSAlexeiFedorov 	 * Start check from the 1st L1 entry of the last 32MB block and
1523*ec0088bbSAlexeiFedorov 	 * continue until the first non-matching to 32MB Contiguous descriptor
1524*ec0088bbSAlexeiFedorov 	 * value is found.
1525*ec0088bbSAlexeiFedorov 	 */
1526*ec0088bbSAlexeiFedorov 	while (cnt-- != 0U) {
1527*ec0088bbSAlexeiFedorov 		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
1528*ec0088bbSAlexeiFedorov 			/* Non-matching L1 entry found */
1529*ec0088bbSAlexeiFedorov 			return false;
1530*ec0088bbSAlexeiFedorov 		}
1531*ec0088bbSAlexeiFedorov 		idx -= 16UL * gpt_l1_cnt_2mb;
1532*ec0088bbSAlexeiFedorov 	}
1533*ec0088bbSAlexeiFedorov 
1534*ec0088bbSAlexeiFedorov 	return true;
1535*ec0088bbSAlexeiFedorov }
1536*ec0088bbSAlexeiFedorov 
1537*ec0088bbSAlexeiFedorov __unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
1538*ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
1539*ec0088bbSAlexeiFedorov {
1540*ec0088bbSAlexeiFedorov 	/* L1 entry index of the start of 512MB block */
1541*ec0088bbSAlexeiFedorov 	unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));
1542*ec0088bbSAlexeiFedorov 
1543*ec0088bbSAlexeiFedorov 	/* 512MB Contiguous descriptor */
1544*ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);
1545*ec0088bbSAlexeiFedorov 
1546*ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1547*ec0088bbSAlexeiFedorov 
1548*ec0088bbSAlexeiFedorov 	fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
1549*ec0088bbSAlexeiFedorov }
1550*ec0088bbSAlexeiFedorov 
1551*ec0088bbSAlexeiFedorov /*
1552*ec0088bbSAlexeiFedorov  * Helper function to convert GPI entries in a single L1 table
1553*ec0088bbSAlexeiFedorov  * from Granules to Contiguous descriptor.
1554*ec0088bbSAlexeiFedorov  *
1555*ec0088bbSAlexeiFedorov  * Parameters
1556*ec0088bbSAlexeiFedorov  *   base		Base address of the region to be written
1557*ec0088bbSAlexeiFedorov  *   gpi_info		Pointer to 'gpt_config_t' structure
1558*ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules descriptor with all entries
1559*ec0088bbSAlexeiFedorov  *			set to the same GPI.
1560*ec0088bbSAlexeiFedorov  */
1561*ec0088bbSAlexeiFedorov __unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
1562*ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
1563*ec0088bbSAlexeiFedorov {
1564*ec0088bbSAlexeiFedorov 	/* Start with check for 2MB block */
1565*ec0088bbSAlexeiFedorov 	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
1566*ec0088bbSAlexeiFedorov 		/* Check for 2MB fusing failed */
1567*ec0088bbSAlexeiFedorov 		return;
1568*ec0088bbSAlexeiFedorov 	}
1569*ec0088bbSAlexeiFedorov 
1570*ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK == 2)
1571*ec0088bbSAlexeiFedorov 	fuse_2mb(base, gpi_info, l1_desc);
1572*ec0088bbSAlexeiFedorov #else
1573*ec0088bbSAlexeiFedorov 	/* Check for 32MB block */
1574*ec0088bbSAlexeiFedorov 	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
1575*ec0088bbSAlexeiFedorov 		/* Check for 32MB fusing failed, fuse to 2MB */
1576*ec0088bbSAlexeiFedorov 		fuse_2mb(base, gpi_info, l1_desc);
1577*ec0088bbSAlexeiFedorov 		return;
1578*ec0088bbSAlexeiFedorov 	}
1579*ec0088bbSAlexeiFedorov 
1580*ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK == 32)
1581*ec0088bbSAlexeiFedorov 	fuse_32mb(base, gpi_info, l1_desc);
1582*ec0088bbSAlexeiFedorov #else
1583*ec0088bbSAlexeiFedorov 	/* Check for 512MB block */
1584*ec0088bbSAlexeiFedorov 	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
1585*ec0088bbSAlexeiFedorov 		/* Check for 512MB fusing failed, fuse to 32MB */
1586*ec0088bbSAlexeiFedorov 		fuse_32mb(base, gpi_info, l1_desc);
1587*ec0088bbSAlexeiFedorov 		return;
1588*ec0088bbSAlexeiFedorov 	}
1589*ec0088bbSAlexeiFedorov 
1590*ec0088bbSAlexeiFedorov 	/* Fuse to 512MB */
1591*ec0088bbSAlexeiFedorov 	fuse_512mb(base, gpi_info, l1_desc);
1592*ec0088bbSAlexeiFedorov 
1593*ec0088bbSAlexeiFedorov #endif	/* RME_GPT_MAX_BLOCK == 32 */
1594*ec0088bbSAlexeiFedorov #endif	/* RME_GPT_MAX_BLOCK == 2 */
1595*ec0088bbSAlexeiFedorov }
1596*ec0088bbSAlexeiFedorov 
1597*ec0088bbSAlexeiFedorov /*
1598*ec0088bbSAlexeiFedorov  * Helper function to convert GPI entries in a single L1 table
1599*ec0088bbSAlexeiFedorov  * from Contiguous to Granules descriptor. This function updates
1600*ec0088bbSAlexeiFedorov  * descriptor to Granules in passed 'gpt_config_t' structure as
1601*ec0088bbSAlexeiFedorov  * the result of shuttering.
1602*ec0088bbSAlexeiFedorov  *
1603*ec0088bbSAlexeiFedorov  * Parameters
1604*ec0088bbSAlexeiFedorov  *   base		Base address of the region to be written
1605*ec0088bbSAlexeiFedorov  *   gpi_info		Pointer to 'gpt_config_t' structure
1606*ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules descriptor set this range to.
1607*ec0088bbSAlexeiFedorov  */
1608*ec0088bbSAlexeiFedorov __unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
1609*ec0088bbSAlexeiFedorov 				   uint64_t l1_desc)
1610*ec0088bbSAlexeiFedorov {
1611*ec0088bbSAlexeiFedorov 	/* Look-up table for 2MB, 32MB and 512MB locks shattering */
1612*ec0088bbSAlexeiFedorov 	static const gpt_shatter_func gpt_shatter_lookup[] = {
1613*ec0088bbSAlexeiFedorov 		shatter_2mb,
1614*ec0088bbSAlexeiFedorov 		shatter_32mb,
1615*ec0088bbSAlexeiFedorov 		shatter_512mb
1616*ec0088bbSAlexeiFedorov 	};
1617*ec0088bbSAlexeiFedorov 
1618*ec0088bbSAlexeiFedorov 	/* Look-up table for invalidation TLBs for 2MB, 32MB and 512MB blocks */
1619*ec0088bbSAlexeiFedorov 	static const gpt_tlbi_lookup_t tlbi_lookup[] = {
1620*ec0088bbSAlexeiFedorov 		{ tlbirpalos_2m, ~(SZ_2M - 1UL) },
1621*ec0088bbSAlexeiFedorov 		{ tlbirpalos_32m, ~(SZ_32M - 1UL) },
1622*ec0088bbSAlexeiFedorov 		{ tlbirpalos_512m, ~(SZ_512M - 1UL) }
1623*ec0088bbSAlexeiFedorov 	};
1624*ec0088bbSAlexeiFedorov 
1625*ec0088bbSAlexeiFedorov 	/* Get shattering level from Contig field of Contiguous descriptor */
1626*ec0088bbSAlexeiFedorov 	unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;
1627*ec0088bbSAlexeiFedorov 
1628*ec0088bbSAlexeiFedorov 	/* Shatter contiguous block */
1629*ec0088bbSAlexeiFedorov 	gpt_shatter_lookup[level](base, gpi_info, l1_desc);
1630*ec0088bbSAlexeiFedorov 
1631*ec0088bbSAlexeiFedorov 	tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
1632*ec0088bbSAlexeiFedorov 	dsbosh();
1633*ec0088bbSAlexeiFedorov 
1634*ec0088bbSAlexeiFedorov 	/*
1635*ec0088bbSAlexeiFedorov 	 * Update 'gpt_config_t' structure's descriptor to Granules to reflect
1636*ec0088bbSAlexeiFedorov 	 * the shattered GPI back to caller.
1637*ec0088bbSAlexeiFedorov 	 */
1638*ec0088bbSAlexeiFedorov 	gpi_info->gpt_l1_desc = l1_desc;
1639*ec0088bbSAlexeiFedorov }
1640*ec0088bbSAlexeiFedorov 
1641*ec0088bbSAlexeiFedorov /*
16426a00e9b0SRobert Wakim  * This function is the granule transition delegate service. When a granule
16436a00e9b0SRobert Wakim  * transition request occurs it is routed to this function to have the request,
1644*ec0088bbSAlexeiFedorov  * if valid, fulfilled following A1.1.1 Delegate of RME supplement.
1645f19dc624Sjohpow01  *
16466a00e9b0SRobert Wakim  * TODO: implement support for transitioning multiple granules at once.
1647f19dc624Sjohpow01  *
1648f19dc624Sjohpow01  * Parameters
16496a00e9b0SRobert Wakim  *   base		Base address of the region to transition, must be
16506a00e9b0SRobert Wakim  *			aligned to granule size.
16516a00e9b0SRobert Wakim  *   size		Size of region to transition, must be aligned to granule
16526a00e9b0SRobert Wakim  *			size.
1653f19dc624Sjohpow01  *   src_sec_state	Security state of the caller.
1654f19dc624Sjohpow01  *
1655f19dc624Sjohpow01  * Return
1656f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1657f19dc624Sjohpow01  */
16586a00e9b0SRobert Wakim int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
1659f19dc624Sjohpow01 {
16606a00e9b0SRobert Wakim 	gpi_info_t gpi_info;
1661*ec0088bbSAlexeiFedorov 	uint64_t nse, __unused l1_desc;
16626a00e9b0SRobert Wakim 	unsigned int target_pas;
1663*ec0088bbSAlexeiFedorov 	int res;
1664f19dc624Sjohpow01 
1665b99926efSAlexeiFedorov 	/* Ensure that the tables have been set up before taking requests */
16666a00e9b0SRobert Wakim 	assert(gpt_config.plat_gpt_l0_base != 0UL);
16676a00e9b0SRobert Wakim 
1668b99926efSAlexeiFedorov 	/* Ensure that caches are enabled */
16696a00e9b0SRobert Wakim 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
16706a00e9b0SRobert Wakim 
1671b99926efSAlexeiFedorov 	/* See if this is a single or a range of granule transition */
16726a00e9b0SRobert Wakim 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
1673f19dc624Sjohpow01 		return -EINVAL;
1674f19dc624Sjohpow01 	}
1675f19dc624Sjohpow01 
16766a00e9b0SRobert Wakim 	/* Check that base and size are valid */
16776a00e9b0SRobert Wakim 	if ((ULONG_MAX - base) < size) {
1678b99926efSAlexeiFedorov 		VERBOSE("GPT: Transition request address overflow!\n");
16796a00e9b0SRobert Wakim 		VERBOSE("      Base=0x%"PRIx64"\n", base);
16806a00e9b0SRobert Wakim 		VERBOSE("      Size=0x%lx\n", size);
16816a00e9b0SRobert Wakim 		return -EINVAL;
16826a00e9b0SRobert Wakim 	}
16836a00e9b0SRobert Wakim 
1684b99926efSAlexeiFedorov 	/* Make sure base and size are valid */
1685b99926efSAlexeiFedorov 	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
1686b99926efSAlexeiFedorov 	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
16876a00e9b0SRobert Wakim 	    (size == 0UL) ||
16886a00e9b0SRobert Wakim 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
1689b99926efSAlexeiFedorov 		VERBOSE("GPT: Invalid granule transition address range!\n");
16906a00e9b0SRobert Wakim 		VERBOSE("      Base=0x%"PRIx64"\n", base);
16916a00e9b0SRobert Wakim 		VERBOSE("      Size=0x%lx\n", size);
16926a00e9b0SRobert Wakim 		return -EINVAL;
16936a00e9b0SRobert Wakim 	}
16946a00e9b0SRobert Wakim 
1695*ec0088bbSAlexeiFedorov 	/* Delegate request can only come from REALM or SECURE */
1696*ec0088bbSAlexeiFedorov 	if ((src_sec_state != SMC_FROM_REALM) &&
1697*ec0088bbSAlexeiFedorov 	    (src_sec_state != SMC_FROM_SECURE)) {
1698*ec0088bbSAlexeiFedorov 		VERBOSE("GPT: Invalid caller security state 0x%x\n",
1699*ec0088bbSAlexeiFedorov 							src_sec_state);
1700*ec0088bbSAlexeiFedorov 		return -EINVAL;
1701*ec0088bbSAlexeiFedorov 	}
1702*ec0088bbSAlexeiFedorov 
1703*ec0088bbSAlexeiFedorov 	if (src_sec_state == SMC_FROM_REALM) {
17046a00e9b0SRobert Wakim 		target_pas = GPT_GPI_REALM;
1705*ec0088bbSAlexeiFedorov 		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
1706*ec0088bbSAlexeiFedorov 		l1_desc = GPT_L1_REALM_DESC;
1707*ec0088bbSAlexeiFedorov 	} else {
17086a00e9b0SRobert Wakim 		target_pas = GPT_GPI_SECURE;
1709*ec0088bbSAlexeiFedorov 		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
1710*ec0088bbSAlexeiFedorov 		l1_desc = GPT_L1_SECURE_DESC;
1711*ec0088bbSAlexeiFedorov 	}
1712*ec0088bbSAlexeiFedorov 
1713*ec0088bbSAlexeiFedorov 	res = get_gpi_params(base, &gpi_info);
1714*ec0088bbSAlexeiFedorov 	if (res != 0) {
1715*ec0088bbSAlexeiFedorov 		return res;
17166a00e9b0SRobert Wakim 	}
17176a00e9b0SRobert Wakim 
17186a00e9b0SRobert Wakim 	/*
1719*ec0088bbSAlexeiFedorov 	 * Access to each 512MB block in L1 tables is controlled by a bitlock
1720*ec0088bbSAlexeiFedorov 	 * to ensure that no more than one CPU is allowed to make changes at
1721*ec0088bbSAlexeiFedorov 	 * any given time.
17226a00e9b0SRobert Wakim 	 */
1723*ec0088bbSAlexeiFedorov 	bit_lock(gpi_info.lock, gpi_info.mask);
1724*ec0088bbSAlexeiFedorov 
1725*ec0088bbSAlexeiFedorov 	read_gpi(&gpi_info);
17266a00e9b0SRobert Wakim 
17276a00e9b0SRobert Wakim 	/* Check that the current address is in NS state */
17286a00e9b0SRobert Wakim 	if (gpi_info.gpi != GPT_GPI_NS) {
1729b99926efSAlexeiFedorov 		VERBOSE("GPT: Only Granule in NS state can be delegated.\n");
17306a00e9b0SRobert Wakim 		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
17316a00e9b0SRobert Wakim 			gpi_info.gpi);
1732*ec0088bbSAlexeiFedorov 		bit_unlock(gpi_info.lock, gpi_info.mask);
1733e50fedbcSJavier Almansa Sobrino 		return -EPERM;
17346a00e9b0SRobert Wakim 	}
17356a00e9b0SRobert Wakim 
1736*ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK != 0)
1737*ec0088bbSAlexeiFedorov 	/* Check for Contiguous descriptor */
1738*ec0088bbSAlexeiFedorov 	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
1739*ec0088bbSAlexeiFedorov 					GPT_L1_TYPE_CONT_DESC) {
1740*ec0088bbSAlexeiFedorov 		shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
1741f19dc624Sjohpow01 	}
1742*ec0088bbSAlexeiFedorov #endif
17436a00e9b0SRobert Wakim 	/*
17446a00e9b0SRobert Wakim 	 * In order to maintain mutual distrust between Realm and Secure
17456a00e9b0SRobert Wakim 	 * states, remove any data speculatively fetched into the target
1746*ec0088bbSAlexeiFedorov 	 * physical address space.
1747*ec0088bbSAlexeiFedorov 	 * Issue DC CIPAPA or DC_CIGDPAPA on implementations with FEAT_MTE2.
17486a00e9b0SRobert Wakim 	 */
1749*ec0088bbSAlexeiFedorov 	flush_page_to_popa(base | nse);
17506a00e9b0SRobert Wakim 
17516a00e9b0SRobert Wakim 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
17526a00e9b0SRobert Wakim 		  gpi_info.gpi_shift, gpi_info.idx, target_pas);
17536a00e9b0SRobert Wakim 
1754*ec0088bbSAlexeiFedorov 	/* Ensure that all agents observe the new configuration */
1755*ec0088bbSAlexeiFedorov 	tlbi_page_dsbosh(base);
17566a00e9b0SRobert Wakim 
17576a00e9b0SRobert Wakim 	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
17586a00e9b0SRobert Wakim 
1759*ec0088bbSAlexeiFedorov 	/* Ensure that the scrubbed data have made it past the PoPA */
1760*ec0088bbSAlexeiFedorov 	flush_page_to_popa(base | nse);
17616a00e9b0SRobert Wakim 
1762*ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK != 0)
1763*ec0088bbSAlexeiFedorov 	if (gpi_info.gpt_l1_desc == l1_desc) {
1764*ec0088bbSAlexeiFedorov 		/* Try to fuse */
1765*ec0088bbSAlexeiFedorov 		fuse_block(base, &gpi_info, l1_desc);
1766*ec0088bbSAlexeiFedorov 	}
1767*ec0088bbSAlexeiFedorov #endif
1768*ec0088bbSAlexeiFedorov 
1769*ec0088bbSAlexeiFedorov 	/* Unlock access to 512MB block */
1770*ec0088bbSAlexeiFedorov 	bit_unlock(gpi_info.lock, gpi_info.mask);
17716a00e9b0SRobert Wakim 
17726a00e9b0SRobert Wakim 	/*
17736a00e9b0SRobert Wakim 	 * The isb() will be done as part of context
1774b99926efSAlexeiFedorov 	 * synchronization when returning to lower EL.
17756a00e9b0SRobert Wakim 	 */
1776b99926efSAlexeiFedorov 	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
17776a00e9b0SRobert Wakim 		base, gpi_info.gpi, target_pas);
1778f19dc624Sjohpow01 
1779f19dc624Sjohpow01 	return 0;
1780f19dc624Sjohpow01 }
1781f19dc624Sjohpow01 
/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request is
 * validated, then fulfilled if possible: a granule currently in the Realm or
 * Secure physical address space (matching the caller's security state) is
 * scrubbed and returned to the Non-secure physical address space.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size (currently exactly one granule).
 *   src_sec_state	Security state of the caller (SMC_FROM_REALM or
 *			SMC_FROM_SECURE).
 *
 * Return
 *    Negative Linux error code in the event of a failure (-EINVAL for an
 *    invalid request, -EPERM if the granule is not in the caller's state),
 *    0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	/* l1_desc is only consumed when RME_GPT_MAX_BLOCK != 0, hence __unused */
	uint64_t nse, __unused l1_desc;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single or a range of granule transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		/* Only a single-granule transition is supported at present */
		return -EINVAL;
	}

	/* Reject requests whose end address would wrap around */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/*
	 * Make sure base and size are granule-aligned, non-zero and within
	 * the configured protected physical address space (PPS).
	 * NOTE(review): ">=" also rejects a granule ending exactly at the
	 * PPS limit; this mirrors the delegate path — confirm intended.
	 */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Locate the L1 descriptor and GPI field for this address */
	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to each 512MB block in L1 tables is controlled by a bitlock
	 * to ensure that no more than one CPU is allowed to make changes at
	 * any given time.
	 */
	bit_lock(gpi_info.lock, gpi_info.mask);

	/* Re-read the GPI under the lock so the check below is race-free */
	read_gpi(&gpi_info);

	/*
	 * Check that the current address is in the delegated state and that
	 * it matches the caller's security state; record the old-world NSE
	 * encoding used to address its PA-space alias for scrubbing below.
	 */
	if ((src_sec_state == SMC_FROM_REALM) &&
		(gpi_info.gpi == GPT_GPI_REALM)) {
		l1_desc = GPT_L1_REALM_DESC;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	} else if ((src_sec_state == SMC_FROM_SECURE) &&
		(gpi_info.gpi == GPT_GPI_SECURE)) {
		l1_desc = GPT_L1_SECURE_DESC;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		VERBOSE("GPT: Only Granule in REALM or SECURE state can be undelegated\n");
		VERBOSE("      Caller: %u Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		/* Drop the bitlock taken above before bailing out */
		bit_unlock(gpi_info.lock, gpi_info.mask);
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/*
	 * If this granule is covered by a Contiguous (block) descriptor,
	 * shatter it into granule descriptors so a single GPI can change.
	 */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
					GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, l1_desc);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now, in order to guarantee that writes
	 * to the currently-accessible physical address space will not
	 * later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);

	/* Ensure that all agents observe the new NO_ACCESS configuration */
	tlbi_page_dsbosh(base);

	/*
	 * Flush the old-world (Realm/Secure) alias of the granule past the
	 * PoPA so its contents cannot be recovered after the transition.
	 */
	flush_page_to_popa(base | nse);

	/*
	 * Remove any data loaded speculatively in NS space from before
	 * the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_page_to_popa(base | nse);

	/* Clear existing GPI encoding and transition granule to NS */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);

	/* Ensure that all agents observe the new NS configuration */
	tlbi_page_dsbosh(base);

#if (RME_GPT_MAX_BLOCK != 0)
	/*
	 * If the whole L1 descriptor now encodes NS granules, try to fuse
	 * it (and its neighbours) back into a Contiguous descriptor.
	 */
	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/* Unlock access to 512MB block */
	bit_unlock(gpi_info.lock, gpi_info.mask);

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}
1921