xref: /rk3399_ARM-atf/lib/gpt_rme/gpt_rme.c (revision 1751181740f22356bd4f3f3ee6444674ffc20902)
1f19dc624Sjohpow01 /*
2b0f1c840SAlexeiFedorov  * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
3f19dc624Sjohpow01  *
4f19dc624Sjohpow01  * SPDX-License-Identifier: BSD-3-Clause
5f19dc624Sjohpow01  */
6f19dc624Sjohpow01 
7f19dc624Sjohpow01 #include <assert.h>
8f19dc624Sjohpow01 #include <errno.h>
92461bd3aSManish Pandey #include <inttypes.h>
10f19dc624Sjohpow01 #include <limits.h>
11f19dc624Sjohpow01 #include <stdint.h>
12f19dc624Sjohpow01 
13f19dc624Sjohpow01 #include <arch.h>
1462d64652SOlivier Deprez #include <arch_features.h>
15f19dc624Sjohpow01 #include <common/debug.h>
16f19dc624Sjohpow01 #include <lib/gpt_rme/gpt_rme.h>
17f19dc624Sjohpow01 #include <lib/smccc.h>
18f19dc624Sjohpow01 #include <lib/xlat_tables/xlat_tables_v2.h>
19f19dc624Sjohpow01 
20b0f1c840SAlexeiFedorov #include "gpt_rme_private.h"
21b0f1c840SAlexeiFedorov 
22f19dc624Sjohpow01 #if !ENABLE_RME
23b99926efSAlexeiFedorov #error "ENABLE_RME must be enabled to use the GPT library"
24f19dc624Sjohpow01 #endif
25f19dc624Sjohpow01 
26f19dc624Sjohpow01 /*
27f19dc624Sjohpow01  * Lookup T from PPS
28f19dc624Sjohpow01  *
29f19dc624Sjohpow01  *   PPS    Size    T
30f19dc624Sjohpow01  *   0b000  4GB     32
31f19dc624Sjohpow01  *   0b001  64GB    36
32f19dc624Sjohpow01  *   0b010  1TB     40
33f19dc624Sjohpow01  *   0b011  4TB     42
34f19dc624Sjohpow01  *   0b100  16TB    44
35f19dc624Sjohpow01  *   0b101  256TB   48
36f19dc624Sjohpow01  *   0b110  4PB     52
37f19dc624Sjohpow01  *
38f19dc624Sjohpow01  * See section 15.1.27 of the RME specification.
39f19dc624Sjohpow01  */
40f19dc624Sjohpow01 static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
41f19dc624Sjohpow01 					   PPS_1TB_T, PPS_4TB_T,
42f19dc624Sjohpow01 					   PPS_16TB_T, PPS_256TB_T,
43f19dc624Sjohpow01 					   PPS_4PB_T};
44f19dc624Sjohpow01 
45f19dc624Sjohpow01 /*
46f19dc624Sjohpow01  * Lookup P from PGS
47f19dc624Sjohpow01  *
48f19dc624Sjohpow01  *   PGS    Size    P
49f19dc624Sjohpow01  *   0b00   4KB     12
50f19dc624Sjohpow01  *   0b10   16KB    14
51f19dc624Sjohpow01  *   0b01   64KB    16
52f19dc624Sjohpow01  *
53f19dc624Sjohpow01  * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
54f19dc624Sjohpow01  *
55f19dc624Sjohpow01  * See section 15.1.27 of the RME specification.
56f19dc624Sjohpow01  */
57f19dc624Sjohpow01 static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
58f19dc624Sjohpow01 
59ec0088bbSAlexeiFedorov static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
60ec0088bbSAlexeiFedorov 				uint64_t l1_desc);
61ec0088bbSAlexeiFedorov static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
62ec0088bbSAlexeiFedorov 				uint64_t l1_desc);
63ec0088bbSAlexeiFedorov static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
64ec0088bbSAlexeiFedorov 				uint64_t l1_desc);
65ec0088bbSAlexeiFedorov 
66f19dc624Sjohpow01 /*
67b99926efSAlexeiFedorov  * This structure contains GPT configuration data
68f19dc624Sjohpow01  */
typedef struct {
	/* Base address of the platform-provided L0 GPT table */
	uintptr_t plat_gpt_l0_base;
	/* Protected Physical Size (GPCCR_EL3.PPS field encoding) */
	gpccr_pps_e pps;
	/* PA width T for the configured PPS (see gpt_t_lookup) */
	gpt_t_val_e t;
	/* Physical Granule Size (GPCCR_EL3.PGS field encoding) */
	gpccr_pgs_e pgs;
	/* Granule size value P for the configured PGS (see gpt_p_lookup) */
	gpt_p_val_e p;
} gpt_config_t;

/* Single global GPT configuration, filled in during initialization */
static gpt_config_t gpt_config;
78f19dc624Sjohpow01 
79ec0088bbSAlexeiFedorov /*
80ec0088bbSAlexeiFedorov  * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
81ec0088bbSAlexeiFedorov  * +-------+------------+
82ec0088bbSAlexeiFedorov  * |  PGS  | L1 entries |
83ec0088bbSAlexeiFedorov  * +-------+------------+
84ec0088bbSAlexeiFedorov  * |  4KB  |     32     |
85ec0088bbSAlexeiFedorov  * +-------+------------+
86ec0088bbSAlexeiFedorov  * |  16KB |     8      |
87ec0088bbSAlexeiFedorov  * +-------+------------+
88ec0088bbSAlexeiFedorov  * |  64KB |     2      |
89ec0088bbSAlexeiFedorov  * +-------+------------+
90ec0088bbSAlexeiFedorov  */
91ec0088bbSAlexeiFedorov static unsigned int gpt_l1_cnt_2mb;
92ec0088bbSAlexeiFedorov 
93ec0088bbSAlexeiFedorov /*
94ec0088bbSAlexeiFedorov  * Mask for the L1 index field, depending on
95ec0088bbSAlexeiFedorov  * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
96ec0088bbSAlexeiFedorov  * +---------+-------------------------------+
97ec0088bbSAlexeiFedorov  * |         |             PGS               |
98ec0088bbSAlexeiFedorov  * +---------+----------+----------+---------+
99ec0088bbSAlexeiFedorov  * | L0GPTSZ |   4KB    |   16KB   |   64KB  |
100ec0088bbSAlexeiFedorov  * +---------+----------+----------+---------+
101ec0088bbSAlexeiFedorov  * |  1GB    |  0x3FFF  |  0xFFF   |  0x3FF  |
102ec0088bbSAlexeiFedorov  * +---------+----------+----------+---------+
103ec0088bbSAlexeiFedorov  * |  16GB   | 0x3FFFF  |  0xFFFF  | 0x3FFF  |
104ec0088bbSAlexeiFedorov  * +---------+----------+----------+---------+
105ec0088bbSAlexeiFedorov  * |  64GB   | 0xFFFFF  | 0x3FFFF  | 0xFFFF  |
106ec0088bbSAlexeiFedorov  * +---------+----------+----------+---------+
107ec0088bbSAlexeiFedorov  * |  512GB  | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
108ec0088bbSAlexeiFedorov  * +---------+----------+----------+---------+
109ec0088bbSAlexeiFedorov  */
110ec0088bbSAlexeiFedorov static uint64_t gpt_l1_index_mask;
111ec0088bbSAlexeiFedorov 
112ec0088bbSAlexeiFedorov /* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
113ec0088bbSAlexeiFedorov #define L1_QWORDS_2MB	(gpt_l1_cnt_2mb / 2U)
114ec0088bbSAlexeiFedorov #define L1_QWORDS_32MB	(L1_QWORDS_2MB * 16U)
115ec0088bbSAlexeiFedorov #define L1_QWORDS_512MB	(L1_QWORDS_32MB * 16U)
116ec0088bbSAlexeiFedorov 
117ec0088bbSAlexeiFedorov /* Size in bytes of L1 entries in 2MB, 32MB */
118ec0088bbSAlexeiFedorov #define L1_BYTES_2MB	(gpt_l1_cnt_2mb * sizeof(uint64_t))
119ec0088bbSAlexeiFedorov #define L1_BYTES_32MB	(L1_BYTES_2MB * 16U)
120ec0088bbSAlexeiFedorov 
121ec0088bbSAlexeiFedorov /* Get the index into the L1 table from a physical address */
122ec0088bbSAlexeiFedorov #define GPT_L1_INDEX(_pa)	\
123ec0088bbSAlexeiFedorov 	(((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)
124ec0088bbSAlexeiFedorov 
125b0f1c840SAlexeiFedorov /* This variable is used during initialization of the L1 tables */
126f19dc624Sjohpow01 static uintptr_t gpt_l1_tbl;
127f19dc624Sjohpow01 
128b0f1c840SAlexeiFedorov /* These variables are used during runtime */
129d766084fSAlexeiFedorov #if (RME_GPT_BITLOCK_BLOCK == 0)
130d766084fSAlexeiFedorov /*
131d766084fSAlexeiFedorov  * The GPTs are protected by a global spinlock to ensure
132d766084fSAlexeiFedorov  * that multiple CPUs do not attempt to change the descriptors at once.
133d766084fSAlexeiFedorov  */
134d766084fSAlexeiFedorov static spinlock_t gpt_lock;
135ec0088bbSAlexeiFedorov 
136b0f1c840SAlexeiFedorov /* Lock/unlock macros for GPT entries
137b0f1c840SAlexeiFedorov  *
138d766084fSAlexeiFedorov  * Access to GPT is controlled by a global lock to ensure
139d766084fSAlexeiFedorov  * that no more than one CPU is allowed to make changes at any
140d766084fSAlexeiFedorov  * given time.
141d766084fSAlexeiFedorov  */
142d766084fSAlexeiFedorov #define GPT_LOCK	spin_lock(&gpt_lock)
143d766084fSAlexeiFedorov #define GPT_UNLOCK	spin_unlock(&gpt_lock)
144d766084fSAlexeiFedorov #else
145b0f1c840SAlexeiFedorov 
146b0f1c840SAlexeiFedorov /* Base address of bitlocks array */
147b0f1c840SAlexeiFedorov static bitlock_t *gpt_bitlock;
148b0f1c840SAlexeiFedorov 
149d766084fSAlexeiFedorov /*
150d766084fSAlexeiFedorov  * Access to a block of memory is controlled by a bitlock.
151d766084fSAlexeiFedorov  * Size of block = RME_GPT_BITLOCK_BLOCK * 512MB.
152d766084fSAlexeiFedorov  */
153d766084fSAlexeiFedorov #define GPT_LOCK	bit_lock(gpi_info.lock, gpi_info.mask)
154d766084fSAlexeiFedorov #define GPT_UNLOCK	bit_unlock(gpi_info.lock, gpi_info.mask)
155b0f1c840SAlexeiFedorov #endif /* RME_GPT_BITLOCK_BLOCK */
156ec0088bbSAlexeiFedorov 
/*
 * Invalidate TLB entries for the page containing 'base' by physical
 * address, then synchronise with a DSB to the outer shareable domain.
 */
static void tlbi_page_dsbosh(uintptr_t base)
{
	/*
	 * TLBI functions and page-base masks indexed by GPCCR_EL3.PGS
	 * encoding: 0b00 = 4KB, 0b01 = 64KB, 0b10 = 16KB.
	 */
	static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
		{ tlbirpalos_4k, ~(SZ_4K - 1UL) },
		{ tlbirpalos_64k, ~(SZ_64K - 1UL) },
		{ tlbirpalos_16k, ~(SZ_16K - 1UL) }
	};
	const gpt_tlbi_lookup_t *entry = &tlbi_page_lookup[gpt_config.pgs];

	entry->function(base & entry->mask);
	dsbosh();
}
170ec0088bbSAlexeiFedorov 
171ec0088bbSAlexeiFedorov /*
172ec0088bbSAlexeiFedorov  * Helper function to fill out GPI entries in a single L1 table
173ec0088bbSAlexeiFedorov  * with Granules or Contiguous descriptor.
174ec0088bbSAlexeiFedorov  *
175ec0088bbSAlexeiFedorov  * Parameters
176ec0088bbSAlexeiFedorov  *   l1			Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
177ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules or Contiguous descriptor set this range to
178ec0088bbSAlexeiFedorov  *   cnt		Number of double 128-bit L1 entries to fill
179ec0088bbSAlexeiFedorov  *
180ec0088bbSAlexeiFedorov  */
/*
 * Fill out GPI entries in a single L1 table with Granules or
 * Contiguous descriptors, writing 128 bits at a time.
 *
 * Parameters
 *   l1		Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill
 *   l1_desc	GPT Granules or Contiguous descriptor to set this range to
 *   cnt	Number of 128-bit (double 64-bit) L1 entries to fill
 */
static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
{
	uint128_t *qword = (uint128_t *)l1;
	uint128_t qword_desc = ((uint128_t)l1_desc << 64) | (uint128_t)l1_desc;

	VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);

	/* Each store sets two consecutive 64-bit descriptors */
	while (cnt-- != 0U) {
		*qword++ = qword_desc;
	}
}
192ec0088bbSAlexeiFedorov 
/*
 * Replace the 2MB Contiguous block containing 'base' with individual
 * Granules descriptors carrying the same GPI.
 */
static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	uint64_t *entry =
		&gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))];

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
				__func__, base, l1_desc);

	/* Convert 2MB Contiguous block to Granules */
	fill_desc(entry, l1_desc, L1_QWORDS_2MB);
}
204ec0088bbSAlexeiFedorov 
/*
 * Break a 32MB Contiguous block into 16 x 2MB sub-blocks: the 2MB block
 * containing 'base' receives Granules descriptors, the rest receive 2MB
 * Contiguous descriptors with the same GPI.
 */
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	const uint64_t *gran_entry =
		&gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))];
	uint64_t cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
	uint64_t *entry;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
				__func__, base, l1_desc);

	/* Start from the entry corresponding to the 32MB aligned address */
	entry = &gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))];

	/* Walk the 16 x 2MB blocks making up this 32MB region */
	for (unsigned int blk = 0U; blk < 16U; blk++) {
		/* Granules for the target 2MB block, Contiguous elsewhere */
		fill_desc(entry,
			  (entry == gran_entry) ? l1_desc : cont_desc,
			  L1_QWORDS_2MB);
		entry = (uint64_t *)((uintptr_t)entry + L1_BYTES_2MB);
	}
}
228ec0088bbSAlexeiFedorov 
/*
 * Break a 512MB Contiguous block into 16 x 32MB sub-blocks: the 32MB block
 * containing 'base' is shattered further via shatter_32mb(), the rest
 * receive 32MB Contiguous descriptors with the same GPI.
 */
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	const uint64_t *target_32mb =
		&gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))];
	uint64_t cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
	uint64_t *entry;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
				__func__, base, l1_desc);

	/* Start from the entry corresponding to the 512MB aligned address */
	entry = &gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_512MB(base))];

	/* Walk the 16 x 32MB blocks making up this 512MB region */
	for (unsigned int blk = 0U; blk < 16U; blk++) {
		if (entry == target_32mb) {
			/* Break the target 32MB block down further */
			shatter_32mb(base, gpi_info, l1_desc);
		} else {
			/* Whole 32MB block becomes Contiguous descriptors */
			fill_desc(entry, cont_desc, L1_QWORDS_32MB);
		}

		entry = (uint64_t *)((uintptr_t)entry + L1_BYTES_32MB);
	}
}
257ec0088bbSAlexeiFedorov 
258f19dc624Sjohpow01 /*
259f19dc624Sjohpow01  * This function checks to see if a GPI value is valid.
260f19dc624Sjohpow01  *
261f19dc624Sjohpow01  * These are valid GPI values.
262f19dc624Sjohpow01  *   GPT_GPI_NO_ACCESS   U(0x0)
263*5e827bf0STimothy Hayes  *   GPT_GPI_SA          U(0x4)
264*5e827bf0STimothy Hayes  *   GPT_GPI_NSP	 U(0x5)
265f19dc624Sjohpow01  *   GPT_GPI_SECURE      U(0x8)
266f19dc624Sjohpow01  *   GPT_GPI_NS          U(0x9)
267f19dc624Sjohpow01  *   GPT_GPI_ROOT        U(0xA)
268f19dc624Sjohpow01  *   GPT_GPI_REALM       U(0xB)
26909a4bcb8SGirish Pathak  *   GPT_GPI_NSO         U(0xD)
270f19dc624Sjohpow01  *   GPT_GPI_ANY         U(0xF)
271f19dc624Sjohpow01  *
272f19dc624Sjohpow01  * Parameters
273f19dc624Sjohpow01  *   gpi		GPI to check for validity.
274f19dc624Sjohpow01  *
275f19dc624Sjohpow01  * Return
276f19dc624Sjohpow01  *   true for a valid GPI, false for an invalid one.
277f19dc624Sjohpow01  */
is_gpi_valid(unsigned int gpi)27820e2683dSAlexeiFedorov static bool is_gpi_valid(unsigned int gpi)
279f19dc624Sjohpow01 {
28009a4bcb8SGirish Pathak 	switch (gpi) {
28109a4bcb8SGirish Pathak 	case GPT_GPI_NO_ACCESS:
28209a4bcb8SGirish Pathak 	case GPT_GPI_SECURE:
28309a4bcb8SGirish Pathak 	case GPT_GPI_NS:
28409a4bcb8SGirish Pathak 	case GPT_GPI_ROOT:
28509a4bcb8SGirish Pathak 	case GPT_GPI_REALM:
28609a4bcb8SGirish Pathak 	case GPT_GPI_ANY:
287f19dc624Sjohpow01 		return true;
28809a4bcb8SGirish Pathak 	case GPT_GPI_NSO:
28909a4bcb8SGirish Pathak 		return is_feat_rme_gpc2_present();
290*5e827bf0STimothy Hayes 	case GPT_GPI_SA:
291*5e827bf0STimothy Hayes 	case GPT_GPI_NSP:
292*5e827bf0STimothy Hayes 		return is_feat_rme_gdi_supported();
29309a4bcb8SGirish Pathak 	default:
2946a00e9b0SRobert Wakim 		return false;
295f19dc624Sjohpow01 	}
29609a4bcb8SGirish Pathak }
297f19dc624Sjohpow01 
298f19dc624Sjohpow01 /*
299f19dc624Sjohpow01  * This function checks to see if two PAS regions overlap.
300f19dc624Sjohpow01  *
301f19dc624Sjohpow01  * Parameters
302f19dc624Sjohpow01  *   base_1: base address of first PAS
303f19dc624Sjohpow01  *   size_1: size of first PAS
304f19dc624Sjohpow01  *   base_2: base address of second PAS
305f19dc624Sjohpow01  *   size_2: size of second PAS
306f19dc624Sjohpow01  *
307f19dc624Sjohpow01  * Return
308f19dc624Sjohpow01  *   True if PAS regions overlap, false if they do not.
309f19dc624Sjohpow01  */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	uintptr_t end_1 = base_1 + size_1;
	uintptr_t end_2 = base_2 + size_2;

	/* Two half-open ranges overlap iff each starts before the other ends */
	return (end_1 > base_2) && (end_2 > base_1);
}
318f19dc624Sjohpow01 
319f19dc624Sjohpow01 /*
320f19dc624Sjohpow01  * This helper function checks to see if a PAS region from index 0 to
321f19dc624Sjohpow01  * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
322f19dc624Sjohpow01  *
323f19dc624Sjohpow01  * Parameters
324f19dc624Sjohpow01  *   l0_idx:      Index of the L0 entry to check
325f19dc624Sjohpow01  *   pas_regions: PAS region array
326f19dc624Sjohpow01  *   pas_idx:     Upper bound of the PAS array index.
327f19dc624Sjohpow01  *
328f19dc624Sjohpow01  * Return
329f19dc624Sjohpow01  *   True if a PAS region occupies the L0 region in question, false if not.
330f19dc624Sjohpow01  */
does_previous_pas_exist_here(unsigned int l0_idx,pas_region_t * pas_regions,unsigned int pas_idx)33120e2683dSAlexeiFedorov static bool does_previous_pas_exist_here(unsigned int l0_idx,
332f19dc624Sjohpow01 					 pas_region_t *pas_regions,
333f19dc624Sjohpow01 					 unsigned int pas_idx)
334f19dc624Sjohpow01 {
335b99926efSAlexeiFedorov 	/* Iterate over PAS regions up to pas_idx */
336f19dc624Sjohpow01 	for (unsigned int i = 0U; i < pas_idx; i++) {
33720e2683dSAlexeiFedorov 		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
338f19dc624Sjohpow01 		    GPT_L0GPTSZ_ACTUAL_SIZE,
339f19dc624Sjohpow01 		    pas_regions[i].base_pa, pas_regions[i].size)) {
340f19dc624Sjohpow01 			return true;
341f19dc624Sjohpow01 		}
342f19dc624Sjohpow01 	}
343f19dc624Sjohpow01 	return false;
344f19dc624Sjohpow01 }
345f19dc624Sjohpow01 
346f19dc624Sjohpow01 /*
347f19dc624Sjohpow01  * This function iterates over all of the PAS regions and checks them to ensure
348f19dc624Sjohpow01  * proper alignment of base and size, that the GPI is valid, and that no regions
349f19dc624Sjohpow01  * overlap. As a part of the overlap checks, this function checks existing L0
350f19dc624Sjohpow01  * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
351f19dc624Sjohpow01  * is called multiple times to place L1 tables in different areas of memory. It
352f19dc624Sjohpow01  * also counts the number of L1 tables needed and returns it on success.
353f19dc624Sjohpow01  *
354f19dc624Sjohpow01  * Parameters
355f19dc624Sjohpow01  *   *pas_regions	Pointer to array of PAS region structures.
356f19dc624Sjohpow01  *   pas_region_cnt	Total number of PAS regions in the array.
357f19dc624Sjohpow01  *
358f19dc624Sjohpow01  * Return
359f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, number of L1 regions
360f19dc624Sjohpow01  *   required when successful.
361f19dc624Sjohpow01  */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;	/* Running total of L1 tables needed */
	unsigned int pas_l1_cnt;	/* L1 tables needed by the current PAS */
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/*
		 * Initial checks for PAS validity: the region must fit within
		 * the protected physical space and carry a valid GPI.
		 */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized. Note the "- 1UL" to get the inclusive index of
		 * the last L0 entry covered by this PAS.
		 */
		for (unsigned int i =
			(unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
			i <= GPT_L0_IDX(pas_regions[idx].base_pa +
					pas_regions[idx].size - 1UL);
			i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Block mappings need no L1 tables */
			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same PAS we subtract from pas_l1_cnt and only the
			 * first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				/* Check the L0 region of the last address */
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			/* Check the L0 region of the first address */
			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	/* Success: return the number of L1 tables required */
	return l1_cnt;
}
503f19dc624Sjohpow01 
504f19dc624Sjohpow01 /*
505f19dc624Sjohpow01  * This function validates L0 initialization parameters.
506f19dc624Sjohpow01  *
507f19dc624Sjohpow01  * Parameters
508b0f1c840SAlexeiFedorov  *   l0_mem_base	Base address of memory used for L0 table.
509b0f1c840SAlexeiFedorov  *   l0_mem_size	Size of memory available for L0 table.
510f19dc624Sjohpow01  *
511f19dc624Sjohpow01  * Return
512f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
513f19dc624Sjohpow01  */
static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
				size_t l0_mem_size)
{
	size_t l0_alignment;

	/*
	 * PPS must be valid; store it (and the derived T value) since the
	 * table-size macros below depend on gpt_config.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4KB or L0 table size */
	l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	if (l0_alignment < SZ_4K) {
		l0_alignment = SZ_4K;
	}

	/* Base address must be non-zero and suitably aligned */
	if ((l0_mem_base == 0UL) ||
	   ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

	/* The supplied region must be able to hold the whole L0 table */
	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		ERROR("GPT: Inadequate L0 memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
				GPT_L0_TABLE_SIZE(gpt_config.t), l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}
553f19dc624Sjohpow01 
554f19dc624Sjohpow01 /*
555f19dc624Sjohpow01  * In the event that L1 tables are needed, this function validates
556f19dc624Sjohpow01  * the L1 table generation parameters.
557f19dc624Sjohpow01  *
558f19dc624Sjohpow01  * Parameters
559f19dc624Sjohpow01  *   l1_mem_base	Base address of memory used for L1 table allocation.
560f19dc624Sjohpow01  *   l1_mem_size	Total size of memory available for L1 tables.
561f19dc624Sjohpow01  *   l1_gpt_cnt		Number of L1 tables needed.
562f19dc624Sjohpow01  *
563f19dc624Sjohpow01  * Return
564f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
565f19dc624Sjohpow01  */
static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
				unsigned int l1_gpt_cnt)
{
	size_t needed_sz;

	/* The configured granule size must be supported by the MMU code */
	if (!xlat_arch_is_granule_size_supported(
	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
		return -EPERM;
	}

	/* Make sure L1 tables are aligned to their size */
	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
		      l1_mem_base);
		return -EFAULT;
	}

	/* Total memory needed for all requested L1 tables */
	needed_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Detect multiplication overflow in the size calculation */
	if ((needed_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
		ERROR("GPT: Overflow calculating L1 memory size\n");
		return -ENOMEM;
	}

	/* Make sure enough space was supplied */
	if (l1_mem_size < needed_sz) {
		ERROR("%sL1 GPTs%s", (const char *)"GPT: Inadequate ",
			(const char *)" memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
			needed_sz, l1_mem_size);
		return -ENOMEM;
	}

	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", needed_sz);
	return 0;
}
605f19dc624Sjohpow01 
606f19dc624Sjohpow01 /*
607f19dc624Sjohpow01  * This function initializes L0 block descriptors (regions that cannot be
608f19dc624Sjohpow01  * transitioned at the granule level) according to the provided PAS.
609f19dc624Sjohpow01  *
610f19dc624Sjohpow01  * Parameters
611f19dc624Sjohpow01  *   *pas		Pointer to the structure defining the PAS region to
612f19dc624Sjohpow01  *			initialize.
613f19dc624Sjohpow01  */
generate_l0_blk_desc(pas_region_t * pas)61420e2683dSAlexeiFedorov static void generate_l0_blk_desc(pas_region_t *pas)
615f19dc624Sjohpow01 {
616f19dc624Sjohpow01 	uint64_t gpt_desc;
617ec0088bbSAlexeiFedorov 	unsigned long idx, end_idx;
618f19dc624Sjohpow01 	uint64_t *l0_gpt_arr;
619f19dc624Sjohpow01 
620b0f1c840SAlexeiFedorov 	assert(gpt_config.plat_gpt_l0_base != 0UL);
621f19dc624Sjohpow01 	assert(pas != NULL);
622f19dc624Sjohpow01 
623f19dc624Sjohpow01 	/*
624f19dc624Sjohpow01 	 * Checking of PAS parameters has already been done in
62520e2683dSAlexeiFedorov 	 * validate_pas_mappings so no need to check the same things again.
626f19dc624Sjohpow01 	 */
627f19dc624Sjohpow01 
628f19dc624Sjohpow01 	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;
629f19dc624Sjohpow01 
630f19dc624Sjohpow01 	/* Create the GPT Block descriptor for this PAS region */
631f19dc624Sjohpow01 	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
632f19dc624Sjohpow01 
633f19dc624Sjohpow01 	/* Start index of this region in L0 GPTs */
6346a00e9b0SRobert Wakim 	idx = GPT_L0_IDX(pas->base_pa);
635f19dc624Sjohpow01 
636f19dc624Sjohpow01 	/*
637f19dc624Sjohpow01 	 * Determine number of L0 GPT descriptors covered by
638f19dc624Sjohpow01 	 * this PAS region and use the count to populate these
639f19dc624Sjohpow01 	 * descriptors.
640f19dc624Sjohpow01 	 */
6416a00e9b0SRobert Wakim 	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
642f19dc624Sjohpow01 
643b99926efSAlexeiFedorov 	/* Generate the needed block descriptors */
644f19dc624Sjohpow01 	for (; idx < end_idx; idx++) {
645f19dc624Sjohpow01 		l0_gpt_arr[idx] = gpt_desc;
646ec0088bbSAlexeiFedorov 		VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
647f19dc624Sjohpow01 			idx, &l0_gpt_arr[idx],
648f19dc624Sjohpow01 			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
649f19dc624Sjohpow01 			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
650f19dc624Sjohpow01 	}
651f19dc624Sjohpow01 }
652f19dc624Sjohpow01 
653f19dc624Sjohpow01 /*
654f19dc624Sjohpow01  * Helper function to determine if the end physical address lies in the same L0
655f19dc624Sjohpow01  * region as the current physical address. If true, the end physical address is
656f19dc624Sjohpow01  * returned else, the start address of the next region is returned.
657f19dc624Sjohpow01  *
658f19dc624Sjohpow01  * Parameters
659f19dc624Sjohpow01  *   cur_pa		Physical address of the current PA in the loop through
660f19dc624Sjohpow01  *			the range.
661f19dc624Sjohpow01  *   end_pa		Physical address of the end PA in a PAS range.
662f19dc624Sjohpow01  *
663f19dc624Sjohpow01  * Return
664f19dc624Sjohpow01  *   The PA of the end of the current range.
665f19dc624Sjohpow01  */
get_l1_end_pa(uintptr_t cur_pa,uintptr_t end_pa)66620e2683dSAlexeiFedorov static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
667f19dc624Sjohpow01 {
668f19dc624Sjohpow01 	uintptr_t cur_idx;
669f19dc624Sjohpow01 	uintptr_t end_idx;
670f19dc624Sjohpow01 
6716a00e9b0SRobert Wakim 	cur_idx = GPT_L0_IDX(cur_pa);
6726a00e9b0SRobert Wakim 	end_idx = GPT_L0_IDX(end_pa);
673f19dc624Sjohpow01 
674f19dc624Sjohpow01 	assert(cur_idx <= end_idx);
675f19dc624Sjohpow01 
676f19dc624Sjohpow01 	if (cur_idx == end_idx) {
677f19dc624Sjohpow01 		return end_pa;
678f19dc624Sjohpow01 	}
679f19dc624Sjohpow01 
680ec0088bbSAlexeiFedorov 	return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
681f19dc624Sjohpow01 }
682f19dc624Sjohpow01 
/*
 * Helper function to fill out GPI entries from 'first' granule address of
 * the specified 'length' in a single L1 table with 'l1_desc' Contiguous
 * descriptor.
 *
 * The largest supported contiguous block size (512MB, 32MB or 2MB, depending
 * on RME_GPT_MAX_BLOCK) that fits the range at 'first' is used.
 *
 * Parameters
 *   l1			Pointer to L1 table to fill out
 *   first		Address of first granule in range
 *   length		Length of the range in bytes
 *   gpi		GPI set this range to
 *
 * Return
 *   Address of next granule in range (first + block size), or 'first'
 *   unchanged if no supported contiguous block size fits at 'first'.
 */
__unused static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
					    size_t length, unsigned int gpi)
{
	/*
	 * Look up table for contiguous blocks and descriptors.
	 * Entries should be defined in descending block sizes:
	 * 512MB, 32MB and 2MB.
	 */
	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
#if (RME_GPT_MAX_BLOCK == 512)
		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
#endif
#if (RME_GPT_MAX_BLOCK >= 32)
		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
#endif
#if (RME_GPT_MAX_BLOCK != 0)
		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
#endif
	};

	/*
	 * Iterate through all block sizes (512MB, 32MB and 2MB)
	 * starting with maximum supported. The first block size for which
	 * the range is both large enough and suitably aligned wins.
	 */
	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
		/* Calculate index of 'first' within this L1 table */
		unsigned long idx = GPT_L1_INDEX(first);

		/* Contiguous block size */
		size_t cont_size = gpt_fill_lookup[i].size;

		if (GPT_REGION_IS_CONT(length, first, cont_size)) {

			/* Generate Contiguous descriptor */
			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
						gpt_fill_lookup[i].desc);

			/* Number of 128-bit L1 entries in block */
			unsigned int cnt;

			switch (cont_size) {
			case SZ_512M:
				cnt = L1_QWORDS_512MB;
				break;
			case SZ_32M:
				cnt = L1_QWORDS_32MB;
				break;
			default:			/* SZ_2MB */
				cnt = L1_QWORDS_2MB;
			}

			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
				first, cont_size / SZ_1M);

			/* Fill Contiguous descriptors */
			fill_desc(&l1[idx], l1_desc, cnt);
			return (first + cont_size);
		}
	}

	/* No supported contiguous block fits at 'first' */
	return first;
}
759ec0088bbSAlexeiFedorov 
/* Build Granules descriptor with the same 'gpi' for every GPI entry */
static uint64_t build_l1_desc(unsigned int gpi)
{
	/* Start with the 4-bit GPI in the lowest nibble */
	uint64_t desc = (uint64_t)gpi;

	/*
	 * Double the populated width each step (4 -> 8 -> 16 -> 32 bits)
	 * until the GPI is replicated across all 16 nibbles.
	 */
	for (unsigned int shift = 4U; shift < 64U; shift <<= 1) {
		desc |= desc << shift;
	}

	return desc;
}
769ec0088bbSAlexeiFedorov 
/*
 * Helper function to fill out GPI entries from 'first' to 'last' granule
 * address in a single L1 table with 'l1_desc' Granules descriptor.
 *
 * Each 64-bit L1 entry holds 16 4-bit GPI fields; partial entries at either
 * end of the range are handled with a bit mask so that neighbouring GPIs in
 * the same entry are left untouched.
 *
 * Parameters
 *   l1			Pointer to L1 table to fill out
 *   first		Address of first granule in range
 *   last		Address of last granule in range (inclusive)
 *   gpi		GPI set this range to
 *
 * Return
 *   Address of next granule in range.
 */
static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
				   uintptr_t last, unsigned int gpi)
{
	uint64_t gpi_mask;
	unsigned long i;

	/* Generate Granules descriptor: 'gpi' replicated in all 16 fields */
	uint64_t l1_desc = build_l1_desc(gpi);

	/*
	 * Shift the mask if we're starting in the middle of an L1 entry:
	 * clear mask bits below the first granule's 4-bit GPI field.
	 */
	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region */
	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {

		/*
		 * Account for stopping in the middle of an L1 entry:
		 * clear mask bits above the last granule's GPI field
		 * (15 is the highest GPI field index in a 64-bit entry).
		 */
		if (i == GPT_L1_INDEX(last)) {
			gpi_mask &= (gpi_mask >> ((15U -
				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		/* The fields being overwritten must still be GPI_ANY */
		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));

		/* Write GPI values only in the masked fields */
		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);

		/* Reset mask: subsequent entries are fully covered */
		gpi_mask = ULONG_MAX;
	}

	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
}
815ec0088bbSAlexeiFedorov 
/*
 * Helper function to fill out GPI entries in a single L1 table.
 * This function fills out an entire L1 table with either Granules or Contiguous
 * (RME_GPT_MAX_BLOCK != 0) descriptors depending on region length and alignment.
 * Note. If RME_GPT_MAX_BLOCK == 0, then the L1 tables are filled with regular
 * Granules descriptors.
 *
 * Parameters
 *   l1			Pointer to L1 table to fill out
 *   first		Address of first granule in range
 *   last		Address of last granule in range (inclusive)
 *   gpi		GPI set this range to
 */
static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
			unsigned int gpi)
{
	/* Both bounds must be granule-aligned and within the same L0 region */
	assert(l1 != NULL);
	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));

#if (RME_GPT_MAX_BLOCK != 0)
	/* Advance 'first' chunk by chunk until the whole range is filled */
	while (first <= last) {
		/* Region length ('last' is inclusive, hence the extra granule) */
		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		if (length < SZ_2M) {
			/*
			 * Fill with Granule descriptors in case of
			 * region length < 2MB.
			 */
			first = fill_l1_gran_desc(l1, first, last, gpi);

		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
			/*
			 * For region length >= 2MB and at least 2MB aligned
			 * call to fill_l1_cont_desc will iterate through
			 * all block sizes (512MB, 32MB and 2MB) supported and
			 * fill corresponding Contiguous descriptors.
			 */
			first = fill_l1_cont_desc(l1, first, length, gpi);
		} else {
			/*
			 * For not aligned region >= 2MB fill with Granules
			 * descriptors up to the next 2MB aligned address.
			 */
			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
					GPT_PGS_ACTUAL_SIZE(gpt_config.p);

			first = fill_l1_gran_desc(l1, first, new_last, gpi);
		}
	}
#else
	/* Fill with Granule descriptors */
	first = fill_l1_gran_desc(l1, first, last, gpi);
#endif
	/* The helpers must have consumed exactly the requested range */
	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
}
875f19dc624Sjohpow01 
876f19dc624Sjohpow01 /*
877f19dc624Sjohpow01  * This function finds the next available unused L1 table and initializes all
878f19dc624Sjohpow01  * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
879f19dc624Sjohpow01  * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
880f19dc624Sjohpow01  * event that a PAS region stops midway through an L1 table, thus guaranteeing
881f19dc624Sjohpow01  * that all memory not explicitly assigned is GPI_ANY. This function does not
882f19dc624Sjohpow01  * check for overflow conditions, that should be done by the caller.
883f19dc624Sjohpow01  *
884f19dc624Sjohpow01  * Return
885f19dc624Sjohpow01  *   Pointer to the next available L1 table.
886f19dc624Sjohpow01  */
get_new_l1_tbl(void)88720e2683dSAlexeiFedorov static uint64_t *get_new_l1_tbl(void)
888f19dc624Sjohpow01 {
889b99926efSAlexeiFedorov 	/* Retrieve the next L1 table */
890ec0088bbSAlexeiFedorov 	uint64_t *l1 = (uint64_t *)gpt_l1_tbl;
891f19dc624Sjohpow01 
892ec0088bbSAlexeiFedorov 	/* Increment L1 GPT address */
893ec0088bbSAlexeiFedorov 	gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);
894f19dc624Sjohpow01 
895f19dc624Sjohpow01 	/* Initialize all GPIs to GPT_GPI_ANY */
896f19dc624Sjohpow01 	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
897ec0088bbSAlexeiFedorov 		l1[i] = GPT_L1_ANY_DESC;
898f19dc624Sjohpow01 	}
899f19dc624Sjohpow01 
900f19dc624Sjohpow01 	return l1;
901f19dc624Sjohpow01 }
902f19dc624Sjohpow01 
/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region.
 */
static void generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx, gpi;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Get GPI */
	gpi = GPT_PAS_ATTR_GPI(pas->attrs);

	/* Iterate over each L0 region in this memory range */
	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space */
			l1_gpt_arr = get_new_l1_tbl();

			/*
			 * Fill out the L0 descriptor (cache maintenance for
			 * the L0 entries is handled separately, e.g. by
			 * flush_l0_for_pas_array).
			 */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 * get_l1_end_pa() gives the first address past the portion of
		 * the PAS covered by this L0 region; back off one granule.
		 */
		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);

		/* Advance cur_pa to first granule in next L0 region */
		cur_pa = get_l1_end_pa(cur_pa, end_pa);
	}
}
975f19dc624Sjohpow01 
976f19dc624Sjohpow01 /*
977f19dc624Sjohpow01  * This function flushes a range of L0 descriptors used by a given PAS region
978f19dc624Sjohpow01  * array. There is a chance that some unmodified L0 descriptors would be flushed
979f19dc624Sjohpow01  * in the case that there are "holes" in an array of PAS regions but overall
980f19dc624Sjohpow01  * this should be faster than individually flushing each modified L0 descriptor
981f19dc624Sjohpow01  * as they are created.
982f19dc624Sjohpow01  *
983f19dc624Sjohpow01  * Parameters
984f19dc624Sjohpow01  *   *pas		Pointer to an array of PAS regions.
985f19dc624Sjohpow01  *   pas_count		Number of entries in the PAS array.
986f19dc624Sjohpow01  */
flush_l0_for_pas_array(pas_region_t * pas,unsigned int pas_count)987f19dc624Sjohpow01 static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
988f19dc624Sjohpow01 {
989ec0088bbSAlexeiFedorov 	unsigned long idx;
990ec0088bbSAlexeiFedorov 	unsigned long start_idx;
991ec0088bbSAlexeiFedorov 	unsigned long end_idx;
992f19dc624Sjohpow01 	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
993f19dc624Sjohpow01 
994f19dc624Sjohpow01 	assert(pas != NULL);
995b99926efSAlexeiFedorov 	assert(pas_count != 0U);
996f19dc624Sjohpow01 
997b99926efSAlexeiFedorov 	/* Initial start and end values */
998f19dc624Sjohpow01 	start_idx = GPT_L0_IDX(pas[0].base_pa);
999b99926efSAlexeiFedorov 	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);
1000f19dc624Sjohpow01 
1001b99926efSAlexeiFedorov 	/* Find lowest and highest L0 indices used in this PAS array */
1002ec0088bbSAlexeiFedorov 	for (idx = 1UL; idx < pas_count; idx++) {
1003f19dc624Sjohpow01 		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
1004f19dc624Sjohpow01 			start_idx = GPT_L0_IDX(pas[idx].base_pa);
1005f19dc624Sjohpow01 		}
1006b99926efSAlexeiFedorov 		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
1007b99926efSAlexeiFedorov 			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
1008f19dc624Sjohpow01 		}
1009f19dc624Sjohpow01 	}
1010f19dc624Sjohpow01 
1011f19dc624Sjohpow01 	/*
1012f19dc624Sjohpow01 	 * Flush all covered L0 descriptors, add 1 because we need to include
1013f19dc624Sjohpow01 	 * the end index value.
1014f19dc624Sjohpow01 	 */
1015f19dc624Sjohpow01 	flush_dcache_range((uintptr_t)&l0[start_idx],
1016ec0088bbSAlexeiFedorov 			   ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
1017f19dc624Sjohpow01 }
1018f19dc624Sjohpow01 
/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * The enable sequence is: program GPTBR_EL3, prepopulate GPCCR_EL3 without
 * the GPC bit, invalidate TLBs, then write GPCCR_EL3 again with GPC set and
 * invalidate once more.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Enable NSP and SA if FEAT_RME_GDI is implemented */
	if (is_feat_rme_gdi_supported()) {
		gpccr_el3 |= GPCCR_NSP_BIT;
		gpccr_el3 |= GPCCR_SA_BIT;
	}

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* Enable NSO encoding if FEAT_RME_GPC2 is supported. */
	if (is_feat_rme_gpc2_present()) {
		gpccr_el3 |= GPCCR_NSO_BIT;
	}

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
	write_gpccr_el3(gpccr_el3);
	isb();
	/* Invalidate again now that GPC is live */
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
1093f19dc624Sjohpow01 
1094f19dc624Sjohpow01 /*
1095f19dc624Sjohpow01  * Public API that initializes the entire protected space to GPT_GPI_ANY using
1096f19dc624Sjohpow01  * the L0 tables (block descriptors). Ideally, this function is invoked prior
gpt_disable(void)1097f19dc624Sjohpow01  * to DDR discovery and initialization. The MMU must be initialized before
1098f19dc624Sjohpow01  * calling this function.
1099f19dc624Sjohpow01  *
1100f19dc624Sjohpow01  * Parameters
1101f19dc624Sjohpow01  *   pps		PPS value to use for table generation
1102f19dc624Sjohpow01  *   l0_mem_base	Base address of L0 tables in memory.
1103f19dc624Sjohpow01  *   l0_mem_size	Total size of memory available for L0 tables.
1104f19dc624Sjohpow01  *
1105f19dc624Sjohpow01  * Return
1106f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1107f19dc624Sjohpow01  */
1108a0d5147bSAlexeiFedorov int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
1109f19dc624Sjohpow01 		       size_t l0_mem_size)
1110f19dc624Sjohpow01 {
1111f19dc624Sjohpow01 	uint64_t gpt_desc;
1112ec0088bbSAlexeiFedorov 	int ret;
1113f19dc624Sjohpow01 
1114b99926efSAlexeiFedorov 	/* Ensure that MMU and Data caches are enabled */
1115b0f1c840SAlexeiFedorov 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
1116f19dc624Sjohpow01 
1117b99926efSAlexeiFedorov 	/* Validate other parameters */
111820e2683dSAlexeiFedorov 	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
11196a00e9b0SRobert Wakim 	if (ret != 0) {
1120f19dc624Sjohpow01 		return ret;
1121f19dc624Sjohpow01 	}
1122f19dc624Sjohpow01 
1123b99926efSAlexeiFedorov 	/* Create the descriptor to initialize L0 entries with */
1124f19dc624Sjohpow01 	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
1125f19dc624Sjohpow01 
1126f19dc624Sjohpow01 	/* Iterate through all L0 entries */
1127f19dc624Sjohpow01 	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
1128f19dc624Sjohpow01 		((uint64_t *)l0_mem_base)[i] = gpt_desc;
1129f19dc624Sjohpow01 	}
1130f19dc624Sjohpow01 
1131b0f1c840SAlexeiFedorov 	/* Flush updated L0 table to memory */
1132b0f1c840SAlexeiFedorov 	flush_dcache_range((uintptr_t)l0_mem_base, GPT_L0_TABLE_SIZE(gpt_config.t));
1133f19dc624Sjohpow01 
1134b99926efSAlexeiFedorov 	/* Stash the L0 base address once initial setup is complete */
1135f19dc624Sjohpow01 	gpt_config.plat_gpt_l0_base = l0_mem_base;
1136f19dc624Sjohpow01 
1137f19dc624Sjohpow01 	return 0;
1138f19dc624Sjohpow01 }
1139f19dc624Sjohpow01 
1140f19dc624Sjohpow01 /*
1141f19dc624Sjohpow01  * Public API that carves out PAS regions from the L0 tables and builds any L1
1142f19dc624Sjohpow01  * tables that are needed. This function ideally is run after DDR discovery and
1143f19dc624Sjohpow01  * initialization. The L0 tables must have already been initialized to GPI_ANY
1144f19dc624Sjohpow01  * when this function is called.
1145f19dc624Sjohpow01  *
1146f19dc624Sjohpow01  * This function can be called multiple times with different L1 memory ranges
1147f19dc624Sjohpow01  * and PAS regions if it is desirable to place L1 tables in different locations
1148f19dc624Sjohpow01  * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
1149ec0088bbSAlexeiFedorov  * in the DDR bank that they control).
1150f19dc624Sjohpow01  *
1151f19dc624Sjohpow01  * Parameters
1152f19dc624Sjohpow01  *   pgs		PGS value to use for table generation.
1153f19dc624Sjohpow01  *   l1_mem_base	Base address of memory used for L1 tables.
1154f19dc624Sjohpow01  *   l1_mem_size	Total size of memory available for L1 tables.
1155f19dc624Sjohpow01  *   *pas_regions	Pointer to PAS regions structure array.
1156f19dc624Sjohpow01  *   pas_count		Total number of PAS regions.
1157f19dc624Sjohpow01  *
1158f19dc624Sjohpow01  * Return
1159f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1160f19dc624Sjohpow01  */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int l1_gpt_cnt, ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* PGS is needed for validate_pas_mappings so check it now */
	if (pgs > GPT_PGS_MAX) {
		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/*
	 * Check if L1 GPTs are required and how many. A negative value is
	 * an error code from PAS region validation and is returned as-is.
	 */
	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters */
	if (l1_gpt_cnt > 0) {
		ret = validate_l1_params(l1_mem_base, l1_mem_size,
					(unsigned int)l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation */
		gpt_l1_tbl = l1_mem_base;
	}

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	INFO("GPT: Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: %u\n", pas_count);
	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables so the GPC hardware sees them */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if any were generated above */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   (size_t)l1_gpt_cnt);
	}

	/*
	 * Make sure that all the entries are written to the memory, then
	 * invalidate any stale GPT information cached in TLBs (TLBI PAALLOS)
	 * before returning.
	 */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
1252f19dc624Sjohpow01 
/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Parameters
 *   l1_bitlocks_base	Base address of memory for L1 tables bitlocks.
 *   l1_bitlocks_size	Total size of memory available for L1 tables bitlocks.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(uintptr_t l1_bitlocks_base, size_t l1_bitlocks_size)
{
	u_register_t reg;
	/* Only used when RME_GPT_BITLOCK_BLOCK != 0 */
	__unused size_t locks_size;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Ensure GPC are already enabled */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0UL) {
		ERROR("GPT: Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR, we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 * The BADDR field is decoded back into a physical address.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values and derive P and T from them */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/*
	 * Size of GPT bitlocks in bytes for the protected address space
	 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
	 */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
			(RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);
	/*
	 * If protected space size is less than the size covered
	 * by 'bitlock' structure, check for a single bitlock.
	 */
	if (locks_size < LOCK_SIZE) {
		locks_size = LOCK_SIZE;
	/* Check bitlocks array size */
	} else if (locks_size > l1_bitlocks_size) {
		ERROR("GPT: Inadequate GPT bitlocks memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
			locks_size, l1_bitlocks_size);
		return -ENOMEM;
	}

	gpt_bitlock = (bitlock_t *)l1_bitlocks_base;

	/* Initialise GPT bitlocks to the unlocked state */
	(void)memset((void *)gpt_bitlock, 0, locks_size);

	/* Flush GPT bitlocks to memory */
	flush_dcache_range((uintptr_t)gpt_bitlock, locks_size);
#endif /* RME_GPT_BITLOCK_BLOCK */

	VERBOSE("GPT: Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
#if (RME_GPT_BITLOCK_BLOCK != 0)
	VERBOSE("  Bitlocks:  0x%"PRIxPTR"/0x%lx\n", (uintptr_t)gpt_bitlock,
					locks_size);
#endif
	return 0;
}
1345f19dc624Sjohpow01 
1346f19dc624Sjohpow01 /*
13476a00e9b0SRobert Wakim  * A helper to write the value (target_pas << gpi_shift) to the index of
1348b99926efSAlexeiFedorov  * the gpt_l1_addr.
13496a00e9b0SRobert Wakim  */
13506a00e9b0SRobert Wakim static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
13516a00e9b0SRobert Wakim 			     unsigned int gpi_shift, unsigned int idx,
13526a00e9b0SRobert Wakim 			     unsigned int target_pas)
13536a00e9b0SRobert Wakim {
13546a00e9b0SRobert Wakim 	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
13556a00e9b0SRobert Wakim 	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
13566a00e9b0SRobert Wakim 	gpt_l1_addr[idx] = *gpt_l1_desc;
1357ec0088bbSAlexeiFedorov 
1358ec0088bbSAlexeiFedorov 	dsboshst();
13596a00e9b0SRobert Wakim }
13606a00e9b0SRobert Wakim 
/*
 * Helper to retrieve the gpt_l1_* information from the base address
 * returned in gpi_info.
 *
 * Parameters
 *   base	Physical address of the granule to look up.
 *   gpi_info	Output structure filled with the L1 table address, the L1
 *		entry index, the GPI bit offset within that entry and, when
 *		bitlocks are enabled, the bitlock pointer/mask for the
 *		block containing 'base'.
 *
 * Return
 *   0 on success, -EINVAL when 'base' is not covered by an L0 table
 *   descriptor (i.e. there is no L1 table for this range).
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;
	/* Only used when RME_GPT_BITLOCK_BLOCK != 0 */
	__unused unsigned int block_idx;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from PA */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
	/* Convert the GPI index into a bit offset (x4, GPIs are 4 bits wide) */
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Index of the RME_GPT_BITLOCK_BLOCK * 512MB block containing 'base' */
	block_idx = (unsigned int)(base / (RME_GPT_BITLOCK_BLOCK * SZ_512M));

	/* Bitlock address and mask for that block */
	gpi_info->lock = &gpt_bitlock[block_idx / LOCK_BITS];
	gpi_info->mask = 1U << (block_idx & (LOCK_BITS - 1U));
#endif
	return 0;
}
13936a00e9b0SRobert Wakim 
13946a00e9b0SRobert Wakim /*
1395ec0088bbSAlexeiFedorov  * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
1396d766084fSAlexeiFedorov  * This function is called with bitlock or spinlock acquired.
1397ec0088bbSAlexeiFedorov  */
1398ec0088bbSAlexeiFedorov static void read_gpi(gpi_info_t *gpi_info)
1399ec0088bbSAlexeiFedorov {
1400ec0088bbSAlexeiFedorov 	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
1401ec0088bbSAlexeiFedorov 
1402ec0088bbSAlexeiFedorov 	if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
1403ec0088bbSAlexeiFedorov 				 GPT_L1_TYPE_CONT_DESC) {
1404ec0088bbSAlexeiFedorov 		/* Read GPI from Contiguous descriptor */
1405ec0088bbSAlexeiFedorov 		gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
1406ec0088bbSAlexeiFedorov 	} else {
1407ec0088bbSAlexeiFedorov 		/* Read GPI from Granules descriptor */
1408ec0088bbSAlexeiFedorov 		gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
1409ec0088bbSAlexeiFedorov 						GPT_L1_GRAN_DESC_GPI_MASK);
read_gpi(gpi_info_t * gpi_info)1410ec0088bbSAlexeiFedorov 	}
1411ec0088bbSAlexeiFedorov }
1412ec0088bbSAlexeiFedorov 
1413ec0088bbSAlexeiFedorov static void flush_page_to_popa(uintptr_t addr)
1414ec0088bbSAlexeiFedorov {
1415ec0088bbSAlexeiFedorov 	size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);
1416ec0088bbSAlexeiFedorov 
1417ec0088bbSAlexeiFedorov 	if (is_feat_mte2_supported()) {
1418ec0088bbSAlexeiFedorov 		flush_dcache_to_popa_range_mte2(addr, size);
1419ec0088bbSAlexeiFedorov 	} else {
1420ec0088bbSAlexeiFedorov 		flush_dcache_to_popa_range(addr, size);
1421ec0088bbSAlexeiFedorov 	}
1422ec0088bbSAlexeiFedorov }
1423ec0088bbSAlexeiFedorov 
1424ec0088bbSAlexeiFedorov /*
flush_page_to_popa(uintptr_t addr)1425ec0088bbSAlexeiFedorov  * Helper function to check if all L1 entries in 2MB block have
1426ec0088bbSAlexeiFedorov  * the same Granules descriptor value.
1427ec0088bbSAlexeiFedorov  *
1428ec0088bbSAlexeiFedorov  * Parameters
1429ec0088bbSAlexeiFedorov  *   base		Base address of the region to be checked
1430ec0088bbSAlexeiFedorov  *   gpi_info		Pointer to 'gpt_config_t' structure
1431ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules descriptor with all entries
1432ec0088bbSAlexeiFedorov  *			set to the same GPI.
1433ec0088bbSAlexeiFedorov  *
1434ec0088bbSAlexeiFedorov  * Return
1435ec0088bbSAlexeiFedorov  *   true if L1 all entries have the same descriptor value, false otherwise.
1436ec0088bbSAlexeiFedorov  */
1437ec0088bbSAlexeiFedorov __unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1438ec0088bbSAlexeiFedorov 					uint64_t l1_desc)
1439ec0088bbSAlexeiFedorov {
1440ec0088bbSAlexeiFedorov 	/* Last L1 entry index in 2MB block */
1441ec0088bbSAlexeiFedorov 	unsigned int long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
1442ec0088bbSAlexeiFedorov 						gpt_l1_cnt_2mb - 1UL;
1443ec0088bbSAlexeiFedorov 
1444ec0088bbSAlexeiFedorov 	/* Number of L1 entries in 2MB block */
1445ec0088bbSAlexeiFedorov 	unsigned int cnt = gpt_l1_cnt_2mb;
1446ec0088bbSAlexeiFedorov 
1447ec0088bbSAlexeiFedorov 	/*
1448ec0088bbSAlexeiFedorov 	 * Start check from the last L1 entry and continue until the first
1449ec0088bbSAlexeiFedorov 	 * non-matching to the passed Granules descriptor value is found.
1450ec0088bbSAlexeiFedorov 	 */
1451ec0088bbSAlexeiFedorov 	while (cnt-- != 0U) {
1452ec0088bbSAlexeiFedorov 		if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
1453ec0088bbSAlexeiFedorov 			/* Non-matching L1 entry found */
1454ec0088bbSAlexeiFedorov 			return false;
1455ec0088bbSAlexeiFedorov 		}
1456ec0088bbSAlexeiFedorov 	}
1457ec0088bbSAlexeiFedorov 
1458ec0088bbSAlexeiFedorov 	return true;
1459ec0088bbSAlexeiFedorov }
1460ec0088bbSAlexeiFedorov 
1461ec0088bbSAlexeiFedorov __unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1462ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
1463ec0088bbSAlexeiFedorov {
1464ec0088bbSAlexeiFedorov 	/* L1 entry index of the start of 2MB block */
1465ec0088bbSAlexeiFedorov 	unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));
1466ec0088bbSAlexeiFedorov 
1467ec0088bbSAlexeiFedorov 	/* 2MB Contiguous descriptor */
1468ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
1469ec0088bbSAlexeiFedorov 
1470ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1471ec0088bbSAlexeiFedorov 
1472ec0088bbSAlexeiFedorov 	fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
fuse_2mb(uint64_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)1473ec0088bbSAlexeiFedorov }
1474ec0088bbSAlexeiFedorov 
1475ec0088bbSAlexeiFedorov /*
1476ec0088bbSAlexeiFedorov  * Helper function to check if all 1st L1 entries of 2MB blocks
1477ec0088bbSAlexeiFedorov  * in 32MB have the same 2MB Contiguous descriptor value.
1478ec0088bbSAlexeiFedorov  *
1479ec0088bbSAlexeiFedorov  * Parameters
1480ec0088bbSAlexeiFedorov  *   base		Base address of the region to be checked
1481ec0088bbSAlexeiFedorov  *   gpi_info		Pointer to 'gpt_config_t' structure
1482ec0088bbSAlexeiFedorov  *   l1_desc		GPT Granules descriptor.
1483ec0088bbSAlexeiFedorov  *
1484ec0088bbSAlexeiFedorov  * Return
1485ec0088bbSAlexeiFedorov  *   true if all L1 entries have the same descriptor value, false otherwise.
1486ec0088bbSAlexeiFedorov  */
1487ec0088bbSAlexeiFedorov __unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
1488ec0088bbSAlexeiFedorov 					uint64_t l1_desc)
1489ec0088bbSAlexeiFedorov {
1490ec0088bbSAlexeiFedorov 	/* The 1st L1 entry index of the last 2MB block in 32MB */
1491ec0088bbSAlexeiFedorov 	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
1492ec0088bbSAlexeiFedorov 					(15UL * gpt_l1_cnt_2mb);
1493ec0088bbSAlexeiFedorov 
1494ec0088bbSAlexeiFedorov 	/* 2MB Contiguous descriptor */
1495ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
1496ec0088bbSAlexeiFedorov 
1497ec0088bbSAlexeiFedorov 	/* Number of 2MB blocks in 32MB */
1498ec0088bbSAlexeiFedorov 	unsigned int cnt = 16U;
1499ec0088bbSAlexeiFedorov 
1500ec0088bbSAlexeiFedorov 	/* Set the first L1 entry to 2MB Contiguous descriptor */
1501ec0088bbSAlexeiFedorov 	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;
1502ec0088bbSAlexeiFedorov 
1503ec0088bbSAlexeiFedorov 	/*
1504ec0088bbSAlexeiFedorov 	 * Start check from the 1st L1 entry of the last 2MB block and
1505ec0088bbSAlexeiFedorov 	 * continue until the first non-matching to 2MB Contiguous descriptor
1506ec0088bbSAlexeiFedorov 	 * value is found.
1507ec0088bbSAlexeiFedorov 	 */
1508ec0088bbSAlexeiFedorov 	while (cnt-- != 0U) {
1509ec0088bbSAlexeiFedorov 		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
1510ec0088bbSAlexeiFedorov 			/* Non-matching L1 entry found */
1511ec0088bbSAlexeiFedorov 			return false;
1512ec0088bbSAlexeiFedorov 		}
1513ec0088bbSAlexeiFedorov 		idx -= gpt_l1_cnt_2mb;
1514ec0088bbSAlexeiFedorov 	}
1515ec0088bbSAlexeiFedorov 
1516ec0088bbSAlexeiFedorov 	return true;
1517ec0088bbSAlexeiFedorov }
1518ec0088bbSAlexeiFedorov 
1519ec0088bbSAlexeiFedorov __unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
1520ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
1521ec0088bbSAlexeiFedorov {
1522ec0088bbSAlexeiFedorov 	/* L1 entry index of the start of 32MB block */
1523ec0088bbSAlexeiFedorov 	unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));
1524ec0088bbSAlexeiFedorov 
1525ec0088bbSAlexeiFedorov 	/* 32MB Contiguous descriptor */
1526ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
1527ec0088bbSAlexeiFedorov 
1528ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1529ec0088bbSAlexeiFedorov 
1530ec0088bbSAlexeiFedorov 	fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
fuse_32mb(uint64_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)1531ec0088bbSAlexeiFedorov }
1532ec0088bbSAlexeiFedorov 
/*
 * Helper function to check if all 1st L1 entries of 32MB blocks
 * in 512MB have the same 32MB Contiguous descriptor value.
 *
 * Parameters
 *   base		Base address of the region to be checked
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
					uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
					(15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/*
	 * Set the 1st L1 entry of the current 32MB block to the 32MB
	 * Contiguous descriptor so that it matches in the scan below.
	 */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 32MB block and
	 * continue until the first non-matching to 32MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}
1576ec0088bbSAlexeiFedorov 
1577ec0088bbSAlexeiFedorov __unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
1578ec0088bbSAlexeiFedorov 				uint64_t l1_desc)
1579ec0088bbSAlexeiFedorov {
1580ec0088bbSAlexeiFedorov 	/* L1 entry index of the start of 512MB block */
1581ec0088bbSAlexeiFedorov 	unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));
1582ec0088bbSAlexeiFedorov 
1583ec0088bbSAlexeiFedorov 	/* 512MB Contiguous descriptor */
1584ec0088bbSAlexeiFedorov 	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);
1585ec0088bbSAlexeiFedorov 
1586ec0088bbSAlexeiFedorov 	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1587ec0088bbSAlexeiFedorov 
1588ec0088bbSAlexeiFedorov 	fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
fuse_512mb(uint64_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)1589ec0088bbSAlexeiFedorov }
1590ec0088bbSAlexeiFedorov 
/*
 * Helper function to convert GPI entries in a single L1 table
 * from Granules to Contiguous descriptor.
 *
 * Fusing is attempted at progressively larger block sizes
 * (2MB -> 32MB -> 512MB), bounded at compile time by RME_GPT_MAX_BLOCK;
 * the largest uniformly-mapped size wins.
 *
 * Parameters
 *   base		Base address of the region to be written
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor with all entries
 *			set to the same GPI.
 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with check for 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* Check for 2MB fusing failed, nothing to do */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	/* 2MB is the configured maximum, fuse to 2MB */
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* Check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	/* 32MB is the configured maximum, fuse to 32MB */
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* Check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);

#endif	/* RME_GPT_MAX_BLOCK == 32 */
#endif	/* RME_GPT_MAX_BLOCK == 2 */
}
1636ec0088bbSAlexeiFedorov 
/*
 * Helper function to convert GPI entries in a single L1 table
 * from Contiguous to Granules descriptor. This function updates
 * descriptor to Granules in passed 'gpi_info_t' structure as
 * the result of shattering.
 *
 * Parameters
 *   base		Base address of the region to be written
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor set this range to.
 */
__unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
				   uint64_t l1_desc)
{
	/* Look-up table for 2MB, 32MB and 512MB blocks shattering */
	static const gpt_shatter_func gpt_shatter_lookup[] = {
		shatter_2mb,
		shatter_32mb,
		shatter_512mb
	};

	/* Look-up table for invalidation TLBs for 2MB, 32MB and 512MB blocks */
	static const gpt_tlbi_lookup_t tlbi_lookup[] = {
		{ tlbirpalos_2m, ~(SZ_2M - 1UL) },
		{ tlbirpalos_32m, ~(SZ_32M - 1UL) },
		{ tlbirpalos_512m, ~(SZ_512M - 1UL) }
	};

	/*
	 * Get shattering level from Contig field of Contiguous descriptor:
	 * Contig 1/2/3 maps to lookup index 0/1/2 (2MB/32MB/512MB).
	 */
	unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;

	/* Shatter contiguous block back into individual Granules entries */
	gpt_shatter_lookup[level](base, gpi_info, l1_desc);

	/*
	 * Invalidate TLB entries for the shattered block (address is aligned
	 * down to the block boundary via the mask) and wait for completion.
	 */
	tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
	dsbosh();

	/*
	 * Update 'gpi_info_t' structure's descriptor to Granules to reflect
	 * the shattered GPI back to caller.
	 */
	gpi_info->gpt_l1_desc = l1_desc;
}
1680ec0088bbSAlexeiFedorov 
1681ec0088bbSAlexeiFedorov /*
16826a00e9b0SRobert Wakim  * This function is the granule transition delegate service. When a granule
16836a00e9b0SRobert Wakim  * transition request occurs it is routed to this function to have the request,
1684ec0088bbSAlexeiFedorov  * if valid, fulfilled following A1.1.1 Delegate of RME supplement.
1685f19dc624Sjohpow01  *
16866a00e9b0SRobert Wakim  * TODO: implement support for transitioning multiple granules at once.
1687f19dc624Sjohpow01  *
1688f19dc624Sjohpow01  * Parameters
16896a00e9b0SRobert Wakim  *   base		Base address of the region to transition, must be
16906a00e9b0SRobert Wakim  *			aligned to granule size.
16916a00e9b0SRobert Wakim  *   size		Size of region to transition, must be aligned to granule
16926a00e9b0SRobert Wakim  *			size.
1693f19dc624Sjohpow01  *   src_sec_state	Security state of the caller.
1694f19dc624Sjohpow01  *
1695f19dc624Sjohpow01  * Return
1696f19dc624Sjohpow01  *   Negative Linux error code in the event of a failure, 0 for success.
1697f19dc624Sjohpow01  */
16986a00e9b0SRobert Wakim int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
1699f19dc624Sjohpow01 {
17006a00e9b0SRobert Wakim 	gpi_info_t gpi_info;
1701ec0088bbSAlexeiFedorov 	uint64_t nse, __unused l1_desc;
17026a00e9b0SRobert Wakim 	unsigned int target_pas;
1703ec0088bbSAlexeiFedorov 	int res;
1704f19dc624Sjohpow01 
1705b99926efSAlexeiFedorov 	/* Ensure that the tables have been set up before taking requests */
17066a00e9b0SRobert Wakim 	assert(gpt_config.plat_gpt_l0_base != 0UL);
17076a00e9b0SRobert Wakim 
1708b99926efSAlexeiFedorov 	/* Ensure that caches are enabled */
17096a00e9b0SRobert Wakim 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
gpt_delegate_pas(uint64_t base,size_t size,unsigned int src_sec_state)17106a00e9b0SRobert Wakim 
1711b99926efSAlexeiFedorov 	/* See if this is a single or a range of granule transition */
17126a00e9b0SRobert Wakim 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
1713f19dc624Sjohpow01 		return -EINVAL;
1714f19dc624Sjohpow01 	}
1715f19dc624Sjohpow01 
17166a00e9b0SRobert Wakim 	/* Check that base and size are valid */
17176a00e9b0SRobert Wakim 	if ((ULONG_MAX - base) < size) {
1718b99926efSAlexeiFedorov 		VERBOSE("GPT: Transition request address overflow!\n");
17196a00e9b0SRobert Wakim 		VERBOSE("      Base=0x%"PRIx64"\n", base);
17206a00e9b0SRobert Wakim 		VERBOSE("      Size=0x%lx\n", size);
17216a00e9b0SRobert Wakim 		return -EINVAL;
17226a00e9b0SRobert Wakim 	}
17236a00e9b0SRobert Wakim 
1724b99926efSAlexeiFedorov 	/* Make sure base and size are valid */
1725b99926efSAlexeiFedorov 	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
1726b99926efSAlexeiFedorov 	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
17276a00e9b0SRobert Wakim 	    (size == 0UL) ||
17286a00e9b0SRobert Wakim 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
1729b99926efSAlexeiFedorov 		VERBOSE("GPT: Invalid granule transition address range!\n");
17306a00e9b0SRobert Wakim 		VERBOSE("      Base=0x%"PRIx64"\n", base);
17316a00e9b0SRobert Wakim 		VERBOSE("      Size=0x%lx\n", size);
17326a00e9b0SRobert Wakim 		return -EINVAL;
17336a00e9b0SRobert Wakim 	}
17346a00e9b0SRobert Wakim 
1735ec0088bbSAlexeiFedorov 	/* Delegate request can only come from REALM or SECURE */
1736ec0088bbSAlexeiFedorov 	if ((src_sec_state != SMC_FROM_REALM) &&
1737ec0088bbSAlexeiFedorov 	    (src_sec_state != SMC_FROM_SECURE)) {
1738ec0088bbSAlexeiFedorov 		VERBOSE("GPT: Invalid caller security state 0x%x\n",
1739ec0088bbSAlexeiFedorov 							src_sec_state);
1740ec0088bbSAlexeiFedorov 		return -EINVAL;
1741ec0088bbSAlexeiFedorov 	}
1742ec0088bbSAlexeiFedorov 
1743ec0088bbSAlexeiFedorov 	if (src_sec_state == SMC_FROM_REALM) {
17446a00e9b0SRobert Wakim 		target_pas = GPT_GPI_REALM;
1745ec0088bbSAlexeiFedorov 		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
1746ec0088bbSAlexeiFedorov 		l1_desc = GPT_L1_REALM_DESC;
1747ec0088bbSAlexeiFedorov 	} else {
17486a00e9b0SRobert Wakim 		target_pas = GPT_GPI_SECURE;
1749ec0088bbSAlexeiFedorov 		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
1750ec0088bbSAlexeiFedorov 		l1_desc = GPT_L1_SECURE_DESC;
1751ec0088bbSAlexeiFedorov 	}
1752ec0088bbSAlexeiFedorov 
1753ec0088bbSAlexeiFedorov 	res = get_gpi_params(base, &gpi_info);
1754ec0088bbSAlexeiFedorov 	if (res != 0) {
1755ec0088bbSAlexeiFedorov 		return res;
17566a00e9b0SRobert Wakim 	}
17576a00e9b0SRobert Wakim 
17586a00e9b0SRobert Wakim 	/*
1759d766084fSAlexeiFedorov 	 * Access to GPT is controlled by a lock to ensure that no more
1760d766084fSAlexeiFedorov 	 * than one CPU is allowed to make changes at any given time.
17616a00e9b0SRobert Wakim 	 */
1762d766084fSAlexeiFedorov 	GPT_LOCK;
1763ec0088bbSAlexeiFedorov 	read_gpi(&gpi_info);
17646a00e9b0SRobert Wakim 
17656a00e9b0SRobert Wakim 	/* Check that the current address is in NS state */
17666a00e9b0SRobert Wakim 	if (gpi_info.gpi != GPT_GPI_NS) {
1767b99926efSAlexeiFedorov 		VERBOSE("GPT: Only Granule in NS state can be delegated.\n");
17686a00e9b0SRobert Wakim 		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
17696a00e9b0SRobert Wakim 			gpi_info.gpi);
1770d766084fSAlexeiFedorov 		GPT_UNLOCK;
1771e50fedbcSJavier Almansa Sobrino 		return -EPERM;
17726a00e9b0SRobert Wakim 	}
17736a00e9b0SRobert Wakim 
1774ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK != 0)
1775ec0088bbSAlexeiFedorov 	/* Check for Contiguous descriptor */
1776ec0088bbSAlexeiFedorov 	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
1777ec0088bbSAlexeiFedorov 					GPT_L1_TYPE_CONT_DESC) {
1778ec0088bbSAlexeiFedorov 		shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
1779f19dc624Sjohpow01 	}
1780ec0088bbSAlexeiFedorov #endif
17816a00e9b0SRobert Wakim 	/*
17826a00e9b0SRobert Wakim 	 * In order to maintain mutual distrust between Realm and Secure
17836a00e9b0SRobert Wakim 	 * states, remove any data speculatively fetched into the target
1784ec0088bbSAlexeiFedorov 	 * physical address space.
1785ec0088bbSAlexeiFedorov 	 * Issue DC CIPAPA or DC_CIGDPAPA on implementations with FEAT_MTE2.
17866a00e9b0SRobert Wakim 	 */
1787ec0088bbSAlexeiFedorov 	flush_page_to_popa(base | nse);
17886a00e9b0SRobert Wakim 
17896a00e9b0SRobert Wakim 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
17906a00e9b0SRobert Wakim 		  gpi_info.gpi_shift, gpi_info.idx, target_pas);
17916a00e9b0SRobert Wakim 
1792ec0088bbSAlexeiFedorov 	/* Ensure that all agents observe the new configuration */
1793ec0088bbSAlexeiFedorov 	tlbi_page_dsbosh(base);
17946a00e9b0SRobert Wakim 
17956a00e9b0SRobert Wakim 	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
17966a00e9b0SRobert Wakim 
1797ec0088bbSAlexeiFedorov 	/* Ensure that the scrubbed data have made it past the PoPA */
1798ec0088bbSAlexeiFedorov 	flush_page_to_popa(base | nse);
17996a00e9b0SRobert Wakim 
1800ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK != 0)
1801ec0088bbSAlexeiFedorov 	if (gpi_info.gpt_l1_desc == l1_desc) {
1802ec0088bbSAlexeiFedorov 		/* Try to fuse */
1803ec0088bbSAlexeiFedorov 		fuse_block(base, &gpi_info, l1_desc);
1804ec0088bbSAlexeiFedorov 	}
1805ec0088bbSAlexeiFedorov #endif
1806ec0088bbSAlexeiFedorov 
1807d766084fSAlexeiFedorov 	/* Unlock the lock to GPT */
1808d766084fSAlexeiFedorov 	GPT_UNLOCK;
18096a00e9b0SRobert Wakim 
18106a00e9b0SRobert Wakim 	/*
18116a00e9b0SRobert Wakim 	 * The isb() will be done as part of context
1812b99926efSAlexeiFedorov 	 * synchronization when returning to lower EL.
18136a00e9b0SRobert Wakim 	 */
1814b99926efSAlexeiFedorov 	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
18156a00e9b0SRobert Wakim 		base, gpi_info.gpi, target_pas);
1816f19dc624Sjohpow01 
1817f19dc624Sjohpow01 	return 0;
1818f19dc624Sjohpow01 }
1819f19dc624Sjohpow01 
1820f19dc624Sjohpow01 /*
18216a00e9b0SRobert Wakim  * This function is the granule transition undelegate service. When a granule
1822f19dc624Sjohpow01  * transition request occurs it is routed to this function where the request is
1823f19dc624Sjohpow01  * validated then fulfilled if possible.
1824f19dc624Sjohpow01  *
1825f19dc624Sjohpow01  * TODO: implement support for transitioning multiple granules at once.
1826f19dc624Sjohpow01  *
1827f19dc624Sjohpow01  * Parameters
1828f19dc624Sjohpow01  *   base		Base address of the region to transition, must be
1829f19dc624Sjohpow01  *			aligned to granule size.
1830f19dc624Sjohpow01  *   size		Size of region to transition, must be aligned to granule
1831f19dc624Sjohpow01  *			size.
1832f19dc624Sjohpow01  *   src_sec_state	Security state of the caller.
1833f19dc624Sjohpow01  *
1834f19dc624Sjohpow01  * Return
1835f19dc624Sjohpow01  *    Negative Linux error code in the event of a failure, 0 for success.
1836f19dc624Sjohpow01  */
18376a00e9b0SRobert Wakim int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
1838f19dc624Sjohpow01 {
18396a00e9b0SRobert Wakim 	gpi_info_t gpi_info;
1840ec0088bbSAlexeiFedorov 	uint64_t nse, __unused l1_desc;
18416a00e9b0SRobert Wakim 	int res;
1842f19dc624Sjohpow01 
1843b99926efSAlexeiFedorov 	/* Ensure that the tables have been set up before taking requests */
18446a00e9b0SRobert Wakim 	assert(gpt_config.plat_gpt_l0_base != 0UL);
1845f19dc624Sjohpow01 
1846b99926efSAlexeiFedorov 	/* Ensure that MMU and caches are enabled */
18476a00e9b0SRobert Wakim 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
184877612b90SSoby Mathew 
gpt_undelegate_pas(uint64_t base,size_t size,unsigned int src_sec_state)1849b99926efSAlexeiFedorov 	/* See if this is a single or a range of granule transition */
18506a00e9b0SRobert Wakim 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
18516a00e9b0SRobert Wakim 		return -EINVAL;
18526a00e9b0SRobert Wakim 	}
18536a00e9b0SRobert Wakim 
18546a00e9b0SRobert Wakim 	/* Check that base and size are valid */
1855f19dc624Sjohpow01 	if ((ULONG_MAX - base) < size) {
1856b99926efSAlexeiFedorov 		VERBOSE("GPT: Transition request address overflow!\n");
18572461bd3aSManish Pandey 		VERBOSE("      Base=0x%"PRIx64"\n", base);
1858f19dc624Sjohpow01 		VERBOSE("      Size=0x%lx\n", size);
1859f19dc624Sjohpow01 		return -EINVAL;
1860f19dc624Sjohpow01 	}
1861f19dc624Sjohpow01 
1862b99926efSAlexeiFedorov 	/* Make sure base and size are valid */
1863b99926efSAlexeiFedorov 	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
1864b99926efSAlexeiFedorov 	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
18656a00e9b0SRobert Wakim 	    (size == 0UL) ||
1866f19dc624Sjohpow01 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
1867b99926efSAlexeiFedorov 		VERBOSE("GPT: Invalid granule transition address range!\n");
18682461bd3aSManish Pandey 		VERBOSE("      Base=0x%"PRIx64"\n", base);
1869f19dc624Sjohpow01 		VERBOSE("      Size=0x%lx\n", size);
1870f19dc624Sjohpow01 		return -EINVAL;
1871f19dc624Sjohpow01 	}
1872f19dc624Sjohpow01 
18736a00e9b0SRobert Wakim 	res = get_gpi_params(base, &gpi_info);
18746a00e9b0SRobert Wakim 	if (res != 0) {
18756a00e9b0SRobert Wakim 		return res;
1876f19dc624Sjohpow01 	}
1877f19dc624Sjohpow01 
1878ec0088bbSAlexeiFedorov 	/*
1879d766084fSAlexeiFedorov 	 * Access to GPT is controlled by a lock to ensure that no more
1880d766084fSAlexeiFedorov 	 * than one CPU is allowed to make changes at any given time.
1881ec0088bbSAlexeiFedorov 	 */
1882d766084fSAlexeiFedorov 	GPT_LOCK;
1883ec0088bbSAlexeiFedorov 	read_gpi(&gpi_info);
1884ec0088bbSAlexeiFedorov 
18856a00e9b0SRobert Wakim 	/* Check that the current address is in the delegated state */
1886ec0088bbSAlexeiFedorov 	if ((src_sec_state == SMC_FROM_REALM) &&
1887ec0088bbSAlexeiFedorov 		(gpi_info.gpi == GPT_GPI_REALM)) {
1888ec0088bbSAlexeiFedorov 		l1_desc = GPT_L1_REALM_DESC;
1889ec0088bbSAlexeiFedorov 		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
1890ec0088bbSAlexeiFedorov 	} else if ((src_sec_state == SMC_FROM_SECURE) &&
1891ec0088bbSAlexeiFedorov 		(gpi_info.gpi == GPT_GPI_SECURE)) {
1892ec0088bbSAlexeiFedorov 		l1_desc = GPT_L1_SECURE_DESC;
1893ec0088bbSAlexeiFedorov 		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
1894ec0088bbSAlexeiFedorov 	} else {
1895ec0088bbSAlexeiFedorov 		VERBOSE("GPT: Only Granule in REALM or SECURE state can be undelegated\n");
1896b99926efSAlexeiFedorov 		VERBOSE("      Caller: %u Current GPI: %u\n", src_sec_state,
18976a00e9b0SRobert Wakim 			gpi_info.gpi);
1898d766084fSAlexeiFedorov 		GPT_UNLOCK;
1899e50fedbcSJavier Almansa Sobrino 		return -EPERM;
19006a00e9b0SRobert Wakim 	}
1901f19dc624Sjohpow01 
1902ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK != 0)
1903ec0088bbSAlexeiFedorov 	/* Check for Contiguous descriptor */
1904ec0088bbSAlexeiFedorov 	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
1905ec0088bbSAlexeiFedorov 					GPT_L1_TYPE_CONT_DESC) {
1906ec0088bbSAlexeiFedorov 		shatter_block(base, &gpi_info, l1_desc);
1907ec0088bbSAlexeiFedorov 	}
1908ec0088bbSAlexeiFedorov #endif
1909ec0088bbSAlexeiFedorov 	/*
1910ec0088bbSAlexeiFedorov 	 * In order to maintain mutual distrust between Realm and Secure
19116a00e9b0SRobert Wakim 	 * states, remove access now, in order to guarantee that writes
19126a00e9b0SRobert Wakim 	 * to the currently-accessible physical address space will not
19136a00e9b0SRobert Wakim 	 * later become observable.
19146a00e9b0SRobert Wakim 	 */
19156a00e9b0SRobert Wakim 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
19166a00e9b0SRobert Wakim 		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);
19176a00e9b0SRobert Wakim 
1918ec0088bbSAlexeiFedorov 	/* Ensure that all agents observe the new NO_ACCESS configuration */
1919ec0088bbSAlexeiFedorov 	tlbi_page_dsbosh(base);
19206a00e9b0SRobert Wakim 
1921ec0088bbSAlexeiFedorov 	/* Ensure that the scrubbed data have made it past the PoPA */
1922ec0088bbSAlexeiFedorov 	flush_page_to_popa(base | nse);
19236a00e9b0SRobert Wakim 
19246a00e9b0SRobert Wakim 	/*
1925ec0088bbSAlexeiFedorov 	 * Remove any data loaded speculatively in NS space from before
1926ec0088bbSAlexeiFedorov 	 * the scrubbing.
19276a00e9b0SRobert Wakim 	 */
19286a00e9b0SRobert Wakim 	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
19296a00e9b0SRobert Wakim 
1930ec0088bbSAlexeiFedorov 	flush_page_to_popa(base | nse);
19316a00e9b0SRobert Wakim 
1932ec0088bbSAlexeiFedorov 	/* Clear existing GPI encoding and transition granule */
19336a00e9b0SRobert Wakim 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
19346a00e9b0SRobert Wakim 		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);
19356a00e9b0SRobert Wakim 
19366a00e9b0SRobert Wakim 	/* Ensure that all agents observe the new NS configuration */
1937ec0088bbSAlexeiFedorov 	tlbi_page_dsbosh(base);
1938f19dc624Sjohpow01 
1939ec0088bbSAlexeiFedorov #if (RME_GPT_MAX_BLOCK != 0)
1940ec0088bbSAlexeiFedorov 	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
1941ec0088bbSAlexeiFedorov 		/* Try to fuse */
1942ec0088bbSAlexeiFedorov 		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
1943ec0088bbSAlexeiFedorov 	}
1944ec0088bbSAlexeiFedorov #endif
1945d766084fSAlexeiFedorov 	/* Unlock the lock to GPT */
1946d766084fSAlexeiFedorov 	GPT_UNLOCK;
1947f19dc624Sjohpow01 
194877612b90SSoby Mathew 	/*
194977612b90SSoby Mathew 	 * The isb() will be done as part of context
1950b99926efSAlexeiFedorov 	 * synchronization when returning to lower EL.
195177612b90SSoby Mathew 	 */
1952b99926efSAlexeiFedorov 	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
19536a00e9b0SRobert Wakim 		base, gpi_info.gpi, GPT_GPI_NS);
1954f19dc624Sjohpow01 
1955f19dc624Sjohpow01 	return 0;
1956f19dc624Sjohpow01 }
1957