/*
 * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef GPT_RME_PRIVATE_H
#define GPT_RME_PRIVATE_H

#include <lib/gpt_rme/gpt_rme.h>
#include <lib/utils_def.h>

/******************************************************************************/
/* GPT descriptor definitions                                                 */
/******************************************************************************/

/* GPT level 0 descriptor bit definitions */
#define GPT_L0_TYPE_MASK		UL(0xF)
#define GPT_L0_TYPE_SHIFT		U(0)

/* GPT level 0 table and block descriptors */
#define GPT_L0_TYPE_TBL_DESC		UL(3)
#define GPT_L0_TYPE_BLK_DESC		UL(1)

#define GPT_L0_TBL_DESC_L1ADDR_MASK	UL(0xFFFFFFFFFF)
#define GPT_L0_TBL_DESC_L1ADDR_SHIFT	U(12)

#define GPT_L0_BLK_DESC_GPI_MASK	UL(0xF)
#define GPT_L0_BLK_DESC_GPI_SHIFT	U(4)

/* GPT level 1 Contiguous descriptor */
#define GPT_L1_TYPE_CONT_DESC_MASK	UL(0xF)
#define GPT_L1_TYPE_CONT_DESC		UL(1)

/* GPT level 1 Contiguous descriptor definitions */
#define GPT_L1_CONTIG_2MB		UL(1)
#define GPT_L1_CONTIG_32MB		UL(2)
#define GPT_L1_CONTIG_512MB		UL(3)

#define GPT_L1_CONT_DESC_GPI_SHIFT	U(4)
#define GPT_L1_CONT_DESC_GPI_MASK	UL(0xF)
#define GPT_L1_CONT_DESC_CONTIG_SHIFT	U(8)
#define GPT_L1_CONT_DESC_CONTIG_MASK	UL(3)
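/*
 * Illustrative sketch (not part of the API): the fields above imply that an
 * L1 Contiguous descriptor packs its type in bits[3:0], the GPI in bits[7:4]
 * and the Contig size in bits[9:8]. Assuming GPT_GPI_NS is 0x9 (the RME
 * Non-secure GPI encoding), a 2MB Non-secure contiguous descriptor would be:
 *
 *	GPT_L1_TYPE_CONT_DESC |
 *	(0x9UL << GPT_L1_CONT_DESC_GPI_SHIFT) |
 *	(GPT_L1_CONTIG_2MB << GPT_L1_CONT_DESC_CONTIG_SHIFT) = 0x191
 */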

/* GPT level 1 Granules descriptor bit definitions */
#define GPT_L1_GRAN_DESC_GPI_MASK	UL(0xF)

/* L1 Contiguous descriptor templates */
#define GPT_L1_CONT_DESC_2MB	\
			(GPT_L1_TYPE_CONT_DESC |	\
			(GPT_L1_CONTIG_2MB << GPT_L1_CONT_DESC_CONTIG_SHIFT))
#define GPT_L1_CONT_DESC_32MB	\
			(GPT_L1_TYPE_CONT_DESC |	\
			(GPT_L1_CONTIG_32MB << GPT_L1_CONT_DESC_CONTIG_SHIFT))
#define GPT_L1_CONT_DESC_512MB	\
			(GPT_L1_TYPE_CONT_DESC |	\
			(GPT_L1_CONTIG_512MB << GPT_L1_CONT_DESC_CONTIG_SHIFT))

/* Create L1 Contiguous descriptor from GPI and template */
#define GPT_L1_GPI_CONT_DESC(_gpi, _desc)	\
			((_desc) | ((uint64_t)(_gpi) << GPT_L1_CONT_DESC_GPI_SHIFT))
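/*
 * Illustrative expansion (assuming GPT_GPI_REALM is 0xB per the RME GPI
 * encoding): combining a template with a GPI yields the full descriptor, e.g.
 *
 *	GPT_L1_GPI_CONT_DESC(0xB, GPT_L1_CONT_DESC_512MB)
 *	= (UL(1) | (UL(3) << 8)) | (0xBULL << 4)
 *	= 0x3B1
 */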

/* Create L1 Contiguous descriptor from Granules descriptor and size */
#define GPT_L1_CONT_DESC(_desc, _size) \
				(GPT_L1_CONT_DESC_##_size	| \
				(((_desc) & GPT_L1_GRAN_DESC_GPI_MASK) << \
				GPT_L1_CONT_DESC_GPI_SHIFT))

/* Create L1 Contiguous descriptor from GPI and size */
#define GPT_L1_CONT_DESC_SIZE(_gpi, _size) \
				(GPT_L1_CONT_DESC_##_size	| \
				((uint64_t)(_gpi) << GPT_L1_CONT_DESC_GPI_SHIFT))

#define GPT_L1_GPI_BYTE(_gpi)		(uint64_t)((_gpi) | ((_gpi) << 4))
#define GPT_L1_GPI_HALF(_gpi)		(GPT_L1_GPI_BYTE(_gpi) | (GPT_L1_GPI_BYTE(_gpi) << 8))
#define GPT_L1_GPI_WORD(_gpi)		(GPT_L1_GPI_HALF(_gpi) | (GPT_L1_GPI_HALF(_gpi) << 16))
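/*
 * Illustrative expansion (using an arbitrary GPI value of 0x9): the helpers
 * above replicate a 4-bit GPI across progressively wider fields:
 *
 *	GPT_L1_GPI_BYTE(0x9) = 0x99
 *	GPT_L1_GPI_HALF(0x9) = 0x9999
 *	GPT_L1_GPI_WORD(0x9) = 0x99999999
 */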

/*
 * This macro generates a Granules descriptor
 * with the same value for every GPI entry.
 */
#define GPT_BUILD_L1_DESC(_gpi)		(GPT_L1_GPI_WORD(_gpi) | (GPT_L1_GPI_WORD(_gpi) << 32))

#define GPT_L1_SECURE_DESC	GPT_BUILD_L1_DESC(GPT_GPI_SECURE)
#define GPT_L1_NS_DESC		GPT_BUILD_L1_DESC(GPT_GPI_NS)
#define GPT_L1_REALM_DESC	GPT_BUILD_L1_DESC(GPT_GPI_REALM)
#define GPT_L1_NSO_DESC		GPT_BUILD_L1_DESC(GPT_GPI_NSO)
#define GPT_L1_ROOT_DESC	GPT_BUILD_L1_DESC(GPT_GPI_ROOT)
#define GPT_L1_SA_DESC		GPT_BUILD_L1_DESC(GPT_GPI_SA)
#define GPT_L1_NSP_DESC		GPT_BUILD_L1_DESC(GPT_GPI_NSP)
#define GPT_L1_ANY_DESC		GPT_BUILD_L1_DESC(GPT_GPI_ANY)
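/*
 * Illustrative sketch: GPT_BUILD_L1_DESC() replicates the GPI into all
 * sixteen 4-bit fields of the 64-bit Granules descriptor. Assuming GPT_GPI_NS
 * is 0x9, GPT_L1_NS_DESC would evaluate to 0x9999999999999999.
 */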

/******************************************************************************/
/* GPT platform configuration                                                 */
/******************************************************************************/

/* This value comes from GPCCR_EL3, so no externally supplied definition is needed */
#define GPT_L0GPTSZ		((unsigned int)((read_gpccr_el3() >> \
				GPCCR_L0GPTSZ_SHIFT) & GPCCR_L0GPTSZ_MASK))

/* The "S" value is directly related to L0GPTSZ */
#define GPT_S_VAL		(GPT_L0GPTSZ + 30U)
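/*
 * Worked example (values taken from the comments further down): the smallest
 * L0GPTSZ corresponds to a 1GB L0 region, giving S = 30, while the largest
 * (512GB) gives S = 39. With GPT_S_VAL = GPT_L0GPTSZ + 30, an L0GPTSZ field
 * of 0 therefore yields S = 30 and an L0 region size of 1UL << 30 = 1GB.
 */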

/*
 * Map PPS values to T values.
 *
 *   PPS    Size    T
 *   0b000  4GB     32
 *   0b001  64GB    36
 *   0b010  1TB     40
 *   0b011  4TB     42
 *   0b100  16TB    44
 *   0b101  256TB   48
 *   0b110  4PB     52
 *
 * See section 15.1.27 of the RME specification.
 */
typedef enum {
	PPS_4GB_T =	32U,
	PPS_64GB_T =	36U,
	PPS_1TB_T =	40U,
	PPS_4TB_T =	42U,
	PPS_16TB_T =	44U,
	PPS_256TB_T =	48U,
	PPS_4PB_T =	52U
} gpt_t_val_e;

/*
 * Map PGS values to P values.
 *
 *   PGS    Size    P
 *   0b00   4KB     12
 *   0b10   16KB    14
 *   0b01   64KB    16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
typedef enum {
	PGS_4KB_P =	12U,
	PGS_16KB_P =	14U,
	PGS_64KB_P =	16U
} gpt_p_val_e;

/*
 * Internal structure used to retrieve values from get_gpi_params().
 */
typedef struct {
	uint64_t gpt_l1_desc;
	uint64_t *gpt_l1_addr;
	unsigned int idx;
	unsigned int gpi_shift;
	unsigned int gpi;
#if (RME_GPT_BITLOCK_BLOCK != 0)
	bitlock_t *lock;
	LOCK_TYPE mask;
#endif
} gpi_info_t;
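/*
 * Usage sketch (illustrative only, based on the macros defined later in this
 * header): get_gpi_params() is expected to fill this structure roughly as
 *
 *	info->idx         = (unsigned int)GPT_L1_IDX(p, pa);
 *	info->gpt_l1_desc = info->gpt_l1_addr[info->idx];
 *	info->gpi_shift   = GPT_L1_GPI_IDX(p, pa) << 2;
 *	info->gpi         = (unsigned int)((info->gpt_l1_desc >>
 *			    info->gpi_shift) & GPT_L1_GRAN_DESC_GPI_MASK);
 *
 * where "p" is the PGS P value and "pa" is the physical address being
 * queried.
 */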

/*
 * Look-up structure for contiguous blocks and descriptors
 */
typedef struct {
	size_t size;
	unsigned int desc;
} gpt_fill_lookup_t;

typedef void (*gpt_shatter_func)(uintptr_t base, const gpi_info_t *gpi_info,
					uint64_t l1_desc);
typedef void (*gpt_tlbi_func)(uintptr_t base);

/*
 * Look-up structure for invalidating TLB entries for GPT entries by physical
 * address, last level.
 */
typedef struct {
	gpt_tlbi_func function;
	size_t mask;
} gpt_tlbi_lookup_t;

/* Max valid value for PGS */
#define GPT_PGS_MAX			(2U)

/* Max valid value for PPS */
#define GPT_PPS_MAX			(6U)

/******************************************************************************/
/* L0 address attribute macros                                                */
/******************************************************************************/

/*
 * Width of the L0 index field.
 *
 * If S is greater than or equal to T then there is a single L0 region covering
 * the entire protected space, so there is no L0 index and the width (and the
 * derived mask value) are both zero. If this special case were not handled
 * explicitly, the width would come out negative, which does not make sense.
 */
#define GPT_L0_IDX_WIDTH(_t)		(((unsigned int)(_t) > GPT_S_VAL) ? \
					((unsigned int)(_t) - GPT_S_VAL) : (0U))

/* Bit shift for the L0 index field in a PA */
#define GPT_L0_IDX_SHIFT		(GPT_S_VAL)

/*
 * Mask for the L0 index field, must be shifted.
 *
 * The value 0x3FFFFF is 22 bits wide, which is the maximum possible width of
 * the L0 index within a physical address. This is calculated by
 * ((t_max - 1) - s_min + 1) where t_max is 52 for 4PB, the largest PPS, and
 * s_min is 30 for 1GB, the smallest L0GPTSZ.
 */
#define GPT_L0_IDX_MASK(_t)		(0x3FFFFFUL >> (22U - \
					(GPT_L0_IDX_WIDTH(_t))))

/* Total number of L0 regions */
#define GPT_L0_REGION_COUNT(_t)		((GPT_L0_IDX_MASK(_t)) + 1U)

/* Total size of each GPT L0 region in bytes */
#define GPT_L0_REGION_SIZE		(1UL << (GPT_L0_IDX_SHIFT))

/* Total size in bytes of the whole L0 table */
#define GPT_L0_TABLE_SIZE(_t)		((GPT_L0_REGION_COUNT(_t)) << 3U)
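/*
 * Worked example (illustrative): for PPS = 4GB (T = 32) and the smallest
 * L0GPTSZ (S = 30):
 *
 *	GPT_L0_IDX_WIDTH(32)	= 32 - 30		= 2
 *	GPT_L0_IDX_MASK(32)	= 0x3FFFFF >> 20	= 0x3
 *	GPT_L0_REGION_COUNT(32)	= 0x3 + 1		= 4 regions
 *	GPT_L0_REGION_SIZE	= 1UL << 30		= 1GB
 *	GPT_L0_TABLE_SIZE(32)	= 4 << 3		= 32 bytes
 */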

/******************************************************************************/
/* L1 address attribute macros                                                */
/******************************************************************************/

/*
 * Width of the L1 index field.
 *
 * This field does not have a special case to handle widths less than zero like
 * the L0 index field above since all valid combinations of PGS (p) and L0GPTSZ
 * (s) will result in a positive width value.
 */
#define GPT_L1_IDX_WIDTH(_p)		((GPT_S_VAL - 1U) - \
					((unsigned int)(_p) + 3U))

/* Bit shift for the L1 index field */
#define GPT_L1_IDX_SHIFT(_p)		((unsigned int)(_p) + 4U)

/*
 * Mask for the L1 index field, must be shifted.
 *
 * The value 0x7FFFFF is 23 bits wide and is the maximum possible width of the
 * L1 index within a physical address. It is calculated by
 * ((s_max - 1) - (p_min + 4) + 1) where s_max is 39 for 512GB, the largest
 * L0GPTSZ, and p_min is 12 for 4KB granules, the smallest PGS.
 */
#define GPT_L1_IDX_MASK(_p)		(0x7FFFFFUL >> (23U - \
					(GPT_L1_IDX_WIDTH(_p))))

/* Bit shift for the index of the L1 GPI in a PA */
#define GPT_L1_GPI_IDX_SHIFT(_p)	(_p)

/* Mask for the index of the L1 GPI in a PA */
#define GPT_L1_GPI_IDX_MASK		(0xF)

/* Total number of entries in each L1 table */
#define GPT_L1_ENTRY_COUNT(_p)		((GPT_L1_IDX_MASK(_p)) + 1UL)

/* Number of L1 entries in 2MB block */
#define GPT_L1_ENTRY_COUNT_2MB(_p)	(SZ_2M >> GPT_L1_IDX_SHIFT(_p))

/* Total size in bytes of each L1 table */
#define GPT_L1_TABLE_SIZE(_p)		((GPT_L1_ENTRY_COUNT(_p)) << 3U)
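/*
 * Worked example (illustrative): for PGS = 4KB (P = 12) and the smallest
 * L0GPTSZ (S = 30):
 *
 *	GPT_L1_IDX_WIDTH(12)	= (30 - 1) - (12 + 3)	= 14
 *	GPT_L1_IDX_SHIFT(12)	= 12 + 4		= 16
 *	GPT_L1_IDX_MASK(12)	= 0x7FFFFF >> 9		= 0x3FFF
 *	GPT_L1_ENTRY_COUNT(12)	= 0x3FFF + 1		= 16384 entries
 *	GPT_L1_TABLE_SIZE(12)	= 16384 << 3		= 128KB
 *
 * Each 64-bit entry holds sixteen 4-bit GPIs, so one L1 table covers
 * 16384 * 16 * 4KB = 1GB, i.e. one L0 region at this L0GPTSZ.
 */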

/******************************************************************************/
/* General helper macros                                                      */
/******************************************************************************/

/* Protected space actual size in bytes */
#define GPT_PPS_ACTUAL_SIZE(_t)	(1UL << (unsigned int)(_t))

/* Granule actual size in bytes */
#define GPT_PGS_ACTUAL_SIZE(_p)	(1UL << (unsigned int)(_p))

/* Number of granules in 2MB block */
#define GPT_PGS_COUNT_2MB(_p)	(1UL << (21U - (unsigned int)(_p)))

/* L0 GPT region size in bytes */
#define GPT_L0GPTSZ_ACTUAL_SIZE	(1UL << GPT_S_VAL)

/* Get the index of the L0 entry from a physical address */
#define GPT_L0_IDX(_pa)		((_pa) >> GPT_L0_IDX_SHIFT)

/*
 * This definition is used to determine if a physical address lies on an L0
 * region boundary.
 */
#define GPT_IS_L0_ALIGNED(_pa)	\
	(((_pa) & (GPT_L0_REGION_SIZE - UL(1))) == UL(0))

/* Get the type field from an L0 descriptor */
#define GPT_L0_TYPE(_desc)	(((_desc) >> GPT_L0_TYPE_SHIFT) & \
				GPT_L0_TYPE_MASK)

/* Create an L0 block descriptor */
#define GPT_L0_BLK_DESC(_gpi)	(GPT_L0_TYPE_BLK_DESC | \
				(((_gpi) & GPT_L0_BLK_DESC_GPI_MASK) << \
				GPT_L0_BLK_DESC_GPI_SHIFT))

/* Create an L0 table descriptor with an L1 table address */
#define GPT_L0_TBL_DESC(_pa)	(GPT_L0_TYPE_TBL_DESC | ((uint64_t)(_pa) & \
				(GPT_L0_TBL_DESC_L1ADDR_MASK << \
				GPT_L0_TBL_DESC_L1ADDR_SHIFT)))

/* Get the GPI from an L0 block descriptor */
#define GPT_L0_BLKD_GPI(_desc)	(((_desc) >> GPT_L0_BLK_DESC_GPI_SHIFT) & \
				GPT_L0_BLK_DESC_GPI_MASK)

/* Get the L1 address from an L0 table descriptor */
#define GPT_L0_TBLD_ADDR(_desc)	((uint64_t *)(((_desc) & \
				(GPT_L0_TBL_DESC_L1ADDR_MASK << \
				GPT_L0_TBL_DESC_L1ADDR_SHIFT))))
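/*
 * Illustrative round trip (hypothetical L1 table address): for an L1 table
 * placed at physical address 0x80001000,
 *
 *	GPT_L0_TBL_DESC(0x80001000)	= 0x80001003
 *	GPT_L0_TYPE(0x80001003)		= GPT_L0_TYPE_TBL_DESC
 *	GPT_L0_TBLD_ADDR(0x80001003)	= (uint64_t *)0x80001000
 *
 * since the L1 address field occupies bits[51:12] of the descriptor.
 */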

/* Get the GPI from L1 Contiguous descriptor */
#define GPT_L1_CONT_GPI(_desc)		\
	(((_desc) >> GPT_L1_CONT_DESC_GPI_SHIFT) & GPT_L1_CONT_DESC_GPI_MASK)

/* Get the GPI from L1 Granules descriptor */
#define GPT_L1_GRAN_GPI(_desc)	((_desc) & GPT_L1_GRAN_DESC_GPI_MASK)

/* Get the Contig from L1 Contiguous descriptor */
#define GPT_L1_CONT_CONTIG(_desc)	\
	(((_desc) >> GPT_L1_CONT_DESC_CONTIG_SHIFT) & \
					GPT_L1_CONT_DESC_CONTIG_MASK)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_IDX(_p, _pa)		\
	(((_pa) >> GPT_L1_IDX_SHIFT(_p)) & GPT_L1_IDX_MASK(_p))

/* Get the index of the GPI within an L1 table entry from a physical address */
#define GPT_L1_GPI_IDX(_p, _pa)		\
	(((_pa) >> GPT_L1_GPI_IDX_SHIFT(_p)) & GPT_L1_GPI_IDX_MASK)
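/*
 * Worked example (illustrative address, 4KB granules): for pa = 0x40123000
 * and _p = PGS_4KB_P (12),
 *
 *	GPT_L1_IDX(12, 0x40123000)	= (0x40123000 >> 16) & 0x3FFF = 0x12
 *	GPT_L1_GPI_IDX(12, 0x40123000)	= (0x40123000 >> 12) & 0xF    = 0x3
 *
 * so the granule's GPI lives in GPI field 3 (bits[15:12]) of L1 table entry
 * 0x12 and, given a hypothetical pointer l1 to the L1 table, can be read with
 *
 *	(l1[0x12] >> (0x3 << 2)) & GPT_L1_GRAN_DESC_GPI_MASK
 */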

/* Determine if an address is granule-aligned */
#define GPT_IS_L1_ALIGNED(_p, _pa)	\
	(((_pa) & (GPT_PGS_ACTUAL_SIZE(_p) - UL(1))) == UL(0))

/* Get aligned addresses */
#define ALIGN_2MB(_addr)	((_addr) & ~(SZ_2M - 1UL))
#define ALIGN_32MB(_addr)	((_addr) & ~(SZ_32M - 1UL))
#define ALIGN_512MB(_addr)	((_addr) & ~(SZ_512M - 1UL))

/* Determine if region is contiguous */
#define GPT_REGION_IS_CONT(_len, _addr, _size)	\
	(((_len) >= (_size)) && (((_addr) & ((_size) - UL(1))) == UL(0)))
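/*
 * Illustrative check (hypothetical region): a region of length 0x4000000
 * (64MB) starting at 0x40000000 satisfies
 * GPT_REGION_IS_CONT(0x4000000, 0x40000000, SZ_32M), because the length
 * covers at least one 32MB block and the base is 32MB aligned, but not
 * GPT_REGION_IS_CONT(0x4000000, 0x40000000, SZ_512M), since 64MB is smaller
 * than a 512MB block.
 */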

/* Get 32MB block number in 512MB block: 0-15 */
#define GET_32MB_NUM(_addr)	(((_addr) >> 25) & 0xF)

/* Get 2MB block number in 32MB block: 0-15 */
#define GET_2MB_NUM(_addr)	(((_addr) >> 21) & 0xF)
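/*
 * Illustrative example (arbitrary address): for _addr = 0x46400000,
 *
 *	GET_32MB_NUM(0x46400000)	= (0x46400000 >> 25) & 0xF = 3
 *	GET_2MB_NUM(0x46400000)		= (0x46400000 >> 21) & 0xF = 2
 *
 * i.e. the address lies in 32MB block number 3 of its 512MB block and in
 * 2MB block number 2 of that 32MB block.
 */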

#endif /* GPT_RME_PRIVATE_H */