1 /*
2 * Copyright (c) 2022-2026, Arm Limited. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <errno.h>
9 #include <inttypes.h>
10 #include <limits.h>
11 #include <stdint.h>
12
13 #include <arch.h>
14 #include <arch_features.h>
15 #include <common/debug.h>
16 #include <lib/gpt_rme/gpt_rme.h>
17 #include <lib/smccc.h>
18 #include <lib/xlat_tables/xlat_tables_v2.h>
19
20 #include "gpt_rme_private.h"
21
22 #if !ENABLE_FEAT_RME
23 #error "ENABLE_FEAT_RME must be enabled to use the GPT library"
24 #endif
25
/*
 * Lookup T from PPS
 *
 *   PPS    Size    T
 *   0b000  4GB     32
 *   0b001  64GB    36
 *   0b010  1TB     40
 *   0b011  4TB     42
 *   0b100  16TB    44
 *   0b101  256TB   48
 *   0b110  4PB     52
 *
 * See section 15.1.27 of the RME specification.
 */
/* Indexed directly by the GPCCR_EL3.PPS field value */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};
44
/*
 * Lookup P from PGS
 *
 *   PGS   Size   P
 *   0b00  4KB    12
 *   0b10  16KB   14
 *   0b01  64KB   16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
/* Indexed directly by the GPCCR_EL3.PGS field value, hence the 64KB/16KB order */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
58
/*
 * This table allows us to easily look up GPI-specific information such as
 * descriptors, nse/nse2 fields, and allowed transitions without using large
 * blocks of nested conditionals which both take up a lot of space and are
 * very slow.
 *
 * This array contains an entry for each valid GPI, unused values are all zeros.
 * GPIs such as SA, NSP, and NSO are not part of base FEAT_RME so those policy
 * flags are set at runtime when FEAT_RME_GDI and FEAT_RME_GPC2 are enabled.
 *
 * uint64_t desc	The L1 descriptor associated with a GPI
 * uint8_t nse		NSE bits
 * uint8_t nse2		NSE2 bits
 * uint16_t policy[3]	Contains bit fields representing which GPIs a given
 *			security state can transition this GPI to. 0=s, 1=ns, and
 *			0x2=realm.
 */
static gpi_lookup_t gpi_config[] = {
	{ 0 },	/* 0x0: GPT_GPI_NO_ACCESS - no transitions allowed */
	{ 0 },	/* 0x1: unused encoding */
	{ 0 },	/* 0x2: unused encoding */
	{ 0 },	/* 0x3: unused encoding */
	/* 0x4: GPT_GPI_SA - policy populated at runtime (FEAT_RME_GDI) */
	{ GPT_L1_SA_DESC, 0, GPT_NSE2_SA, { 0x0, 0x0, 0x0 } },
	/* 0x5: GPT_GPI_NSP - policy populated at runtime (FEAT_RME_GDI) */
	{ GPT_L1_NSP_DESC, 0, GPT_NSE2_NSP, { 0x0, 0x0, 0x0 } },
	{ 0 },	/* 0x6: unused encoding */
	{ 0 },	/* 0x7: unused encoding */
	/* 0x8: GPT_GPI_SECURE - Secure state may transition it to NS */
	{ GPT_L1_SECURE_DESC,
	  GPT_NSE_SECURE,
	  0,
	  { (1 << GPT_GPI_NS), 0x0, 0x0 } },
	/* 0x9: GPT_GPI_NS - Secure may take it to SECURE, Realm to REALM */
	{ GPT_L1_NS_DESC,
	  GPT_NSE_NS,
	  0,
	  { (1 << GPT_GPI_SECURE), 0x0, (1 << GPT_GPI_REALM) } },
	/* 0xA: GPT_GPI_ROOT - no transitions allowed */
	{ GPT_L1_ROOT_DESC, GPT_NSE_ROOT, 0, { 0x0, 0x0, 0x0 } },
	/* 0xB: GPT_GPI_REALM - Realm state may transition it to NS */
	{ GPT_L1_REALM_DESC, GPT_NSE_REALM, 0, { 0x0, 0x0, (1 << GPT_GPI_NS) } },
	{ 0 },	/* 0xC: unused encoding */
	/* 0xD: GPT_GPI_NSO - policy populated at runtime (FEAT_RME_GPC2) */
	{ GPT_L1_NSO_DESC, GPT_NSE_NS, 0, { 0x0, 0x0, 0x0 } },
	{ 0 },	/* 0xE: unused encoding */
	{ 0 },	/* 0xF: GPT_GPI_ANY - handled specially, no fixed descriptor */
};
100
/*
 * Forward declarations for the block-shattering helpers defined below
 * (shatter_512mb calls shatter_32mb internally).
 */
static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc);
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc);
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc);

/*
 * This structure contains GPT configuration data
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;	/* Base address of the L0 GPT table */
	gpccr_pps_e pps;		/* Protected Physical Size encoding */
	gpt_t_val_e t;			/* PPS width in bits (T), from gpt_t_lookup */
	gpccr_pgs_e pgs;		/* Physical Granule Size encoding */
	gpt_p_val_e p;			/* Granule shift (P), from gpt_p_lookup */
} gpt_config_t;

static gpt_config_t gpt_config;
120
/*
 * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
 * +-------+------------+
 * |  PGS  | L1 entries |
 * +-------+------------+
 * |  4KB  |     32     |
 * +-------+------------+
 * |  16KB |      8     |
 * +-------+------------+
 * |  64KB |      2     |
 * +-------+------------+
 *
 * NOTE(review): assigned during library initialization (outside this chunk);
 * consumed by the L1_QWORDS_*/L1_BYTES_* macros.
 */
static unsigned int gpt_l1_cnt_2mb;

/*
 * Mask for the L1 index field, depending on
 * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
 * +---------+-------------------------------+
 * |         |             PGS               |
 * +---------+----------+----------+---------+
 * | L0GPTSZ |   4KB    |   16KB   |   64KB  |
 * +---------+----------+----------+---------+
 * |   1GB   |  0x3FFF  |   0xFFF  |  0x3FF  |
 * +---------+----------+----------+---------+
 * |  16GB   | 0x3FFFF  |  0xFFFF  | 0x3FFF  |
 * +---------+----------+----------+---------+
 * |  64GB   | 0xFFFFF  | 0x3FFFF  | 0xFFFF  |
 * +---------+----------+----------+---------+
 * |  512GB  | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
 * +---------+----------+----------+---------+
 *
 * NOTE(review): assigned during library initialization (outside this chunk);
 * consumed by GPT_L1_INDEX().
 */
static uint64_t gpt_l1_index_mask;
153
/* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
#define L1_QWORDS_2MB	(gpt_l1_cnt_2mb / 2U)
#define L1_QWORDS_32MB	(L1_QWORDS_2MB * 16U)
#define L1_QWORDS_512MB	(L1_QWORDS_32MB * 16U)

/* Size in bytes of L1 entries in 2MB, 32MB */
#define L1_BYTES_2MB	(gpt_l1_cnt_2mb * sizeof(uint64_t))
#define L1_BYTES_32MB	(L1_BYTES_2MB * 16U)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_INDEX(_pa) \
	(((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)

/* Get the descriptor or NSE bits from GPI encoding. */
#define GPI_TO_DESC(_gpi)	(gpi_config[_gpi].desc)
#define GPI_TO_NSE(_gpi) \
	(((uint64_t)gpi_config[_gpi].nse << GPT_NSE_SHIFT) | \
	((uint64_t)gpi_config[_gpi].nse2 << GPT_NSE2_SHIFT))

/*
 * This variable is used during initialization of the L1 tables.
 * It is a bump-allocation cursor into the caller-supplied L1 memory;
 * see get_new_l1_tbl().
 */
static uintptr_t gpt_l1_tbl;
175
/* These variables are used during runtime */
#if (RME_GPT_BITLOCK_BLOCK == 0)

/*
 * The GPTs are protected by a global spinlock to ensure
 * that multiple CPUs do not attempt to change the descriptors at once.
 */
static spinlock_t gpt_lock;

/* Lock/unlock macros for GPT entries
 *
 * Access to GPT is controlled by a global lock to ensure
 * that no more than one CPU is allowed to make changes at any
 * given time.
 */
#define GPT_LOCK	spin_lock(&gpt_lock)
#define GPT_UNLOCK	spin_unlock(&gpt_lock)
#else

/* Base address of bitlocks array */
static bitlock_t *gpt_bitlock;

/*
 * Access to a block of memory is controlled by a bitlock.
 * Size of block = RME_GPT_BITLOCK_BLOCK * 512MB.
 *
 * NOTE(review): these macros expect a local 'gpi_info' variable to be in
 * scope at every use site.
 */
#define GPT_LOCK	bit_lock(gpi_info.lock, gpi_info.mask)
#define GPT_UNLOCK	bit_unlock(gpi_info.lock, gpi_info.mask)
#endif /* RME_GPT_BITLOCK_BLOCK */
205
/*
 * Invalidate TLB entries for the page containing 'base' by physical address
 * and synchronize with a DSB (the called helpers carry the "os" outer-
 * shareable suffix).
 *
 * Note the lookup order {4KB, 64KB, 16KB}: the table is indexed by the raw
 * GPCCR_EL3.PGS encoding (0b00 = 4KB, 0b01 = 64KB, 0b10 = 16KB), matching
 * the ordering quirk documented at gpt_p_lookup above.
 */
static void tlbi_page_dsbosh(uintptr_t base)
{
	/* Look-up table for invalidation TLBs for 4KB, 16KB and 64KB pages */
	static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
		{ tlbirpalos_4k, ~(SZ_4K - 1UL) },
		{ tlbirpalos_64k, ~(SZ_64K - 1UL) },
		{ tlbirpalos_16k, ~(SZ_16K - 1UL) }
	};

	/* Mask 'base' down to the page boundary before invalidating */
	tlbi_page_lookup[gpt_config.pgs].function(
			base & tlbi_page_lookup[gpt_config.pgs].mask);
	dsbosh();
}
219
220 /*
221 * Helper function to fill out GPI entries in a single L1 table
222 * with Granules or Contiguous descriptor.
223 *
224 * Parameters
225 * l1 Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
226 * l1_desc GPT Granules or Contiguous descriptor set this range to
227 * cnt Number of double 128-bit L1 entries to fill
228 *
229 */
fill_desc(uint64_t * l1,uint64_t l1_desc,unsigned int cnt)230 static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
231 {
232 uint128_t *l1_quad = (uint128_t *)l1;
233 uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);
234
235 VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);
236
237 for (unsigned int i = 0U; i < cnt; i++) {
238 *l1_quad++ = l1_quad_desc;
239 }
240 }
241
shatter_2mb(uintptr_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)242 static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
243 uint64_t l1_desc)
244 {
245 unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
246
247 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
248 __func__, base, l1_desc);
249
250 /* Convert 2MB Contiguous block to Granules */
251 fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
252 }
253
shatter_32mb(uintptr_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)254 static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
255 uint64_t l1_desc)
256 {
257 unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
258 const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
259 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
260 uint64_t *l1;
261
262 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
263 __func__, base, l1_desc);
264
265 /* Get index corresponding to 32MB aligned address */
266 idx = GPT_L1_INDEX(ALIGN_32MB(base));
267 l1 = &gpi_info->gpt_l1_addr[idx];
268
269 /* 16 x 2MB blocks in 32MB */
270 for (unsigned int i = 0U; i < 16U; i++) {
271 /* Fill with Granules or Contiguous descriptors */
272 fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
273 L1_QWORDS_2MB);
274 l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
275 }
276 }
277
shatter_512mb(uintptr_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)278 static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
279 uint64_t l1_desc)
280 {
281 unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
282 const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
283 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
284 uint64_t *l1;
285
286 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
287 __func__, base, l1_desc);
288
289 /* Get index corresponding to 512MB aligned address */
290 idx = GPT_L1_INDEX(ALIGN_512MB(base));
291 l1 = &gpi_info->gpt_l1_addr[idx];
292
293 /* 16 x 32MB blocks in 512MB */
294 for (unsigned int i = 0U; i < 16U; i++) {
295 if (l1 == l1_32mb) {
296 /* Shatter this 32MB block */
297 shatter_32mb(base, gpi_info, l1_desc);
298 } else {
299 /* Fill 32MB with Contiguous descriptors */
300 fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
301 }
302
303 l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
304 }
305 }
306
307 /*
308 * This function checks to see if a GPI value is valid.
309 *
310 * These are valid GPI values.
311 * GPT_GPI_NO_ACCESS U(0x0)
312 * GPT_GPI_SA U(0x4)
313 * GPT_GPI_NSP U(0x5)
314 * GPT_GPI_SECURE U(0x8)
315 * GPT_GPI_NS U(0x9)
316 * GPT_GPI_ROOT U(0xA)
317 * GPT_GPI_REALM U(0xB)
318 * GPT_GPI_NSO U(0xD)
319 * GPT_GPI_ANY U(0xF)
320 *
321 * Parameters
322 * gpi GPI to check for validity.
323 *
324 * Return
325 * true for a valid GPI, false for an invalid one.
326 */
is_gpi_valid(unsigned int gpi)327 static bool is_gpi_valid(unsigned int gpi)
328 {
329 switch (gpi) {
330 case GPT_GPI_NO_ACCESS:
331 case GPT_GPI_SECURE:
332 case GPT_GPI_NS:
333 case GPT_GPI_ROOT:
334 #if ENABLE_RMM
335 case GPT_GPI_REALM:
336 #endif
337 case GPT_GPI_ANY:
338 return true;
339 case GPT_GPI_NSO:
340 return is_feat_rme_gpc2_present();
341 case GPT_GPI_SA:
342 case GPT_GPI_NSP:
343 return is_feat_rme_gdi_supported();
344 default:
345 return false;
346 }
347 }
348
/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	/*
	 * Two half-open ranges [base, base + size) intersect iff each one
	 * starts before the other one ends.
	 */
	return ((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1);
}
369
370 /*
371 * This helper function checks to see if a PAS region from index 0 to
372 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
373 *
374 * Parameters
375 * l0_idx: Index of the L0 entry to check
376 * pas_regions: PAS region array
377 * pas_idx: Upper bound of the PAS array index.
378 *
379 * Return
380 * True if a PAS region occupies the L0 region in question, false if not.
381 */
does_previous_pas_exist_here(unsigned int l0_idx,pas_region_t * pas_regions,unsigned int pas_idx)382 static bool does_previous_pas_exist_here(unsigned int l0_idx,
383 pas_region_t *pas_regions,
384 unsigned int pas_idx)
385 {
386 /* Iterate over PAS regions up to pas_idx */
387 for (unsigned int i = 0U; i < pas_idx; i++) {
388 if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
389 GPT_L0GPTSZ_ACTUAL_SIZE,
390 pas_regions[i].base_pa, pas_regions[i].size)) {
391 return true;
392 }
393 }
394 return false;
395 }
396
/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;	/* Running total of L1 tables needed */
	unsigned int pas_l1_cnt;	/* L1 tables needed by the current PAS */
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity: must fit in PPS, GPI valid */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i =
		     (unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Block mappings need no L1 tables */
			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same PAS we subtract from pas_l1_cnt and only the
			 * first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
					       pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	/* Success: the non-negative count of L1 tables required */
	return l1_cnt;
}
554
555 /*
556 * This function validates L0 initialization parameters.
557 *
558 * Parameters
559 * l0_mem_base Base address of memory used for L0 table.
560 * l0_mem_size Size of memory available for L0 table.
561 *
562 * Return
563 * Negative Linux error code in the event of a failure, 0 for success.
564 */
validate_l0_params(gpccr_pps_e pps,uintptr_t l0_mem_base,size_t l0_mem_size)565 static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
566 size_t l0_mem_size)
567 {
568 size_t l0_alignment;
569
570 /*
571 * Make sure PPS is valid and then store it since macros need this value
572 * to work.
573 */
574 if (pps > GPT_PPS_MAX) {
575 ERROR("GPT: Invalid PPS: 0x%x\n", pps);
576 return -EINVAL;
577 }
578 gpt_config.pps = pps;
579 gpt_config.t = gpt_t_lookup[pps];
580
581 /* Alignment must be the greater of 4KB or L0 table size */
582 l0_alignment = SZ_4K;
583 if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
584 l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
585 }
586
587 /* Check base address */
588 if ((l0_mem_base == 0UL) ||
589 ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
590 ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
591 return -EFAULT;
592 }
593
594 /* Check memory size for L0 table */
595 if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
596 ERROR("GPT: Inadequate L0 memory\n");
597 ERROR(" Expected 0x%lx bytes, got 0x%lx\n",
598 GPT_L0_TABLE_SIZE(gpt_config.t), l0_mem_size);
599 return -ENOMEM;
600 }
601
602 return 0;
603 }
604
605 /*
606 * In the event that L1 tables are needed, this function validates
607 * the L1 table generation parameters.
608 *
609 * Parameters
610 * l1_mem_base Base address of memory used for L1 table allocation.
611 * l1_mem_size Total size of memory available for L1 tables.
612 * l1_gpt_cnt Number of L1 tables needed.
613 *
614 * Return
615 * Negative Linux error code in the event of a failure, 0 for success.
616 */
validate_l1_params(uintptr_t l1_mem_base,size_t l1_mem_size,unsigned int l1_gpt_cnt)617 static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
618 unsigned int l1_gpt_cnt)
619 {
620 size_t l1_gpt_mem_sz;
621
622 /* Check if the granularity is supported */
623 if (!xlat_arch_is_granule_size_supported(
624 GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
625 return -EPERM;
626 }
627
628 /* Make sure L1 tables are aligned to their size */
629 if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
630 ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
631 l1_mem_base);
632 return -EFAULT;
633 }
634
635 /* Get total memory needed for L1 tables */
636 l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
637
638 /* Check for overflow */
639 if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
640 ERROR("GPT: Overflow calculating L1 memory size\n");
641 return -ENOMEM;
642 }
643
644 /* Make sure enough space was supplied */
645 if (l1_mem_size < l1_gpt_mem_sz) {
646 ERROR("%sL1 GPTs%s", (const char *)"GPT: Inadequate ",
647 (const char *)" memory\n");
648 ERROR(" Expected 0x%lx bytes, got 0x%lx\n",
649 l1_gpt_mem_sz, l1_mem_size);
650 return -ENOMEM;
651 }
652
653 VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
654 return 0;
655 }
656
657 /*
658 * This function initializes L0 block descriptors (regions that cannot be
659 * transitioned at the granule level) according to the provided PAS.
660 *
661 * Parameters
662 * *pas Pointer to the structure defining the PAS region to
663 * initialize.
664 */
generate_l0_blk_desc(pas_region_t * pas)665 static void generate_l0_blk_desc(pas_region_t *pas)
666 {
667 uint64_t gpt_desc;
668 unsigned long idx, end_idx;
669 uint64_t *l0_gpt_arr;
670
671 assert(gpt_config.plat_gpt_l0_base != 0UL);
672 assert(pas != NULL);
673
674 /*
675 * Checking of PAS parameters has already been done in
676 * validate_pas_mappings so no need to check the same things again.
677 */
678
679 l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;
680
681 /* Create the GPT Block descriptor for this PAS region */
682 gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
683
684 /* Start index of this region in L0 GPTs */
685 idx = GPT_L0_IDX(pas->base_pa);
686
687 /*
688 * Determine number of L0 GPT descriptors covered by
689 * this PAS region and use the count to populate these
690 * descriptors.
691 */
692 end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
693
694 /* Generate the needed block descriptors */
695 for (; idx < end_idx; idx++) {
696 l0_gpt_arr[idx] = gpt_desc;
697 VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
698 idx, &l0_gpt_arr[idx],
699 (gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
700 GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
701 }
702 }
703
704 /*
705 * Helper function to determine if the end physical address lies in the same L0
706 * region as the current physical address. If true, the end physical address is
707 * returned else, the start address of the next region is returned.
708 *
709 * Parameters
710 * cur_pa Physical address of the current PA in the loop through
711 * the range.
712 * end_pa Physical address of the end PA in a PAS range.
713 *
714 * Return
715 * The PA of the end of the current range.
716 */
get_l1_end_pa(uintptr_t cur_pa,uintptr_t end_pa)717 static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
718 {
719 uintptr_t cur_idx;
720 uintptr_t end_idx;
721
722 cur_idx = GPT_L0_IDX(cur_pa);
723 end_idx = GPT_L0_IDX(end_pa);
724
725 assert(cur_idx <= end_idx);
726
727 if (cur_idx == end_idx) {
728 return end_pa;
729 }
730
731 return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
732 }
733
/*
 * Helper function to fill out GPI entries from 'first' granule address of
 * the specified 'length' in a single L1 table with 'l1_desc' Contiguous
 * descriptor.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   length	Length of the range in bytes
 *   gpi	GPI set this range to
 *
 * Return
 *   Address of next granule in range, or 'first' unchanged if no supported
 *   contiguous block size fits at this address (caller then falls back to
 *   Granules descriptors).
 */
__unused static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
					    size_t length, unsigned int gpi)
{
	/*
	 * Look up table for contiguous blocks and descriptors.
	 * Entries should be defined in descending block sizes:
	 * 512MB, 32MB and 2MB.
	 */
	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
#if (RME_GPT_MAX_BLOCK == 512)
		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
#endif
#if (RME_GPT_MAX_BLOCK >= 32)
		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
#endif
#if (RME_GPT_MAX_BLOCK != 0)
		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
#endif
	};

	/*
	 * Iterate through all block sizes (512MB, 32MB and 2MB)
	 * starting with maximum supported.
	 */
	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
		/* Calculate index */
		unsigned long idx = GPT_L1_INDEX(first);

		/* Contiguous block size */
		size_t cont_size = gpt_fill_lookup[i].size;

		if (GPT_REGION_IS_CONT(length, first, cont_size)) {

			/* Generate Contiguous descriptor */
			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
						gpt_fill_lookup[i].desc);

			/* Number of 128-bit L1 entries in block */
			unsigned int cnt;

			switch (cont_size) {
			case SZ_512M:
				cnt = L1_QWORDS_512MB;
				break;
			case SZ_32M:
				cnt = L1_QWORDS_32MB;
				break;
			default:	/* SZ_2MB; last case, no break needed */
				cnt = L1_QWORDS_2MB;
			}

			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
				first, cont_size / SZ_1M);

			/* Fill Contiguous descriptors */
			fill_desc(&l1[idx], l1_desc, cnt);
			return (first + cont_size);
		}
	}

	/* No contiguous block size fits here */
	return first;
}
810
/*
 * Build a Granules descriptor that repeats the 4-bit 'gpi' value in all
 * sixteen GPI fields of the 64-bit entry.
 */
static uint64_t build_l1_desc(unsigned int gpi)
{
	uint64_t desc = (uint64_t)gpi;

	/* Double the populated width each step: 4 -> 8 -> 16 -> 32 -> 64 bits */
	for (unsigned int shift = 4U; shift < 64U; shift <<= 1) {
		desc |= desc << shift;
	}

	return desc;
}
820
/*
 * Helper function to fill out GPI entries from 'first' to 'last' granule
 * address in a single L1 table with 'l1_desc' Granules descriptor.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI set this range to
 *
 * Return
 *   Address of next granule in range.
 */
static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
				   uintptr_t last, unsigned int gpi)
{
	uint64_t gpi_mask;
	unsigned long i;

	/* Generate Granules descriptor */
	uint64_t l1_desc = build_l1_desc(gpi);

	/* Shift the mask if we're starting in the middle of an L1 entry */
	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region */
	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {

		/* Account for stopping in the middle of an L1 entry */
		if (i == GPT_L1_INDEX(last)) {
			/* 15U is the highest 4-bit GPI field index per entry */
			gpi_mask &= (gpi_mask >> ((15U -
				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		/* The masked fields must currently hold GPI_ANY */
		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));

		/* Write GPI values */
		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);

		/* Reset mask */
		gpi_mask = ULONG_MAX;
	}

	/* First granule past 'last' */
	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
}
866
/*
 * Helper function to fill out GPI entries in a single L1 table.
 * This function fills out an entire L1 table with either Granules or Contiguous
 * (RME_GPT_MAX_BLOCK != 0) descriptors depending on region length and alignment.
 * Note. If RME_GPT_MAX_BLOCK == 0, then the L1 tables are filled with regular
 * Granules descriptors.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI set this range to
 */
static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
			unsigned int gpi)
{
	assert(l1 != NULL);
	assert(first <= last);
	/* Both endpoints must be granule-aligned and in the same L0 region */
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));

#if (RME_GPT_MAX_BLOCK != 0)
	/* Each fill_* helper advances 'first' past the entries it wrote */
	while (first <= last) {
		/* Region length */
		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		if (length < SZ_2M) {
			/*
			 * Fill with Granule descriptors in case of
			 * region length < 2MB.
			 */
			first = fill_l1_gran_desc(l1, first, last, gpi);

		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
			/*
			 * For region length >= 2MB and at least 2MB aligned
			 * call to fill_l1_cont_desc will iterate through
			 * all block sizes (512MB, 32MB and 2MB) supported and
			 * fill corresponding Contiguous descriptors.
			 */
			first = fill_l1_cont_desc(l1, first, length, gpi);
		} else {
			/*
			 * For not aligned region >= 2MB fill with Granules
			 * descriptors up to the next 2MB aligned address.
			 */
			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
					     GPT_PGS_ACTUAL_SIZE(gpt_config.p);

			first = fill_l1_gran_desc(l1, first, new_last, gpi);
		}
	}
#else
	/* Fill with Granule descriptors */
	first = fill_l1_gran_desc(l1, first, last, gpi);
#endif
	/* The whole [first, last] range must have been consumed */
	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
}
926
927 /*
928 * This function finds the next available unused L1 table and initializes all
929 * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
930 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
931 * event that a PAS region stops midway through an L1 table, thus guaranteeing
932 * that all memory not explicitly assigned is GPI_ANY. This function does not
933 * check for overflow conditions, that should be done by the caller.
934 *
935 * Return
936 * Pointer to the next available L1 table.
937 */
get_new_l1_tbl(void)938 static uint64_t *get_new_l1_tbl(void)
939 {
940 /* Retrieve the next L1 table */
941 uint64_t *l1 = (uint64_t *)gpt_l1_tbl;
942
943 /* Increment L1 GPT address */
944 gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);
945
946 /* Initialize all GPIs to GPT_GPI_ANY */
947 for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
948 l1[i] = GPT_L1_ANY_DESC;
949 }
950
951 return l1;
952 }
953
/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region.
 */
static void generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx, gpi;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Get GPI */
	gpi = GPT_PAS_ATTR_GPI(pas->attrs);

	/* Iterate over each L0 region in this memory range */
	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space */
			l1_gpt_arr = get_new_l1_tbl();

			/* Fill out the L0 descriptor and flush it */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);

		/* Advance cur_pa to first granule in next L0 region */
		cur_pa = get_l1_end_pa(cur_pa, end_pa);
	}
}
1026
1027 /*
1028 * This function flushes a range of L0 descriptors used by a given PAS region
1029 * array. There is a chance that some unmodified L0 descriptors would be flushed
1030 * in the case that there are "holes" in an array of PAS regions but overall
1031 * this should be faster than individually flushing each modified L0 descriptor
1032 * as they are created.
1033 *
1034 * Parameters
1035 * *pas Pointer to an array of PAS regions.
1036 * pas_count Number of entries in the PAS array.
1037 */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
	unsigned long first;
	unsigned long last;
	uint64_t *l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas != NULL);
	assert(pas_count != 0U);

	/* Seed the bounds from the first PAS entry */
	first = GPT_L0_IDX(pas[0].base_pa);
	last = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);

	/* Widen the bounds to cover every remaining PAS entry */
	for (unsigned long i = 1UL; i < pas_count; i++) {
		unsigned long lo = GPT_L0_IDX(pas[i].base_pa);
		unsigned long hi = GPT_L0_IDX(pas[i].base_pa +
					      pas[i].size - 1UL);

		if (lo < first) {
			first = lo;
		}
		if (hi > last) {
			last = hi;
		}
	}

	/*
	 * Flush every L0 descriptor between the bounds; '+ 1' so the
	 * descriptor at 'last' itself is included.
	 */
	flush_dcache_range((uintptr_t)&l0_base[first],
			   ((last + 1UL) - first) * sizeof(uint64_t));
}
1069
is_gpi_transition_permitted(uint8_t caller,uint8_t current_gpi,uint8_t target_gpi)1070 static inline bool is_gpi_transition_permitted(uint8_t caller,
1071 uint8_t current_gpi,
1072 uint8_t target_gpi)
1073 {
1074 /*
1075 * So we can use a small lookup table, change caller security state 0x21
1076 * (from realm) to 0x2 so it can be an index.
1077 */
1078 if (caller == SMC_FROM_REALM) {
1079 caller = 0x2;
1080 }
1081
1082 assert(caller <= 0x2);
1083 assert(current_gpi <= GPT_GPI_ANY);
1084
1085 return (gpi_config[current_gpi].policy[caller] >> target_gpi) & 0x1;
1086 }
1087
1088 /*
1089 * Public API to enable granule protection checks once the tables have all been
1090 * initialized. This function is called at first initialization and then again
1091 * later during warm boots of CPU cores.
1092 *
1093 * Return
1094 * Negative Linux error code in the event of a failure, 0 for success.
1095 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Enable NSP and SA if FEAT_RME_GDI is implemented */
	if (is_feat_rme_gdi_supported()) {
		gpccr_el3 |= GPCCR_NSP_BIT;
		gpccr_el3 |= GPCCR_SA_BIT;

		/*
		 * Enable these GPIs in NS transition policies. The |= makes
		 * this idempotent, so re-running on warm boot is harmless.
		 */
		gpi_config[GPT_GPI_NS].policy[SMC_FROM_NON_SECURE] |=
			((1 << GPT_GPI_NSP) | (1 << GPT_GPI_SA));
		gpi_config[GPT_GPI_NSO].policy[SMC_FROM_NON_SECURE] |=
			((1 << GPT_GPI_NSP) | (1 << GPT_GPI_SA));
		gpi_config[GPT_GPI_NSP].policy[SMC_FROM_NON_SECURE] |=
			((1 << GPT_GPI_NS) | (1 << GPT_GPI_NSO));
		gpi_config[GPT_GPI_SA].policy[SMC_FROM_NON_SECURE] |=
			((1 << GPT_GPI_NS) | (1 << GPT_GPI_NSO));
	}

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT: second write sets the GPC bit on top of the config */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* Enable NSO encoding if FEAT_RME_GPC2 is supported. */
	if (is_feat_rme_gpc2_present()) {
		gpccr_el3 |= GPCCR_NSO_BIT;

		/* Enable NSO in NS transition policies. */
		gpi_config[GPT_GPI_NS].policy[SMC_FROM_NON_SECURE] |=
			(1 << GPT_GPI_NSO);
		gpi_config[GPT_GPI_NSO].policy[SMC_FROM_NON_SECURE] |=
			(1 << GPT_GPI_NS);
	}

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
1178
1179 /*
1180 * Public API that initializes the entire protected space to GPT_GPI_ANY using
1181 * the L0 tables (block descriptors). Ideally, this function is invoked prior
1182 * to DDR discovery and initialization. The MMU must be initialized before
1183 * calling this function.
1184 *
1185 * Parameters
1186 * pps PPS value to use for table generation
1187 * l0_mem_base Base address of L0 tables in memory.
1188 * l0_mem_size Total size of memory available for L0 tables.
1189 *
1190 * Return
1191 * Negative Linux error code in the event of a failure, 0 for success.
1192 */
int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	uint64_t *l0_tbl;
	int ret;

	/* The MMU and data caches must be on before generating tables */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Validate PPS and the L0 memory region */
	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Seed every L0 entry with a block descriptor granting GPI_ANY */
	l0_tbl = (uint64_t *)l0_mem_base;
	for (unsigned int idx = 0U;
	     idx < GPT_L0_REGION_COUNT(gpt_config.t); idx++) {
		l0_tbl[idx] = GPT_L0_BLK_DESC(GPT_GPI_ANY);
	}

	/* Push the freshly written table out to memory */
	flush_dcache_range((uintptr_t)l0_mem_base,
			   GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Record the L0 base so later stages can locate the tables */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}
1224
1225 /*
1226 * Public API that carves out PAS regions from the L0 tables and builds any L1
1227 * tables that are needed. This function ideally is run after DDR discovery and
1228 * initialization. The L0 tables must have already been initialized to GPI_ANY
1229 * when this function is called.
1230 *
1231 * This function can be called multiple times with different L1 memory ranges
1232 * and PAS regions if it is desirable to place L1 tables in different locations
1233 * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
1234 * in the DDR bank that they control).
1235 *
1236 * Parameters
1237 * pgs PGS value to use for table generation.
1238 * l1_mem_base Base address of memory used for L1 tables.
1239 * l1_mem_size Total size of memory available for L1 tables.
1240 * *pas_regions Pointer to PAS regions structure array.
1241 * pas_count Total number of PAS regions.
1242 *
1243 * Return
1244 * Negative Linux error code in the event of a failure, 0 for success.
1245 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int l1_gpt_cnt, ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* PGS is needed for validate_pas_mappings so check it now */
	if (pgs > GPT_PGS_MAX) {
		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/*
	 * Check if L1 GPTs are required and how many. A negative return is
	 * a Linux error code; zero means all PAS map as L0 block descriptors.
	 */
	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters */
	if (l1_gpt_cnt > 0) {
		ret = validate_l1_params(l1_mem_base, l1_mem_size,
					 (unsigned int)l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation */
		gpt_l1_tbl = l1_mem_base;
	}

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	INFO("GPT: Boot Configuration\n");
	INFO(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO(" PAS count: %u\n", pas_count);
	INFO(" L0 base: 0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   (size_t)l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
1337
1338 /*
1339 * Public API to initialize the runtime gpt_config structure based on the values
1340 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
1341 * typically happens in a bootloader stage prior to setting up the EL3 runtime
1342 * environment for the granule transition service so this function detects the
1343 * initialization from a previous stage. Granule protection checks must be
1344 * enabled already or this function will return an error.
1345 *
1346 * Parameters
1347 * l1_bitlocks_base Base address of memory for L1 tables bitlocks.
1348 * l1_bitlocks_size Total size of memory available for L1 tables bitlocks.
1349 *
1350 * Return
1351 * Negative Linux error code in the event of a failure, 0 for success.
1352 */
int gpt_runtime_init(uintptr_t l1_bitlocks_base, size_t l1_bitlocks_size)
{
	u_register_t reg;
	__unused size_t locks_size;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Ensure GPC are already enabled */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0UL) {
		ERROR("GPT: Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR, we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/*
	 * Size of GPT bitlocks in bytes for the protected address space
	 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
	 */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
		     (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);
	/*
	 * If protected space size is less than the size covered
	 * by 'bitlock' structure, check for a single bitlock.
	 */
	if (locks_size < LOCK_SIZE) {
		locks_size = LOCK_SIZE;
	/* Check bitlocks array size */
	} else if (locks_size > l1_bitlocks_size) {
		ERROR("GPT: Inadequate GPT bitlocks memory\n");
		ERROR(" Expected 0x%lx bytes, got 0x%lx\n",
		      locks_size, l1_bitlocks_size);
		return -ENOMEM;
	}

	gpt_bitlock = (bitlock_t *)l1_bitlocks_base;

	/* Initialise GPT bitlocks to the unlocked state */
	(void)memset((void *)gpt_bitlock, 0, locks_size);

	/* Flush GPT bitlocks to memory */
	flush_dcache_range((uintptr_t)gpt_bitlock, locks_size);
#endif /* RME_GPT_BITLOCK_BLOCK */

	VERBOSE("GPT: Runtime Configuration\n");
	VERBOSE(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE(" L0 base: 0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
#if (RME_GPT_BITLOCK_BLOCK != 0)
	VERBOSE(" Bitlocks: 0x%"PRIxPTR"/0x%lx\n", (uintptr_t)gpt_bitlock,
		locks_size);
#endif
	return 0;
}
1430
1431 /*
1432 * A helper to write the value (target_pas << gpi_shift) to the index of
1433 * the gpt_l1_addr.
1434 */
write_gpt(uint64_t * gpt_l1_desc,uint64_t * gpt_l1_addr,unsigned int gpi_shift,unsigned int idx,unsigned int target_pas)1435 static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
1436 unsigned int gpi_shift, unsigned int idx,
1437 unsigned int target_pas)
1438 {
1439 *gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
1440 *gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
1441 gpt_l1_addr[idx] = *gpt_l1_desc;
1442
1443 dsboshst();
1444 }
1445
1446 /*
1447 * Helper to retrieve the gpt_l1_* information from the base address
1448 * returned in gpi_info.
1449 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t l0_desc;
	__unused unsigned int block_idx;

	/* The granule must be covered by an L0 table descriptor */
	l0_desc = ((uint64_t *)gpt_config.plat_gpt_l0_base)[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
		VERBOSE(" Base=0x%"PRIx64"\n", base);
		return -EINVAL;
	}

	/* Decompose the PA into L1 table address, entry index and GPI shift */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(l0_desc);
	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Index of the (RME_GPT_BITLOCK_BLOCK * 512MB) block holding base */
	block_idx = (unsigned int)(base / (RME_GPT_BITLOCK_BLOCK * SZ_512M));

	/* Bitlock word and bit mask guarding that block */
	gpi_info->lock = &gpt_bitlock[block_idx / LOCK_BITS];
	gpi_info->mask = 1U << (block_idx & (LOCK_BITS - 1U));
#endif
	return 0;
}
1478
1479 /*
1480 * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
1481 * This function is called with bitlock or spinlock acquired.
1482 */
static void read_gpi(gpi_info_t *gpi_info)
{
	uint64_t desc = gpi_info->gpt_l1_addr[gpi_info->idx];

	gpi_info->gpt_l1_desc = desc;

	if ((desc & GPT_L1_TYPE_CONT_DESC_MASK) == GPT_L1_TYPE_CONT_DESC) {
		/* Contiguous descriptor: one GPI field for the whole block */
		gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(desc);
	} else {
		/* Granules descriptor: extract this granule's GPI nibble */
		gpi_info->gpi = (unsigned int)
			((desc >> gpi_info->gpi_shift) &
			 GPT_L1_GRAN_DESC_GPI_MASK);
	}
}
1497
flush_page_to_popa(uintptr_t addr)1498 static void flush_page_to_popa(uintptr_t addr)
1499 {
1500 size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);
1501
1502 if (is_feat_mte2_supported()) {
1503 flush_dcache_to_popa_range_mte2(addr, size);
1504 } else {
1505 flush_dcache_to_popa_range(addr, size);
1506 }
1507 }
1508
1509 /*
1510 * Helper function to check if all L1 entries in 2MB block have
1511 * the same Granules descriptor value.
1512 *
1513 * Parameters
1514 * base Base address of the region to be checked
1515 * gpi_info Pointer to 'gpt_config_t' structure
1516 * l1_desc GPT Granules descriptor with all entries
1517 * set to the same GPI.
1518 *
1519 * Return
1520 * true if L1 all entries have the same descriptor value, false otherwise.
1521 */
check_fuse_2mb(uint64_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)1522 __unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1523 uint64_t l1_desc)
1524 {
1525 /* Last L1 entry index in 2MB block */
1526 unsigned int long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
1527 gpt_l1_cnt_2mb - 1UL;
1528
1529 /* Number of L1 entries in 2MB block */
1530 unsigned int cnt = gpt_l1_cnt_2mb;
1531
1532 /*
1533 * Start check from the last L1 entry and continue until the first
1534 * non-matching to the passed Granules descriptor value is found.
1535 */
1536 while (cnt-- != 0U) {
1537 if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
1538 /* Non-matching L1 entry found */
1539 return false;
1540 }
1541 }
1542
1543 return true;
1544 }
1545
fuse_2mb(uint64_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)1546 __unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
1547 uint64_t l1_desc)
1548 {
1549 /* L1 entry index of the start of 2MB block */
1550 unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));
1551
1552 /* 2MB Contiguous descriptor */
1553 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
1554
1555 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1556
1557 fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
1558 }
1559
1560 /*
1561 * Helper function to check if all 1st L1 entries of 2MB blocks
1562 * in 32MB have the same 2MB Contiguous descriptor value.
1563 *
1564 * Parameters
1565 * base Base address of the region to be checked
1566 * gpi_info Pointer to 'gpt_config_t' structure
1567 * l1_desc GPT Granules descriptor.
1568 *
1569 * Return
1570 * true if all L1 entries have the same descriptor value, false otherwise.
1571 */
check_fuse_32mb(uint64_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)1572 __unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
1573 uint64_t l1_desc)
1574 {
1575 /* The 1st L1 entry index of the last 2MB block in 32MB */
1576 unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
1577 (15UL * gpt_l1_cnt_2mb);
1578
1579 /* 2MB Contiguous descriptor */
1580 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
1581
1582 /* Number of 2MB blocks in 32MB */
1583 unsigned int cnt = 16U;
1584
1585 /* Set the first L1 entry to 2MB Contiguous descriptor */
1586 gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;
1587
1588 /*
1589 * Start check from the 1st L1 entry of the last 2MB block and
1590 * continue until the first non-matching to 2MB Contiguous descriptor
1591 * value is found.
1592 */
1593 while (cnt-- != 0U) {
1594 if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
1595 /* Non-matching L1 entry found */
1596 return false;
1597 }
1598 idx -= gpt_l1_cnt_2mb;
1599 }
1600
1601 return true;
1602 }
1603
fuse_32mb(uint64_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)1604 __unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
1605 uint64_t l1_desc)
1606 {
1607 /* L1 entry index of the start of 32MB block */
1608 unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));
1609
1610 /* 32MB Contiguous descriptor */
1611 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
1612
1613 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1614
1615 fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
1616 }
1617
1618 /*
1619 * Helper function to check if all 1st L1 entries of 32MB blocks
1620 * in 512MB have the same 32MB Contiguous descriptor value.
1621 *
1622 * Parameters
1623 * base Base address of the region to be checked
1624 * gpi_info Pointer to 'gpt_config_t' structure
1625 * l1_desc GPT Granules descriptor.
1626 *
1627 * Return
1628 * true if all L1 entries have the same descriptor value, false otherwise.
1629 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				      uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
					(15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/*
	 * Set the first L1 entry of the current 32MB block to the 32MB
	 * Contiguous descriptor so the block just fused matches the check
	 * below. (Comment previously said "2MB" — the descriptor written
	 * here is the 32MB one.)
	 */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 32MB block and
	 * continue until the first non-matching to 32MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}
1661
fuse_512mb(uint64_t base,const gpi_info_t * gpi_info,uint64_t l1_desc)1662 __unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
1663 uint64_t l1_desc)
1664 {
1665 /* L1 entry index of the start of 512MB block */
1666 unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));
1667
1668 /* 512MB Contiguous descriptor */
1669 uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);
1670
1671 VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
1672
1673 fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
1674 }
1675
1676 /*
1677 * Helper function to convert GPI entries in a single L1 table
1678 * from Granules to Contiguous descriptor.
1679 *
1680 * Parameters
1681 * base Base address of the region to be written
1682 * gpi_info Pointer to 'gpt_config_t' structure
1683 * l1_desc GPT Granules descriptor with all entries
1684 * set to the same GPI.
1685 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with check for 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* Check for 2MB fusing failed */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	/* Build caps fusing at 2MB: fuse and stop here */
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* Check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	/* Build caps fusing at 32MB: fuse and stop here */
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* Check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);

#endif /* RME_GPT_MAX_BLOCK == 32 */
#endif /* RME_GPT_MAX_BLOCK == 2 */
}
1721
1722 /*
1723 * Helper function to convert GPI entries in a single L1 table
1724 * from Contiguous to Granules descriptor. This function updates
1725 * descriptor to Granules in passed 'gpt_config_t' structure as
1726 * the result of shuttering.
1727 *
1728 * Parameters
1729 * base Base address of the region to be written
1730 * gpi_info Pointer to 'gpt_config_t' structure
1731 * l1_desc GPT Granules descriptor set this range to.
1732 */
__unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
				   uint64_t l1_desc)
{
	/* Look-up table for 2MB, 32MB and 512MB locks shattering */
	static const gpt_shatter_func gpt_shatter_lookup[] = {
		shatter_2mb,
		shatter_32mb,
		shatter_512mb
	};

	/* Look-up table for invalidation TLBs for 2MB, 32MB and 512MB blocks */
	static const gpt_tlbi_lookup_t tlbi_lookup[] = {
		{ tlbirpalos_2m, ~(SZ_2M - 1UL) },
		{ tlbirpalos_32m, ~(SZ_32M - 1UL) },
		{ tlbirpalos_512m, ~(SZ_512M - 1UL) }
	};

	/*
	 * Get shattering level from Contig field of Contiguous descriptor.
	 * Contig values 1/2/3 map to indices 0/1/2 in the tables above.
	 */
	unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;

	/* Shatter contiguous block into individual Granules descriptors */
	gpt_shatter_lookup[level](base, gpi_info, l1_desc);

	/* Invalidate the whole (mask-aligned) block range, then order it */
	tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
	dsbosh();

	/*
	 * Update 'gpt_config_t' structure's descriptor to Granules to reflect
	 * the shattered GPI back to caller.
	 */
	gpi_info->gpt_l1_desc = l1_desc;
}
1765
/* Write a single L1 GPI entry and make the new state globally observable. */
static inline void gpt_write_entry(uint64_t base, uint8_t target_gpi,
				   gpi_info_t *gpi_info)
{
	/* Patch the GPI field in the live L1 descriptor */
	write_gpt(&gpi_info->gpt_l1_desc, gpi_info->gpt_l1_addr,
		  gpi_info->gpi_shift, gpi_info->idx, target_gpi);

	/* TLB invalidate + barrier so every agent sees the update */
	tlbi_page_dsbosh(base);
}
1776
/*
 * Transition a granule into a more-privileged PAS (delegate), scrubbing
 * cached data on both sides of the transition.
 */
static inline void gpt_delegate(uint64_t base, uint8_t target_gpi,
				gpi_info_t *gpi_info)
{
	uint8_t source_gpi = gpi_info->gpi;

	/*
	 * In order to maintain mutual distrust between states, remove any data
	 * speculatively fetched into the target physical address space.
	 */
	flush_page_to_popa(base | GPI_TO_NSE(target_gpi));

	gpt_write_entry(base, target_gpi, gpi_info);

	/* Ensure scrubbed data has made it past PoPA */
	flush_page_to_popa(base | GPI_TO_NSE(source_gpi));
}
1793
/*
 * Transition a granule back to a less-privileged PAS (undelegate). Access is
 * first revoked via NO_ACCESS so in-flight writes cannot become observable,
 * then caches are scrubbed, then the final GPI is written.
 */
static inline void gpt_undelegate(uint64_t base, uint8_t target_gpi,
				  gpi_info_t *gpi_info)
{
	uint8_t source_gpi = gpi_info->gpi;

	/*
	 * In order to maintain mutual distrust between states, remove access
	 * now, in order to guarantee that writes to the currently-accessible
	 * physical address space will not later become observable.
	 */
	write_gpt(&gpi_info->gpt_l1_desc, gpi_info->gpt_l1_addr,
		  gpi_info->gpi_shift, gpi_info->idx, GPT_GPI_NO_ACCESS);

	/* Ensure all agents observe NO ACCESS state. */
	tlbi_page_dsbosh(base);

	/*
	 * Ensure that the scrubbed data have made it past the PoPA for both
	 * old and new security states.
	 */
	flush_page_to_popa(base | GPI_TO_NSE(source_gpi));
	flush_page_to_popa(base | GPI_TO_NSE(target_gpi));

	gpt_write_entry(base, target_gpi, gpi_info);
}
1819
1820 /*
1821 * This function is the core of the granule transition service, including both
1822 * delegate and undelegate operations. When a granule transition request occurs
1823 * it is routed to this function which will determine if it is valid and fulfill
1824 * it.
1825 *
1826 * Parameters
1827 * base Base address of the first granule to transition, aligned
1828 * to granule size.
1829 * *granule_count Pointer to a variable containing the number of granules
1830 * to be transitioned. This value will be overwritten with
1831 * the number of granules actually transitioned once this
1832 * function returns. It is possible to return an error part
1833 * way through the process and have a non-zero number of
1834 * granules transitioned. TODO is this acceptable?
1835 * target_gpi GPI to transition the granules to.
1836 * src_sec_state Security state of the requesting entity. This will be
1837 * combined with target_gpi to determine whether a
1838 * transition is allowed.
1839 */
int gpt_transition_pas(uint64_t base, uint64_t *granule_count,
		       uint8_t target_gpi, uint8_t src_sec_state)
{
	gpi_info_t gpi_info = { 0, NULL, 0, 0, 0 };
	int res;
	size_t size;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Only one granule supported per call at this point. */
	if ((*granule_count) != 1U) {
		VERBOSE("GPT: Invalid granule count! Only one allowed per transition request.\n");
		return -EINVAL;
	}

	/*
	 * Calculate total region size and zero out granule count. The count
	 * is re-incremented only after the transition fully succeeds.
	 */
	size = *granule_count * GPT_PGS_ACTUAL_SIZE(gpt_config.p);
	*granule_count = 0U;

	/* Make sure target GPI is valid. */
	if (!is_gpi_valid(target_gpi)) {
		VERBOSE("GPT: Invalid target GPI value in request: %u\n",
			target_gpi);
		return -EPERM;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE(" Base=0x%" PRIx64 "\n", base);
		VERBOSE(" Size=%lu\n", size);
		return -EINVAL;
	}

	/*
	 * Make sure base and size are granule-aligned and inside the
	 * protected physical space.
	 * NOTE(review): '>=' rejects a granule that ends exactly at the PPS
	 * limit (base + size == PPS size) — confirm that exclusion is
	 * intentional.
	 */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE(" Base=0x%" PRIx64 "\n", base);
		VERBOSE(" Size=%lu\n", size);
		return -EINVAL;
	}

	/* Get GPI info for next granule to transition. */
	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/* Serialize against concurrent updates of this table region */
	GPT_LOCK;

	read_gpi(&gpi_info);

	/* Verify that transition of this granule is allowed. */
	if (!is_gpi_transition_permitted(src_sec_state, gpi_info.gpi,
					 target_gpi)) {
		VERBOSE("(%s) Sec state %u is not allowed to transition %u to %u!\n",
			__func__, src_sec_state, gpi_info.gpi, target_gpi);
		console_flush();
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/*
	 * Check for Contiguous descriptor: it must be shattered back into
	 * individual Granules descriptors before one granule can change.
	 */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, GPI_TO_DESC(gpi_info.gpi));
	}
#endif

	if (((target_gpi == GPT_GPI_NS) && (gpi_info.gpi == GPT_GPI_NSO)) ||
	    ((target_gpi == GPT_GPI_NSO) && (gpi_info.gpi == GPT_GPI_NS))) {
		/* Handle NS/NSO transition: no scrubbing, simple GPI update */
		gpt_write_entry(base, target_gpi, &gpi_info);
	} else if ((target_gpi == GPT_GPI_NS) || (target_gpi == GPT_GPI_NSO)) {
		/* Handle undelegate transition. */
		gpt_undelegate(base, target_gpi, &gpi_info);
	} else {
		/* Handle delegate transition. */
		gpt_delegate(base, target_gpi, &gpi_info);
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/*
	 * If every GPI in the L1 descriptor now matches the target, try to
	 * fuse the run back into a Contiguous descriptor.
	 */
	if (gpi_info.gpt_l1_desc == GPI_TO_DESC(target_gpi)) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPI_TO_DESC(target_gpi));
	}
#endif

	GPT_UNLOCK;

	/* Increment granule counter only once everything is complete. */
	(*granule_count)++;

	return 0;
}
1943