/*
 * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <common/debug.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/smccc.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "gpt_rme_private.h"

#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library"
#endif

/*
 * Lookup T from PPS
 *
 *   PPS    Size    T
 *   0b000  4GB     32
 *   0b001  64GB    36
 *   0b010  1TB     40
 *   0b011  4TB     42
 *   0b100  16TB    44
 *   0b101  256TB   48
 *   0b110  4PB     52
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};
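
/*
 * Example (from the table above): a system programmed with
 * GPCCR_EL3.PPS = 0b010 selects gpt_t_lookup[2] = PPS_1TB_T = 40,
 * i.e. a 1TB (2^40 byte) protected physical address space.
 */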

/*
 * Lookup P from PGS
 *
 *   PGS   Size   P
 *   0b00  4KB    12
 *   0b10  16KB   14
 *   0b01  64KB   16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
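
/*
 * Example: GPCCR_EL3.PGS = 0b01 (64KB granules) indexes gpt_p_lookup[1] =
 * PGS_64KB_P = 16, while PGS = 0b10 (16KB granules) indexes
 * gpt_p_lookup[2] = PGS_16KB_P = 14, matching the encoding quirk noted
 * above.
 */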

static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc);
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc);
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc);

/*
 * This structure contains GPT configuration data
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;
	gpccr_pps_e pps;
	gpt_t_val_e t;
	gpccr_pgs_e pgs;
	gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;

/*
 * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
 * +-------+------------+
 * |  PGS  | L1 entries |
 * +-------+------------+
 * |  4KB  |     32     |
 * +-------+------------+
 * | 16KB  |      8     |
 * +-------+------------+
 * | 64KB  |      2     |
 * +-------+------------+
 */
static unsigned int gpt_l1_cnt_2mb;

/*
 * Mask for the L1 index field, depending on
 * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
 * +---------+-------------------------------+
 * |         |             PGS               |
 * +---------+----------+----------+---------+
 * | L0GPTSZ |   4KB    |   16KB   |   64KB  |
 * +---------+----------+----------+---------+
 * |   1GB   |  0x3FFF  |   0xFFF  |  0x3FF  |
 * +---------+----------+----------+---------+
 * |  16GB   | 0x3FFFF  |  0xFFFF  | 0x3FFF  |
 * +---------+----------+----------+---------+
 * |  64GB   | 0xFFFFF  | 0x3FFFF  | 0xFFFF  |
 * +---------+----------+----------+---------+
 * |  512GB  | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
 * +---------+----------+----------+---------+
 */
static uint64_t gpt_l1_index_mask;

/* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
#define L1_QWORDS_2MB	(gpt_l1_cnt_2mb / 2U)
#define L1_QWORDS_32MB	(L1_QWORDS_2MB * 16U)
#define L1_QWORDS_512MB	(L1_QWORDS_32MB * 16U)
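
/*
 * Example (4KB PGS): gpt_l1_cnt_2mb = 32, so L1_QWORDS_2MB = 16,
 * L1_QWORDS_32MB = 256 and L1_QWORDS_512MB = 4096 128-bit entries.
 */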

/* Size in bytes of L1 entries in 2MB, 32MB */
#define L1_BYTES_2MB	(gpt_l1_cnt_2mb * sizeof(uint64_t))
#define L1_BYTES_32MB	(L1_BYTES_2MB * 16U)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_INDEX(_pa) \
	(((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)
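
/*
 * Worked example, assuming GPT_L1_IDX_SHIFT(p) resolves to (p + 4), i.e.
 * each 64-bit L1 entry covers 16 granules: with 4KB granules (P = 12),
 * L0GPTSZ = 1GB and therefore gpt_l1_index_mask = 0x3FFF (see the table
 * above), PA 0x40301000 gives GPT_L1_INDEX = (0x40301000 >> 16) & 0x3FFF
 * = 0x4030 & 0x3FFF = 0x30.
 */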

/* This variable is used during initialization of the L1 tables */
static uintptr_t gpt_l1_tbl;

/* These variables are used during runtime */
#if (RME_GPT_BITLOCK_BLOCK == 0)
/*
 * The GPTs are protected by a global spinlock to ensure
 * that multiple CPUs do not attempt to change the descriptors at once.
 */
static spinlock_t gpt_lock;

/*
 * Lock/unlock macros for GPT entries.
 *
 * Access to GPT is controlled by a global lock to ensure
 * that no more than one CPU is allowed to make changes at any
 * given time.
 */
#define GPT_LOCK	spin_lock(&gpt_lock)
#define GPT_UNLOCK	spin_unlock(&gpt_lock)
#else

/* Base address of bitlocks array */
static bitlock_t *gpt_bitlock;

/*
 * Access to a block of memory is controlled by a bitlock.
 * Size of block = RME_GPT_BITLOCK_BLOCK * 512MB.
 */
#define GPT_LOCK	bit_lock(gpi_info.lock, gpi_info.mask)
#define GPT_UNLOCK	bit_unlock(gpi_info.lock, gpi_info.mask)
#endif /* RME_GPT_BITLOCK_BLOCK */

static void tlbi_page_dsbosh(uintptr_t base)
{
	/* Look-up table for TLB invalidation of 4KB, 16KB and 64KB pages */
	static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
		{ tlbirpalos_4k, ~(SZ_4K - 1UL) },
		{ tlbirpalos_64k, ~(SZ_64K - 1UL) },
		{ tlbirpalos_16k, ~(SZ_16K - 1UL) }
	};

	tlbi_page_lookup[gpt_config.pgs].function(
			base & tlbi_page_lookup[gpt_config.pgs].mask);
	dsbosh();
}

/*
 * Helper function to fill out GPI entries in a single L1 table
 * with Granules or Contiguous descriptor.
 *
 * Parameters
 *   l1		Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
 *   l1_desc	GPT Granules or Contiguous descriptor to set this range to
 *   cnt	Number of 128-bit (double 64-bit descriptor) L1 entries to fill
 */
static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
{
	uint128_t *l1_quad = (uint128_t *)l1;
	uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);

	VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);

	for (unsigned int i = 0U; i < cnt; i++) {
		*l1_quad++ = l1_quad_desc;
	}
}

static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Convert 2MB Contiguous block to Granules */
	fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
}

static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			 uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
	const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get index corresponding to 32MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_32MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 2MB blocks in 32MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		/* Fill with Granules or Contiguous descriptors */
		fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
			  L1_QWORDS_2MB);
		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
	}
}

static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			  uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
	const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get index corresponding to 512MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_512MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 32MB blocks in 512MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		if (l1 == l1_32mb) {
			/* Shatter this 32MB block */
			shatter_32mb(base, gpi_info, l1_desc);
		} else {
			/* Fill 32MB with Contiguous descriptors */
			fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
		}

		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
	}
}

/*
 * This function checks to see if a GPI value is valid.
 *
 * These are valid GPI values.
 *   GPT_GPI_NO_ACCESS	U(0x0)
 *   GPT_GPI_SA		U(0x4)
 *   GPT_GPI_NSP	U(0x5)
 *   GPT_GPI_SECURE	U(0x8)
 *   GPT_GPI_NS		U(0x9)
 *   GPT_GPI_ROOT	U(0xA)
 *   GPT_GPI_REALM	U(0xB)
 *   GPT_GPI_NSO	U(0xD)
 *   GPT_GPI_ANY	U(0xF)
 *
 * Parameters
 *   gpi	GPI to check for validity.
 *
 * Return
 *   true for a valid GPI, false for an invalid one.
 */
static bool is_gpi_valid(unsigned int gpi)
{
	switch (gpi) {
	case GPT_GPI_NO_ACCESS:
	case GPT_GPI_SECURE:
	case GPT_GPI_NS:
	case GPT_GPI_ROOT:
	case GPT_GPI_REALM:
	case GPT_GPI_ANY:
		return true;
	case GPT_GPI_NSO:
		return is_feat_rme_gpc2_present();
	case GPT_GPI_SA:
	case GPT_GPI_NSP:
		return is_feat_rme_gdi_supported();
	default:
		return false;
	}
}

/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
		return true;
	}
	return false;
}
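
/*
 * Example: base_1 = 0x0, size_1 = 0x1000 and base_2 = 0x1000,
 * size_2 = 0x1000 do not overlap: (0x0 + 0x1000) > 0x1000 is false,
 * since the regions are treated as half-open intervals.
 */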

/*
 * This helper function checks to see if any PAS region with an array index
 * below pas_idx occupies the L0 region at index l0_idx in the L0 table.
 *
 * Parameters
 *   l0_idx:      Index of the L0 entry to check
 *   pas_regions: PAS region array
 *   pas_idx:     Upper bound of the PAS array index.
 *
 * Return
 *   True if a PAS region occupies the L0 region in question, false if not.
 */
static bool does_previous_pas_exist_here(unsigned int l0_idx,
					 pas_region_t *pas_regions,
					 unsigned int pas_idx)
{
	/* Iterate over PAS regions up to pas_idx */
	for (unsigned int i = 0U; i < pas_idx; i++) {
		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
				      GPT_L0GPTSZ_ACTUAL_SIZE,
				      pas_regions[i].base_pa,
				      pas_regions[i].size)) {
			return true;
		}
	}
	return false;
}

/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
					      pas_regions[idx].size,
					      pas_regions[i].base_pa,
					      pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
				      i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i =
		     (unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same L0 region we subtract from pas_l1_cnt so that
			 * only the first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
					       pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}

/*
 * This function validates L0 initialization parameters.
 *
 * Parameters
 *   l0_mem_base	Base address of memory used for L0 table.
 *   l0_mem_size	Size of memory available for L0 table.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
			      size_t l0_mem_size)
{
	size_t l0_alignment;

	/*
	 * Make sure PPS is valid and then store it since macros need this
	 * value to work.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4KB or L0 table size */
	l0_alignment = SZ_4K;
	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	}

	/* Check base address */
	if ((l0_mem_base == 0UL) ||
	    ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

	/* Check memory size for L0 table */
	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		ERROR("GPT: Inadequate L0 memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      GPT_L0_TABLE_SIZE(gpt_config.t), l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}

/*
 * In the event that L1 tables are needed, this function validates
 * the L1 table generation parameters.
 *
 * Parameters
 *   l1_mem_base	Base address of memory used for L1 table allocation.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   l1_gpt_cnt		Number of L1 tables needed.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
			      unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	if (!xlat_arch_is_granule_size_supported(
	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
		return -EPERM;
	}

	/* Make sure L1 tables are aligned to their size */
	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
		      l1_mem_base);
		return -EFAULT;
	}

	/* Get total memory needed for L1 tables */
	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Check for overflow */
	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
		ERROR("GPT: Overflow calculating L1 memory size\n");
		return -ENOMEM;
	}

	/* Make sure enough space was supplied */
	if (l1_mem_size < l1_gpt_mem_sz) {
		ERROR("GPT: Inadequate L1 GPTs memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      l1_gpt_mem_sz, l1_mem_size);
		return -ENOMEM;
	}

	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
	return 0;
}

/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region to
 *		initialize.
 */
static void generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t gpt_desc;
	unsigned long idx, end_idx;
	uint64_t *l0_gpt_arr;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */

	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* Start index of this region in L0 GPTs */
	idx = GPT_L0_IDX(pas->base_pa);

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

	/* Generate the needed block descriptors */
	for (; idx < end_idx; idx++) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
	}
}

/*
 * Helper function to determine if the end physical address lies in the same L0
 * region as the current physical address. If true, the end physical address is
 * returned; otherwise, the start address of the next region is returned.
 *
 * Parameters
 *   cur_pa		Physical address of the current PA in the loop through
 *			the range.
 *   end_pa		Physical address of the end PA in a PAS range.
 *
 * Return
 *   The PA of the end of the current range.
 */
static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t cur_idx;
	uintptr_t end_idx;

	cur_idx = GPT_L0_IDX(cur_pa);
	end_idx = GPT_L0_IDX(end_pa);

	assert(cur_idx <= end_idx);

	if (cur_idx == end_idx) {
		return end_pa;
	}

	return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
}
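
/*
 * Example, assuming L0GPTSZ = 1GB (GPT_L0_IDX_SHIFT = 30): for
 * cur_pa = 0x30000000 and end_pa = 0x120000000, cur_idx = 0 and
 * end_idx = 4, so the function returns 0x40000000, the start of the
 * next 1GB L0 region.
 */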

/*
 * Helper function to fill out GPI entries in a single L1 table, starting from
 * the 'first' granule address of the specified 'length', with a Contiguous
 * descriptor built from 'gpi'.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   length	Length of the range in bytes
 *   gpi	GPI to set this range to
 *
 * Return
 *   Address of next granule in range.
 */
__unused static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
					    size_t length, unsigned int gpi)
{
	/*
	 * Look up table for contiguous blocks and descriptors.
	 * Entries should be defined in descending block sizes:
	 * 512MB, 32MB and 2MB.
	 */
	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
#if (RME_GPT_MAX_BLOCK == 512)
		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
#endif
#if (RME_GPT_MAX_BLOCK >= 32)
		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
#endif
#if (RME_GPT_MAX_BLOCK != 0)
		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
#endif
	};

	/*
	 * Iterate through all block sizes (512MB, 32MB and 2MB)
	 * starting with maximum supported.
	 */
	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
		/* Calculate index */
		unsigned long idx = GPT_L1_INDEX(first);

		/* Contiguous block size */
		size_t cont_size = gpt_fill_lookup[i].size;

		if (GPT_REGION_IS_CONT(length, first, cont_size)) {

			/* Generate Contiguous descriptor */
			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
						gpt_fill_lookup[i].desc);

			/* Number of 128-bit L1 entries in block */
			unsigned int cnt;

			switch (cont_size) {
			case SZ_512M:
				cnt = L1_QWORDS_512MB;
				break;
			case SZ_32M:
				cnt = L1_QWORDS_32MB;
				break;
			default:	/* SZ_2M */
				cnt = L1_QWORDS_2MB;
				break;
			}

			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
				first, cont_size / SZ_1M);

			/* Fill Contiguous descriptors */
			fill_desc(&l1[idx], l1_desc, cnt);
			return (first + cont_size);
		}
	}

	return first;
}
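
/*
 * Example, assuming GPT_REGION_IS_CONT(length, first, size) checks that
 * 'first' is aligned to 'size' and that 'length' >= 'size': for
 * first = 0x40000000 and length = 0x4000000 (64MB), the 512MB check
 * fails (length too small), the 32MB check passes, so one run of 32MB
 * Contiguous descriptors is written and 0x42000000 is returned for the
 * next iteration.
 */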

/* Build Granules descriptor with the same 'gpi' for every GPI entry */
static uint64_t build_l1_desc(unsigned int gpi)
{
	uint64_t l1_desc = (uint64_t)gpi | ((uint64_t)gpi << 4);

	l1_desc |= (l1_desc << 8);
	l1_desc |= (l1_desc << 16);
	return (l1_desc | (l1_desc << 32));
}
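
/*
 * Example: gpi = GPT_GPI_NS (0x9) doubles to 0x99, then to 0x9999,
 * 0x99999999 and finally 0x9999999999999999, i.e. all sixteen 4-bit
 * GPI fields of the 64-bit Granules descriptor set to 0x9.
 */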

/*
 * Helper function to fill out GPI entries from 'first' to 'last' granule
 * address in a single L1 table with 'l1_desc' Granules descriptor.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI to set this range to
 *
 * Return
 *   Address of next granule in range.
 */
static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
				   uintptr_t last, unsigned int gpi)
{
	uint64_t gpi_mask;
	unsigned long i;

	/* Generate Granules descriptor */
	uint64_t l1_desc = build_l1_desc(gpi);

	/* Shift the mask if we're starting in the middle of an L1 entry */
	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region */
	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {

		/* Account for stopping in the middle of an L1 entry */
		if (i == GPT_L1_INDEX(last)) {
			gpi_mask &= (gpi_mask >> ((15U -
				GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));

		/* Write GPI values */
		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);

		/* Reset mask */
		gpi_mask = ULONG_MAX;
	}

	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
}
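
/*
 * Example: with 'first' pointing at GPI slot 2 of its L1 entry
 * (GPT_L1_GPI_IDX = 2), the initial mask is ULONG_MAX << 8 =
 * 0xFFFFFFFFFFFFFF00, so the two lower GPI fields of that entry are
 * preserved and only slots 2 to 15 are rewritten.
 */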

/*
 * Helper function to fill out GPI entries in a single L1 table.
 * This function fills out an entire L1 table with either Granules or Contiguous
 * (RME_GPT_MAX_BLOCK != 0) descriptors depending on region length and alignment.
 * Note: if RME_GPT_MAX_BLOCK == 0, then the L1 tables are filled with regular
 * Granules descriptors.
 *
 * Parameters
 *   l1		Pointer to L1 table to fill out
 *   first	Address of first granule in range
 *   last	Address of last granule in range (inclusive)
 *   gpi	GPI to set this range to
 */
static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
			unsigned int gpi)
{
	assert(l1 != NULL);
	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));

#if (RME_GPT_MAX_BLOCK != 0)
	while (first <= last) {
		/* Region length */
		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		if (length < SZ_2M) {
			/*
			 * Fill with Granule descriptors in case of
			 * region length < 2MB.
			 */
			first = fill_l1_gran_desc(l1, first, last, gpi);

		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
			/*
			 * For region length >= 2MB and at least 2MB aligned,
			 * the call to fill_l1_cont_desc will iterate through
			 * all supported block sizes (512MB, 32MB and 2MB) and
			 * fill the corresponding Contiguous descriptors.
			 */
			first = fill_l1_cont_desc(l1, first, length, gpi);
		} else {
			/*
			 * For an unaligned region >= 2MB, fill with Granules
			 * descriptors up to the next 2MB aligned address.
			 */
			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
					     GPT_PGS_ACTUAL_SIZE(gpt_config.p);

			first = fill_l1_gran_desc(l1, first, new_last, gpi);
		}
	}
#else
	/* Fill with Granule descriptors */
	first = fill_l1_gran_desc(l1, first, last, gpi);
#endif
	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
}
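
/*
 * Example walk-through for a 6MB range, first = 0x40100000 and
 * last = 0x406FF000 (4KB granules): pass 1 is unaligned, so Granules
 * descriptors fill 0x40100000-0x401FF000; passes 2 and 3 write 2MB
 * Contiguous descriptors at 0x40200000 and 0x40400000; the final 1MB
 * starting at 0x40600000 is filled with Granules descriptors again.
 */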

/*
 * This function finds the next available unused L1 table and initializes all
 * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
 * event that a PAS region stops midway through an L1 table, thus guaranteeing
 * that all memory not explicitly assigned is GPI_ANY. This function does not
 * check for overflow conditions; that should be done by the caller.
 *
 * Return
 *   Pointer to the next available L1 table.
 */
static uint64_t *get_new_l1_tbl(void)
{
	/* Retrieve the next L1 table */
	uint64_t *l1 = (uint64_t *)gpt_l1_tbl;

	/* Increment L1 GPT address */
	gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Initialize all GPIs to GPT_GPI_ANY */
	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
		l1[i] = GPT_L1_ANY_DESC;
	}

	return l1;
}

/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region.
 */
static void generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx, gpi;

	assert(gpt_config.plat_gpt_l0_base != 0UL);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Get GPI */
	gpi = GPT_PAS_ATTR_GPI(pas->attrs);

	/* Iterate over each L0 region in this memory range */
	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space */
			l1_gpt_arr = get_new_l1_tbl();

			/* Fill out the L0 descriptor and flush it */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);

		/* Advance cur_pa to first granule in next L0 region */
		cur_pa = get_l1_end_pa(cur_pa, end_pa);
	}
}

/*
 * This function flushes a range of L0 descriptors used by a given PAS region
 * array. There is a chance that some unmodified L0 descriptors would be flushed
 * in the case that there are "holes" in an array of PAS regions, but overall
 * this should be faster than individually flushing each modified L0 descriptor
 * as it is created.
 *
 * Parameters
 *   *pas	Pointer to an array of PAS regions.
 *   pas_count	Number of entries in the PAS array.
 */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
	unsigned long idx;
	unsigned long start_idx;
	unsigned long end_idx;
	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas != NULL);
	assert(pas_count != 0U);

	/* Initial start and end values */
	start_idx = GPT_L0_IDX(pas[0].base_pa);
	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);

	/* Find lowest and highest L0 indices used in this PAS array */
	for (idx = 1UL; idx < pas_count; idx++) {
		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
			start_idx = GPT_L0_IDX(pas[idx].base_pa);
		}
		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
		}
	}

	/*
	 * Flush all covered L0 descriptors; add 1 because we need to include
	 * the end index value.
	 */
	flush_dcache_range((uintptr_t)&l0[start_idx],
			   ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
}

/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialized before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs.
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Enable NSP and SA if FEAT_RME_GDI is implemented */
	if (is_feat_rme_gdi_supported()) {
		gpccr_el3 |= GPCCR_NSP_BIT;
		gpccr_el3 |= GPCCR_SA_BIT;
	}

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* Enable NSO encoding if FEAT_RME_GPC2 is supported */
	if (is_feat_rme_gpc2_present()) {
		gpccr_el3 |= GPCCR_NSO_BIT;
	}

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * Parameters
 *   pps		PPS value to use for table generation
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	uint64_t gpt_desc;
	int ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Validate other parameters */
	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Create the descriptor to initialize L0 entries with */
	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

	/* Iterate through all L0 entries */
	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
		((uint64_t *)l0_mem_base)[i] = gpt_desc;
	}

	/* Flush updated L0 table to memory */
	flush_dcache_range((uintptr_t)l0_mem_base, GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Stash the L0 base address once initial setup is complete */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}

/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory (e.g. you have multiple DDR banks and want to place the L1 tables
 * in the DDR bank that they control).
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int l1_gpt_cnt, ret;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* PGS is needed for validate_pas_mappings so check it now */
	if (pgs > GPT_PGS_MAX) {
		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/* Check if L1 GPTs are required and how many */
	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters */
	if (l1_gpt_cnt > 0) {
		ret = validate_l1_params(l1_mem_base, l1_mem_size,
					 (unsigned int)l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation */
		gpt_l1_tbl = l1_mem_base;
	}

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	INFO("GPT: Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: %u\n", pas_count);
	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   (size_t)l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
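
/*
 * Minimal boot-flow sketch for the three public APIs above (illustrative
 * only; the PLAT_* symbols and the single all-NS PAS region below are
 * hypothetical placeholders supplied by a platform port, and error
 * handling is omitted):
 *
 *	pas_region_t pas[] = {
 *		GPT_MAP_REGION_GRANULE(PLAT_NS_DRAM_BASE,
 *				       PLAT_NS_DRAM_SIZE, GPT_GPI_NS),
 *	};
 *
 *	(void)gpt_init_l0_tables(GPCCR_PPS_4GB, PLAT_GPT_L0_BASE,
 *				 PLAT_GPT_L0_SIZE);
 *	(void)gpt_init_pas_l1_tables(GPCCR_PGS_4K, PLAT_GPT_L1_BASE,
 *				     PLAT_GPT_L1_SIZE, pas,
 *				     (unsigned int)ARRAY_SIZE(pas));
 *	(void)gpt_enable();
 */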

/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service, so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Parameters
 *   l1_bitlocks_base	Base address of memory for L1 tables bitlocks.
 *   l1_bitlocks_size	Total size of memory available for L1 tables bitlocks.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(uintptr_t l1_bitlocks_base, size_t l1_bitlocks_size)
{
	u_register_t reg;
	__unused size_t locks_size;

	/* Ensure that MMU and Data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Ensure GPC are already enabled */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0UL) {
		ERROR("GPT: Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR; we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	/* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/*
	 * Size of GPT bitlocks in bytes for the protected address space
	 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
	 */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
		     (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);
	/*
	 * If the protected space size is less than the size covered
	 * by the 'bitlock' structure, check for a single bitlock.
	 */
	if (locks_size < LOCK_SIZE) {
		locks_size = LOCK_SIZE;
	/* Check bitlocks array size */
	} else if (locks_size > l1_bitlocks_size) {
		ERROR("GPT: Inadequate GPT bitlocks memory\n");
		ERROR("      Expected 0x%lx bytes, got 0x%lx\n",
		      locks_size, l1_bitlocks_size);
		return -ENOMEM;
	}

	gpt_bitlock = (bitlock_t *)l1_bitlocks_base;

	/* Initialize GPT bitlocks */
	(void)memset((void *)gpt_bitlock, 0, locks_size);

	/* Flush GPT bitlocks to memory */
	flush_dcache_range((uintptr_t)gpt_bitlock, locks_size);
#endif /* RME_GPT_BITLOCK_BLOCK */

	VERBOSE("GPT: Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
#if (RME_GPT_BITLOCK_BLOCK != 0)
	VERBOSE("  Bitlocks:  0x%"PRIxPTR"/0x%lx\n", (uintptr_t)gpt_bitlock,
		locks_size);
#endif
	return 0;
}

/*
 * A helper to write the value (target_pas << gpi_shift) to the GPI field at
 * index 'idx' of gpt_l1_addr.
 */
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
			     unsigned int gpi_shift, unsigned int idx,
			     unsigned int target_pas)
{
	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = *gpt_l1_desc;

	dsboshst();
}
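
/*
 * Example: target_pas = GPT_GPI_REALM (0xB) with gpi_shift = 8 clears
 * bits [11:8] of the cached descriptor, ORs in 0xB00, then publishes
 * the updated descriptor to the L1 table, followed by a DSB so that
 * the store is ordered for observers in the Outer Shareable domain.
 */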

/*
 * Helper to retrieve the gpt_l1_* information for the given base address
 * and return it in gpi_info.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;
	__unused unsigned int block_idx;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from PA */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Block index */
	block_idx = (unsigned int)(base / (RME_GPT_BITLOCK_BLOCK * SZ_512M));

	/* Bitlock address and mask */
	gpi_info->lock = &gpt_bitlock[block_idx / LOCK_BITS];
	gpi_info->mask = 1U << (block_idx & (LOCK_BITS - 1U));
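
	/*
	 * Example: base = 0x60000000 (1.5GB) with RME_GPT_BITLOCK_BLOCK = 1
	 * gives block_idx = 3, so this granule is guarded by bit
	 * (3 & (LOCK_BITS - 1)) of gpt_bitlock[3 / LOCK_BITS].
	 */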
#endif
	return 0;
}

/*
 * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
 * This function is called with bitlock or spinlock acquired.
 */
static void read_gpi(gpi_info_t *gpi_info)
{
	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];

	if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		/* Read GPI from Contiguous descriptor */
		gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
	} else {
		/* Read GPI from Granules descriptor */
		gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
			GPT_L1_GRAN_DESC_GPI_MASK);
	}
}

static void flush_page_to_popa(uintptr_t addr)
{
	size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);

	if (is_feat_mte2_supported()) {
		flush_dcache_to_popa_range_mte2(addr, size);
	} else {
		flush_dcache_to_popa_range(addr, size);
	}
}

/*
 * Helper function to check if all L1 entries in a 2MB block have
 * the same Granules descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
				    uint64_t l1_desc)
{
	/* Last L1 entry index in 2MB block */
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
			    gpt_l1_cnt_2mb - 1UL;

	/* Number of L1 entries in 2MB block */
	unsigned int cnt = gpt_l1_cnt_2mb;

	/*
	 * Start the check from the last L1 entry and continue until the first
	 * entry not matching the passed Granules descriptor value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
	}

	return true;
}

__unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
			      uint64_t l1_desc)
{
	/* L1 entry index of the start of 2MB block */
	unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
}

/*
 * Helper function to check if all 1st L1 entries of 2MB blocks
 * in 32MB have the same 2MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
				     uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 2MB block in 32MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
			    (15UL * gpt_l1_cnt_2mb);

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	/* Number of 2MB blocks in 32MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to 2MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;

	/*
	 * Start the check from the 1st L1 entry of the last 2MB block and
	 * continue until the first entry not matching the 2MB Contiguous
	 * descriptor value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= gpt_l1_cnt_2mb;
	}

	return true;
}

__unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
			       uint64_t l1_desc)
{
	/* L1 entry index of the start of 32MB block */
	unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
}

/*
 * Helper function to check if all 1st L1 entries of 32MB blocks
 * in 512MB have the same 32MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				      uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
			    (15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to 32MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start the check from the 1st L1 entry of the last 32MB block and
	 * continue until the first entry not matching the 32MB Contiguous
	 * descriptor value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}

__unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* L1 entry index of the start of 512MB block */
	unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));

	/* 512MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
}

/*
 * Helper function to convert GPI entries in a single L1 table
 * from Granules to Contiguous descriptor.
 *
 * Parameters
 *   base	Base address of the region to be written
 *   gpi_info	Pointer to 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with check for 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* Check for 2MB fusing failed */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* Check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* Check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);

#endif /* RME_GPT_MAX_BLOCK == 32 */
#endif /* RME_GPT_MAX_BLOCK == 2 */
}
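
/*
 * Example cascade: after the final granule of a 2MB block is set to
 * GPT_L1_REALM_DESC, check_fuse_2mb() succeeds and the block is fused
 * into a 2MB Contiguous descriptor; if the other fifteen 2MB blocks of
 * the surrounding 32MB already carry matching 2MB Contiguous
 * descriptors, fusing proceeds to 32MB, and likewise up to 512MB when
 * RME_GPT_MAX_BLOCK allows it.
 */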
1636
1637 /*
1638 * Helper function to convert GPI entries in a single L1 table
1639 * from Contiguous to Granules descriptor. This function updates
1640 * descriptor to Granules in passed 'gpt_config_t' structure as
1641 * the result of shuttering.
1642 *
1643 * Parameters
1644 * base Base address of the region to be written
1645 * gpi_info Pointer to 'gpt_config_t' structure
1646 * l1_desc GPT Granules descriptor set this range to.
1647 */
shatter_block(uint64_t base,gpi_info_t * gpi_info,uint64_t l1_desc)1648 __unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
1649 uint64_t l1_desc)
1650 {
1651 /* Look-up table for 2MB, 32MB and 512MB locks shattering */
1652 static const gpt_shatter_func gpt_shatter_lookup[] = {
1653 shatter_2mb,
1654 shatter_32mb,
1655 shatter_512mb
1656 };
1657
1658 /* Look-up table for invalidation TLBs for 2MB, 32MB and 512MB blocks */
1659 static const gpt_tlbi_lookup_t tlbi_lookup[] = {
1660 { tlbirpalos_2m, ~(SZ_2M - 1UL) },
1661 { tlbirpalos_32m, ~(SZ_32M - 1UL) },
1662 { tlbirpalos_512m, ~(SZ_512M - 1UL) }
1663 };
1664
1665 /* Get shattering level from Contig field of Contiguous descriptor */
1666 unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;
1667
1668 /* Shatter contiguous block */
1669 gpt_shatter_lookup[level](base, gpi_info, l1_desc);
1670
1671 tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
1672 dsbosh();
1673
1674 /*
1675 * Update 'gpt_config_t' structure's descriptor to Granules to reflect
1676 * the shattered GPI back to caller.
1677 */
1678 gpi_info->gpt_l1_desc = l1_desc;
1679 }
1680
1681 /*
1682 * This function is the granule transition delegate service. When a granule
1683 * transition request occurs it is routed to this function to have the request,
1684 * if valid, fulfilled following A1.1.1 Delegate of RME supplement.
1685 *
1686 * TODO: implement support for transitioning multiple granules at once.
1687 *
1688 * Parameters
1689 * base Base address of the region to transition, must be
1690 * aligned to granule size.
1691 * size Size of region to transition, must be aligned to granule
1692 * size.
1693 * src_sec_state Security state of the caller.
1694 *
1695 * Return
1696 * Negative Linux error code in the event of a failure, 0 for success.
1697 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	unsigned int target_pas;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Only a single-granule transition is supported */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size do not overflow the address space */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Check that base and size are granule-aligned and within PPS range */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Delegate requests can only come from REALM or SECURE */
	if ((src_sec_state != SMC_FROM_REALM) &&
	    (src_sec_state != SMC_FROM_SECURE)) {
		VERBOSE("GPT: Invalid caller security state 0x%x\n",
			src_sec_state);
		return -EINVAL;
	}

	if (src_sec_state == SMC_FROM_REALM) {
		target_pas = GPT_GPI_REALM;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_REALM_DESC;
	} else {
		target_pas = GPT_GPI_SECURE;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_SECURE_DESC;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to the GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the granule is currently in the NS state */
	if (gpi_info.gpi != GPT_GPI_NS) {
		VERBOSE("GPT: Only a granule in NS state can be delegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
					GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove any data speculatively fetched into the target
	 * physical address space.
	 * Issue DC CIPAPA, or DC CIGDPAPA on implementations with FEAT_MTE2.
	 */
	flush_page_to_popa(base | nse);

	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, target_pas);

	/* Ensure that all agents observe the new configuration */
	tlbi_page_dsbosh(base);

	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == l1_desc) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, l1_desc);
	}
#endif

	/* Release the GPT lock */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to a lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, target_pas);

	return 0;
}
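
/*
 * Illustrative sketch, compiled out on purpose: one way a hypothetical
 * EL3 SMC handler could route a single-granule delegate request from
 * the Realm world into this service. Only gpt_delegate_pas(),
 * GPT_PGS_ACTUAL_SIZE() and SMC_FROM_REALM come from this library;
 * the wrapper name and its caller are assumptions.
 */
#if 0
static int example_delegate_one_granule(uint64_t pa)
{
	/* Delegate one granule of the configured size to the Realm PAS */
	return gpt_delegate_pas(pa, GPT_PGS_ACTUAL_SIZE(gpt_config.p),
				SMC_FROM_REALM);
}
#endif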

/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function, where the request
 * is validated and then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *	base		Base address of the region to transition, must be
 *			aligned to granule size.
 *	size		Size of region to transition, must be aligned to
 *			granule size.
 *	src_sec_state	Security state of the caller.
 *
 * Return
 *	Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Only a single-granule transition is supported */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size do not overflow the address space */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Check that base and size are granule-aligned and within PPS range */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to the GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the granule is currently in the caller's delegated state */
	if ((src_sec_state == SMC_FROM_REALM) &&
	    (gpi_info.gpi == GPT_GPI_REALM)) {
		l1_desc = GPT_L1_REALM_DESC;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	} else if ((src_sec_state == SMC_FROM_SECURE) &&
		   (gpi_info.gpi == GPT_GPI_SECURE)) {
		l1_desc = GPT_L1_SECURE_DESC;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		VERBOSE("GPT: Only a granule in REALM or SECURE state can be undelegated\n");
		VERBOSE("      Caller: %u Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
					GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, l1_desc);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now, in order to guarantee that writes
	 * to the currently-accessible physical address space will not
	 * later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);

	/* Ensure that all agents observe the new NO_ACCESS configuration */
	tlbi_page_dsbosh(base);

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

	/*
	 * Remove any data loaded speculatively into NS space from before
	 * the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_page_to_popa(base | nse);

	/* Clear the existing GPI encoding and transition the granule to NS */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);

	/* Ensure that all agents observe the new NS configuration */
	tlbi_page_dsbosh(base);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/* Release the GPT lock */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to a lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}
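
/*
 * Illustrative sketch, compiled out on purpose: undoing the delegation
 * shown after gpt_delegate_pas() above, returning a Realm granule to
 * Non-secure. The wrapper name is hypothetical; the call itself uses
 * only identifiers defined in this library.
 */
#if 0
static int example_undelegate_one_granule(uint64_t pa)
{
	/* The granule must currently be in the caller's (Realm) PAS */
	return gpt_undelegate_pas(pa, GPT_PGS_ACTUAL_SIZE(gpt_config.p),
				  SMC_FROM_REALM);
}
#endif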