xref: /rk3399_ARM-atf/drivers/qti/accesscontrol/access_control.c (revision 5de3e03dbd7c2da6748e294f423c83f9582f459c)
1 /*
2  * Copyright (c) 2026, Qualcomm Technologies, Inc. and/or its subsidiaries.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <stddef.h>
9 #include <stdint.h>
10 
11 #include <arch_helpers.h>
12 #include <common/debug.h>
13 #include <drivers/console.h>
14 #include <drivers/qti/accesscontrol/accesscontrol.h>
15 #include <lib/mmio.h>
16 #include <lib/spinlock.h>
17 #include <vmidmt.h>
18 #include <xpu_target_info.h>
19 
20 #include <qti_interrupt_svc.h>
21 
/*
 * Permission bit flags carried in qti_accesscontrol_perm_t.dst_vm_perm.
 * Only R and W are consumed by this driver (see process_destinations());
 * X is defined for completeness of the SMC ABI.
 */
#define AC_PERM_X 0x1
#define AC_PERM_W 0x2
#define AC_PERM_R 0x4

/* Serializes all memory-assignment requests handled by mem_assign(). */
static spinlock_t mem_assign_lock;
27 
/*
 * Virtual machine identifiers used by the access-control SMC interface.
 * Only the modem VMs (AC_VM_MSS_MSA / AC_VM_MSS_NAV) trigger XPU
 * reconfiguration in this driver; all other IDs are ignored.
 */
enum virtual_machine_id {
	AC_VM_NONE = 0,
	AC_VM_HLOS = 3,		/* Non-secure HLOS */
	AC_VM_MSS_MSA = 15,	/* Modem subsystem (MSA) */
	AC_VM_MSS_NAV = 43,	/* Modem NAV subsystem */
	AC_VM_LAST = 44,	/* Also used to clamp destination list scans */
	AC_VM_MAX = 0x7FFFFFFF,	/* Force 32-bit underlying type */
};
36 
/*
 * Access-control error codes returned to SMC callers. The full set is
 * kept for ABI compatibility with the wider access-control stack; only
 * a handful are produced by this file (AC_SUCCESS,
 * AC_ERR_XPU_TYPE_NOT_SUPPORTED, AC_ERR_XPU_ADD_MAPPING_FAILED,
 * AC_ERR_MPU_UPDATE_LOCK_MEMORY_FAILED).
 */
enum ac_error {
	AC_SUCCESS = 0,
	AC_FAILURE = 1,
	AC_ERR_VM_CREATE_FAIL = 2,
	AC_ERR_VM_MAP_FAIL1 = 3,
	AC_ERR_RAM_PARTITION_TABLE = 4,
	AC_ERR_VM_MAP_FAIL2 = 5,
	AC_ERR_VM_UNMAP_FAIL1 = 6,
	AC_ERR_VM_MAP_FAIL3 = 7,
	AC_ERR_VM_UNMAP_FAIL2 = 8,
	AC_ERR_TRANSLATION_SET1 = 9,
	AC_ERR_TRANSLATION_SET2 = 10,
	AC_ERR_TRANSLATION_SET3 = 11,
	AC_ERR_VALIDATION_FAIL1 = 12,
	AC_ERR_INCORRECT_VM = 13,
	AC_ERR_IO_ADDRESS_MISMATCH = 14,
	AC_ERR_SHARED_MEMORY_SINGLE_SOURCE = 15,
	AC_ERR_SHARED_MEMORY_SOURCE_MISMATCH = 16,
	AC_ERR_NOT_SHARED_MEMORY_MULTIPLE_SOURCE_GIVEN = 17,
	AC_ERR_MAPPING_TYPE_NOT_SUPPORTED = 18,
	AC_ERR_MAPPING_NOT_FOUND = 19,
	AC_ERR_REMOVE_MEMORY_FROM_LIST_FAIL = 20,
	AC_ERR_CLEAR_MEMORY_FAIL = 21,
	AC_ERR_IS_DEVICE_MEMORY = 22,
	AC_ERR_VMISMAPPED_FAILED = 23,
	AC_ERR_UNCACHED_ALLOC_FAILED = 24,
	AC_ERR_MEMORY_NOT_OWNED_BY_SOURCE_VM = 25,
	AC_ERR_TZ_ASSIGN_SMC_FAILED = 26,
	AC_ERR_VM_UNMAP_FAIL3 = 27,
	AC_ERR_VM_MAP_FAIL4 = 28,
	AC_ERR_MEMORY_FULL = 29,
	AC_ERR_MEMORY_IN_OWNED_BY_TZ = 30,
	AC_ERR_MEMORY_IN_USE_BY_TZ = 31,
	AC_ERR_XPU_TYPE_NOT_SUPPORTED = 32,
	AC_ERR_XPU_REMOVE_MAPPING_FAILED = 33,
	AC_ERR_XPU_ADD_MAPPING_FAILED = 34,
	AC_ERR_MEMORY_NOT_FOUND_IN_LIST = 35,
	AC_ERR_MEMORY_ALREADY_IN_LIST = 36,
	AC_ERR_ADD_MEMORY_FROM_LIST_FAIL = 37,
	AC_ERR_SIZE_GREATER_THAN_32BITS = 38,
	AC_ERR_INVALID_INDEX = 39,
	AC_ERR_UPDATING_RAM_PARTITION_TABLE = 40,
	AC_ERR_NOT_4K_ALIGNED = 41,
	AC_ERR_MEMORY_NOT_IN_LIST = 42,
	AC_ERR_NOT_DDR_MEMORY = 43,
	AC_ERR_IPA_OVERFLOW = 44,
	AC_ERR_SRC_SIZE_ZERO = 45,
	AC_ERR_SRC_LIST_NULL = 46,
	AC_ERR_DST_SIZE_ZERO = 47,
	AC_ERR_DST_LIST_NULL = 48,
	AC_ERR_SID2VM_SMMU_API_FAILED = 49,
	AC_ERR_SID_VALIDATION_FAIL = 50,
	AC_ERR_UNABLE_TO_XPU_LOCK = 51,
	AC_ERR_STRUCT_SIZE_LESS_THAN_EXPECTED = 52,
	AC_ERR_INVALID_POINTER = 53,
	AC_ERR_IPA_LIST_NULL = 54,
	AC_ERR_IPA_LIST_SIZE_ZER0 = 55,
	AC_ERR_MMU_ADD_MAPPING_FAILED = 56,
	AC_ERR_MMU_REMOVE_MAPPING_FAILED = 57,
	AC_ERR_INVALID_PERM_TYPE = 58,
	AC_ERR_MPU_LOCK_MEMORY_FAILED = 59,
	AC_ERR_MPU_UNLOCK_MEMORY_FAILED = 60,
	AC_ERR_USECASE_NOT_SUPPORTED = 61,
	AC_ERR_SRC_VM_TZ_INVALID = 62,
	AC_ERR_NULL_POINTER = 63,
	AC_ERR_TZ_IO_ASSIGN_SMC_FAILED = 64,
	AC_ERR_NOT_DEVICE_MEMORY = 65,
	AC_ERR_SMMU_CFG_TYPE_INVALID = 66,
	AC_ERR_INCORRECT_PERM = 67,
	AC_ERR_MEMORY_IS_SHARED = 68,
	AC_ERR_CANNOT_CHANGE_HLOS_RO_MEMORY = 69,
	AC_ERR_UNABLE_TO_XPU_UNLOCK = 70,
	AC_ERR_ADD_CLEAR_REGION_FAILED = 71,
	AC_ERR_REMOVE_CLEAR_REGION_FAILED = 72,
	AC_ERR_OVERLAPPING_MEMORY = 73,
	AC_ERR_DEVICE_RANGE_CHECK_OVERFLOW = 74,
	AC_ERR_ITS_A_SECURE_DEVICE = 75,
	AC_ERR_NOT_IN_WHITELIST = 76,
	AC_ERR_API_FAILED = 77,
	AC_ERR_SRC_NUM_INVALID = 78,
	AC_ERR_NUM_MAPPING_OVERFLOW = 79,
	AC_ERR_DST_NUM_INVALID = 80,
	AC_ERR_INCORRECT_DEVICE = 81,
	AC_ERR_DEVICE_NOT_FOUND = 82,
	AC_ERR_XPU_PARTIAL_MAPPING_NOT_ALLOWED = 83,
	AC_ERR_HASH_OVERFLOW = 84,
	AC_ERR_RULE_NOT_FOUND = 85,
	AC_ERR_TZ_SHM_CREATE_SMC_FAILED = 86,
	AC_ERR_VM_SIZE_OVERFLOW = 87,
	AC_ERR_SHM_BRIDGE_NOT_FOUND = 88,
	AC_ERR_GETTING_RANDOM_NUMBER = 89,
	AC_ERR_CALLER_VM_ID_INCORRECT = 90,
	AC_ERR_SHM_RULE_NOT_FOUND = 91,
	AC_ERR_MUTEX_ACQUIRE_FAIL = 92,
	AC_ERR_MUTEX_RELEASE_FAIL = 93,
	AC_ERR_OUTPUT_ADDRESS_MISMATCH = 94,
	AC_ERR_DST_VM_TZ_INVALID = 95,
	AC_ERR_MPU_UPDATE_LOCK_MEMORY_FAILED = 96,
	AC_ERR_DDR_MPU_STATIC_CFG_BLACKLIST_UPDATE = 97,
	AC_ERR_FATAL_ACCESS_CONTROL = 98,
	AC_ERR_LAST,
	AC_ERR_MAX = 0x7FFFFFFF,	/* Force 32-bit underlying type */
};
140 
/* Human-readable interrupt descriptions (not referenced in this file). */
#define ACC_INT_XPU_NON_SEC_DESC "SPI XPU NonSec"
#define ACC_INT_XPU_SEC_DESC "SPI XPU Sec"

/*
 * Per-interrupt contexts handed to xpu_isr() via registration; they
 * select which XPU error log (secure vs non-secure) gets dumped.
 */
static int xpu_err_non_sec_ctx = XPU_ERR_NON_SEC_CTX;
static int xpu_err_sec_ctx = XPU_ERR_SEC_CTX;
146 
update_master_side_mpu(struct xpu_instance * instance,uint32_t dynamic_partition_count,enum domain_type domain,uintptr_t start_addr,uintptr_t end_addr,uint32_t perm_r,uint32_t perm_w)147 static int update_master_side_mpu(struct xpu_instance *instance,
148 				  uint32_t dynamic_partition_count,
149 				  enum domain_type domain, uintptr_t start_addr,
150 				  uintptr_t end_addr, uint32_t perm_r,
151 				  uint32_t perm_w)
152 {
153 	uint64_t last_index = instance->part_range_arr_size;
154 	uint64_t first_index = last_index - dynamic_partition_count;
155 	struct rg_partition_range *range_base = instance->partition_range;
156 	struct rg_domain_ownership *owner_base = instance->rg_owner;
157 	struct rg_domain_ownership *found_owner = NULL;
158 	struct rg_partition_range *found_range = NULL;
159 	struct rg_domain_ownership *owner;
160 	struct rg_partition_range *range;
161 	uint64_t idx;
162 
163 	range = range_base + first_index;
164 	owner = owner_base + first_index + 1; /* +1 for unmapped entry */
165 
166 	for (idx = first_index; idx < last_index; idx++, range++, owner++) {
167 		/* Check if this RG already covers the requested range */
168 		if (range->start_addr == start_addr &&
169 		    range->end_addr == end_addr) {
170 			/* Free region */
171 			if (domain == APPS_NS_DOMAIN) {
172 				start_addr = 0xfffffffful;
173 				end_addr = 0xfffffffful;
174 				domain = NO_DOMAIN;
175 
176 				INFO("freeing RG for xpu 0x%lx idx:%llu\n",
177 				     (unsigned long)instance->xpu_base_addr,
178 				     (unsigned long long)idx);
179 			}
180 
181 			found_range = range;
182 			found_owner = owner;
183 			break;
184 		}
185 
186 		/*
187 		 * Keep the first free RG; might be overridden if we later
188 		 * find an exact range match.
189 		 */
190 		if (owner->owner_domain == NO_DOMAIN &&
191 		    range->start_addr == 0xfffffffful &&
192 		    range->end_addr == 0xfffffffful) {
193 			if (!found_range) {
194 				found_range = range;
195 				found_owner = owner;
196 			}
197 		}
198 	}
199 
200 	if (!found_range) {
201 		ERROR("No free RG xpu addr : 0x%lx",
202 		      (unsigned long)instance->xpu_base_addr);
203 		return 1;
204 	}
205 
206 	found_owner->owner_domain = domain;
207 	found_range->start_addr = start_addr;
208 	found_range->end_addr = end_addr;
209 
210 	return xpu_lock_down_assets_dynamic(instance, 1, instance->xpu_id,
211 					    found_range->rg_num, perm_r,
212 					    perm_w);
213 }
214 
mpu_master_mpus_range(enum device_type dev_type,enum domain_type domain,uintptr_t start_addr,uintptr_t end_addr,uint32_t perm_r,uint32_t perm_w)215 static int mpu_master_mpus_range(enum device_type dev_type,
216 				 enum domain_type domain, uintptr_t start_addr,
217 				 uintptr_t end_addr, uint32_t perm_r,
218 				 uint32_t perm_w)
219 {
220 	struct mpu_ranges *range = msm_mpu_ranges;
221 	uint32_t i, j;
222 	int ret = 0;
223 
224 	for (i = 0; i < msm_mpu_ranges_count; i++, range++) {
225 		if (range->device != dev_type)
226 			continue;
227 
228 		struct xpu_instance *mpu = range->mpus;
229 
230 		for (j = 0; j < range->mpus_count; j++, mpu++) {
231 			ret = update_master_side_mpu(mpu,
232 						     range->device_prtn_cnt,
233 						     domain, start_addr,
234 						     end_addr, perm_r, perm_w);
235 			if (ret)
236 				goto error;
237 		}
238 
239 		/* We found the device; no need to scan the rest. */
240 		break;
241 	}
242 
243 	return 0;
244 
245 error:
246 	ERROR("Access control fatal (%x)\n",
247 	      AC_ERR_MPU_UPDATE_LOCK_MEMORY_FAILED);
248 
249 	for (;;)
250 		wfi();
251 
252 	return ret;
253 }
254 
255 static enum ac_error
process_sources(const uint32_t * src_vm_list,uint32_t src_vm_count,const qti_accesscontrol_perm_t * dst_vm_list,uint32_t dst_vm_count,enum device_type * device,enum domain_type * domain)256 process_sources(const uint32_t *src_vm_list, uint32_t src_vm_count,
257 		const qti_accesscontrol_perm_t *dst_vm_list,
258 		uint32_t dst_vm_count, enum device_type *device,
259 		enum domain_type *domain)
260 {
261 	const uint32_t *src_vm;
262 	const qti_accesscontrol_perm_t *dst_vm;
263 	const qti_accesscontrol_perm_t *dst_vm_limit;
264 	uint32_t src_index;
265 	uint32_t dst_index;
266 	uint32_t vm_also_in_other_list = 0U;
267 
268 	src_vm = src_vm_list;
269 
270 	for (src_index = 0U; src_index < src_vm_count; src_index++, src_vm++) {
271 		uint32_t src_vm_id = *src_vm;
272 
273 		if (src_vm_id != AC_VM_MSS_MSA && src_vm_id != AC_VM_MSS_NAV)
274 			continue;
275 
276 		/* Check if same VM appears as destination */
277 		dst_vm = dst_vm_list;
278 		dst_vm_limit =
279 			dst_vm_list + ((dst_vm_count < (uint32_t)AC_VM_LAST) ?
280 					       dst_vm_count :
281 					       (uint32_t)AC_VM_LAST);
282 
283 		for (dst_index = 0U; dst_vm < dst_vm_limit;
284 		     dst_index++, dst_vm++) {
285 			if (dst_vm->dst_vm != src_vm_id)
286 				continue;
287 
288 			vm_also_in_other_list = 1U;
289 			break;
290 		}
291 
292 		if (vm_also_in_other_list != 0U) {
293 			vm_also_in_other_list = 0U;
294 			continue;
295 		}
296 
297 		if (src_vm_id == AC_VM_MSS_MSA)
298 			*device = DEVICE_MODEM;
299 		else if (src_vm_id == AC_VM_MSS_NAV)
300 			*device = DEVICE_MSS_NAV;
301 		else
302 			return AC_ERR_XPU_TYPE_NOT_SUPPORTED;
303 
304 		*domain = APPS_NS_DOMAIN;
305 	}
306 
307 	return AC_SUCCESS;
308 }
309 
310 static enum ac_error
process_destinations(const uint32_t * src_vm_list,uint32_t src_vm_count,const qti_accesscontrol_perm_t * dst_vm_list,uint32_t dst_vm_count,enum device_type * device,enum domain_type * domain,uint32_t * read_perm_domain,uint32_t * write_perm_domain)311 process_destinations(const uint32_t *src_vm_list, uint32_t src_vm_count,
312 		     const qti_accesscontrol_perm_t *dst_vm_list,
313 		     uint32_t dst_vm_count, enum device_type *device,
314 		     enum domain_type *domain, uint32_t *read_perm_domain,
315 		     uint32_t *write_perm_domain)
316 {
317 	const qti_accesscontrol_perm_t *dst_vm;
318 	const uint32_t *src_check;
319 	uint32_t vm_also_in_other_list = 0U;
320 	uint32_t dst_index;
321 	uint32_t src_index;
322 
323 	dst_vm = dst_vm_list;
324 
325 	for (dst_index = 0U; dst_index < dst_vm_count; dst_index++, dst_vm++) {
326 		uint32_t dst_vm_id = dst_vm->dst_vm;
327 		uint32_t dst_vm_perm = dst_vm->dst_vm_perm;
328 
329 		if (dst_vm_id != AC_VM_MSS_MSA && dst_vm_id != AC_VM_MSS_NAV)
330 			continue;
331 
332 		/* Check if same VM exists in source list */
333 		src_check = src_vm_list;
334 		for (src_index = 0U; src_index < src_vm_count;
335 		     src_index++, src_check++) {
336 			if (dst_vm_id != *src_check)
337 				continue;
338 
339 			vm_also_in_other_list = 1U;
340 			break;
341 		}
342 
343 		if (vm_also_in_other_list != 0U) {
344 			vm_also_in_other_list = 0U;
345 			continue;
346 		}
347 
348 		if (dst_vm_id == AC_VM_MSS_MSA)
349 			*device = DEVICE_MODEM;
350 		else if (dst_vm_id == AC_VM_MSS_NAV)
351 			*device = DEVICE_MSS_NAV;
352 		else
353 			return AC_ERR_XPU_TYPE_NOT_SUPPORTED;
354 
355 		/* Permissions */
356 		if ((dst_vm_perm & AC_PERM_W) != 0U)
357 			*write_perm_domain = MSA_DOMAIN;
358 
359 		if ((dst_vm_perm & AC_PERM_R) != 0U)
360 			*read_perm_domain = MSA_DOMAIN;
361 
362 		/* Modem keeps secure RG ownership permanently */
363 		*domain = APPS_S_DOMAIN;
364 	}
365 
366 	return AC_SUCCESS;
367 }
368 
assign_regions(const qti_accesscontrol_mem_t * mem_regions,uint32_t mem_region_count,enum device_type device,enum domain_type domain,uint32_t read_perm_domain,uint32_t write_perm_domain)369 static enum ac_error assign_regions(const qti_accesscontrol_mem_t *mem_regions,
370 				    uint32_t mem_region_count,
371 				    enum device_type device,
372 				    enum domain_type domain,
373 				    uint32_t read_perm_domain,
374 				    uint32_t write_perm_domain)
375 {
376 	const qti_accesscontrol_mem_t *mem_region = mem_regions;
377 	uint32_t mem_index;
378 
379 	for (mem_index = 0U; mem_index < mem_region_count;
380 	     mem_index++, mem_region++) {
381 		uintptr_t region_base = mem_region->mem_addr;
382 		uintptr_t region_size = mem_region->mem_size;
383 		uintptr_t region_end = region_base + region_size;
384 		int rc;
385 
386 		rc = mpu_master_mpus_range(device, domain, region_base,
387 					   region_end, read_perm_domain,
388 					   write_perm_domain);
389 		if (rc != 0)
390 			return AC_ERR_XPU_ADD_MAPPING_FAILED;
391 	}
392 
393 	return AC_SUCCESS;
394 }
395 
mem_assign(const qti_accesscontrol_mem_t * mem_regions,uint32_t mem_region_count,const uint32_t * src_vm_list,uint32_t src_vm_count,const qti_accesscontrol_perm_t * dst_vm_list,uint32_t dst_vm_count)396 static uint64_t mem_assign(const qti_accesscontrol_mem_t *mem_regions,
397 			   uint32_t mem_region_count,
398 			   const uint32_t *src_vm_list, uint32_t src_vm_count,
399 			   const qti_accesscontrol_perm_t *dst_vm_list,
400 			   uint32_t dst_vm_count)
401 {
402 	enum ac_error result = AC_SUCCESS;
403 	uint32_t write_perm_domain = NO_DOMAIN;
404 	uint32_t read_perm_domain = NO_DOMAIN;
405 	enum domain_type domain = NO_DOMAIN;
406 	enum device_type device = 0;
407 
408 	spin_lock(&mem_assign_lock);
409 
410 	result = process_sources(src_vm_list, src_vm_count, dst_vm_list,
411 				 dst_vm_count, &device, &domain);
412 	if (result != AC_SUCCESS)
413 		goto out;
414 
415 	result = process_destinations(src_vm_list, src_vm_count, dst_vm_list,
416 				      dst_vm_count, &device, &domain,
417 				      &read_perm_domain, &write_perm_domain);
418 	if (result != AC_SUCCESS)
419 		goto out;
420 
421 	result = assign_regions(mem_regions, mem_region_count, device, domain,
422 				read_perm_domain, write_perm_domain);
423 	if (result != AC_SUCCESS)
424 		goto out;
425 
426 out:
427 	spin_unlock(&mem_assign_lock);
428 
429 	if (result != AC_SUCCESS) {
430 		ERROR("Access Control: memory assignment failed %x\n", result);
431 		return (uint64_t)-1;
432 	}
433 
434 	return 0;
435 }
436 
/*
 * Handler shared by the secure and non-secure XPU error interrupts.
 * 'ctx' selects which error context to dump (it points at
 * xpu_err_sec_ctx or xpu_err_non_sec_ctx, as registered in
 * xpu_register_interrupts()); 'int_num' is unused.
 */
static void *xpu_isr(uint32_t int_num, void *ctx)
{
	xpu_print_log(ctx);
	/* Ensure the error log reaches the console before returning. */
	console_flush();

	return ctx;
}
444 
xpu_register_interrupts(void)445 static int xpu_register_interrupts(void)
446 {
447 	int err = 0;
448 
449 	err = qti_interrupt_svc_register(QTISECLIB_INT_ID_XPU_SEC, xpu_isr,
450 					 &xpu_err_sec_ctx);
451 	if (err)
452 		return err;
453 
454 	err = qti_interrupt_svc_register(QTISECLIB_INT_ID_XPU_NON_SEC, xpu_isr,
455 					 &xpu_err_non_sec_ctx);
456 	if (err)
457 		qti_interrupt_svc_unregister(QTISECLIB_INT_ID_XPU_SEC);
458 
459 	return err;
460 }
461 
enable_interrupts(const struct xpu_intr_reg_dtls * nsec,const struct xpu_intr_reg_dtls * sec)462 static void enable_interrupts(const struct xpu_intr_reg_dtls *nsec,
463 			      const struct xpu_intr_reg_dtls *sec)
464 {
465 	for (size_t i = 0; i < ACC_XPU_ERR_INT_REG_NUM; i++) {
466 		if (nsec) {
467 			mmio_setbits_32(nsec->xpu_intr_reg_addr,
468 					nsec->xpu_intr_reg_mask);
469 			nsec++;
470 		}
471 
472 		if (sec) {
473 			mmio_setbits_32(sec->xpu_intr_reg_addr,
474 					sec->xpu_intr_reg_mask);
475 			sec++;
476 		}
477 	}
478 }
479 
/*
 * One-time static XPU configuration at boot: initialize the master-side
 * MPUs, apply the static lock-down configuration, configure TZ-owned
 * assets, then enable the XPU error interrupts. dsbsy() ensures all
 * configuration writes have completed before interrupts are enabled.
 * The call order here is significant.
 */
static void xpu_static_config(void)
{
	xpu_master_mpu_init(msm_mpu_ranges, msm_mpu_ranges_count);
	xpu_lock_down_assets(msm_xpu_cfg, msm_xpu_cfg_count);
	xpu_configure_tz();
	dsbsy();

	enable_interrupts(xpu_non_sec_intr_en_reg, xpu_sec_intr_en_reg);
}
489 
/*
 * Public entry point for the memory-assignment request: a thin wrapper
 * that forwards all arguments to mem_assign(). Returns 0 on success,
 * (uint64_t)-1 on failure.
 */
uint64_t qti_accesscontrol_mem_assign(const qti_accesscontrol_mem_t *mem,
				      uint32_t mem_len, const uint32_t *src,
				      uint32_t src_len,
				      const qti_accesscontrol_perm_t *perm,
				      uint32_t perm_len)
{
	return mem_assign(mem, mem_len, src, src_len, perm, perm_len);
}
498 
/*
 * Driver initialization: program the VMIDMT, apply the static XPU
 * configuration and register the XPU error interrupts. Any failure is
 * fatal — the CPU is parked in a wfi loop rather than continuing with
 * an unprotected system.
 */
void qti_accesscontrol_init(void)
{
	int rc = vmidmt_configure();

	if (rc != 0) {
		ERROR("Error configuring the VMIDMT, fatal (%d)\n", rc);
	} else {
		xpu_static_config();

		rc = xpu_register_interrupts();
		if (rc != 0)
			ERROR("Error registering the XPU interrupts, fatal\n");
	}

	if (rc == 0)
		return;

	/* Fatal: park the CPU forever. */
	for (;;)
		wfi();
}
522