/*
 * Copyright (c) 2026, Qualcomm Technologies, Inc. and/or its subsidiaries.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <limits.h>
#include <stddef.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/utils_def.h>
#include <xpu3.h>
#include <xpu_target_info.h>

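/*
 * Extract a register field: mask the value, then shift it down by the
 * position of the mask's lowest set bit.
 */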
#define FIELD_GET(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

#define XPU_TYPE_MPU 0x2
#define XPU_INITIALIZED 0x2
#define XPU_TYPE_BITMASK GENMASK(1, 0)
#define XPU_INVALID_ADDR 0xffffffffUL

#define XPU3_IDR0_NRG_BMSK GENMASK(25, 16)
#define XPU3_GCR0_DOMAIN_ENABLE BIT(0)
#define XPU3_GCR0_LOG_MODE_DISABLE BIT(1)
#define XPU3_GCR0_BASE_CFG XPU3_GCR0_DOMAIN_ENABLE
#define XPU3_CR0_INTR_ENABLE (GENMASK(3, 0) | BIT(8))
#define XPU3_GCR0_OFFSET 0x0
#define XPU3_SCR0_OFFSET 0x8
#define XPU3_CR0_OFFSET 0x10
#define XPU3_QAD0_GCR0_OFFSET 0x80
#define XPU3_QAD0_CR0_OFFSET 0x90
#define XPU3_QAD1_GCR0_OFFSET 0x100
#define XPU3_QAD1_CR0_OFFSET 0x110
#define XPU3_UMR_GCR0_OFFSET 0x300
#define XPU3_IDR0_OFFSET 0x3F8
#define XPU3_IDR2_OFFSET 0x3F0
#define XPU3_REV_OFFSET 0x3FC
#define XPU3_LOG_MODE_DIS_OFFSET 0x400
#define XPU3_RGn_GCR0_OFFSET 0x1000
#define XPU3_RGn_CR0_OFFSET 0x1010
#define XPU3_RGn_CR1_OFFSET 0x1014
#define XPU3_RGn_CR2_OFFSET 0x1018
#define XPU3_RGn_CR3_OFFSET 0x101C
#define XPU3_RGn_START0_OFFSET 0x1030
#define XPU3_RGn_START1_OFFSET 0x1034
#define XPU3_RGn_END0_OFFSET 0x1038
#define XPU3_RGn_END1_OFFSET 0x103C
#define XPU3_UMR_CR0_OFFSET 0x310
#define XPU3_UMR_CR1_OFFSET 0x314
#define XPU3_UMR_CR2_OFFSET 0x318
#define XPU3_UMR_CR3_OFFSET 0x31C
#define XPU3_RGn_REG_SPACE_SIZE 0x80
#define XPU3_REV_SHFT 0x10
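/*
 * Error record registers within a context's error bank (see dump_log()
 * below).
 */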
#define XPU3_EAR0_OFFSET 0
#define XPU3_EAR1_OFFSET 1
#define XPU3_ESR_OFFSET 2
#define XPU3_SRRESTORE_OFFSET 3
#define XPU3_ESYNR0_OFFSET 4
#define XPU3_ESYNR1_OFFSET 5
#define XPU3_ESYNR2_OFFSET 6
#define XPU3_ESYNR3_OFFSET 7
#define XPU3_ESYNR4_OFFSET 8

#define XPU3_RGn_RACR_OFFSET 0x1040
#define XPU3_RGn_WACR_OFFSET 0x1060
#define XPU3_UMR_RACR_OFFSET 0x40
#define XPU3_UMR_WACR_OFFSET 0x60

#define XPU_IDR2_VMID_SUPPORT_BITMASK   0xFF00U
#define XPU_IDR2_VMID_SUPPORT_BITSHIFT  0x8U

#define DEFAULT_VMID_0 (1U << 0)

static uint8_t get_xpu_type(struct xpu_instance *xpu)
{
	uintptr_t base = xpu->xpu_base_addr;
	uint32_t idr0;

	idr0 = mmio_read_32(base + XPU3_IDR0_OFFSET);

	return (uint8_t)(idr0 & XPU_TYPE_BITMASK);
}

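/*
 * Dump and clear the error record of the given XPU instance. The secure
 * context's error bank lives at base + 0x800, the non-secure one at
 * base + 0x880.
 */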
static void dump_log(enum xpu xpu, int type)
{
	char sec_char[2] = { ' ', 0 };
	uintptr_t xpu_addr;
	uint32_t offset;

	switch (type) {
	case XPU_ERR_SEC_CTX:
		offset = 0x800;
		sec_char[0] = 'S';
		break;
	case XPU_ERR_NON_SEC_CTX:
		offset = 0x880;
		break;
	default:
		return;
	}

	for (size_t i = 0; i < g_xpu_base_addr_array_count; i++) {
		if (g_xpu_base_addr_array[i].e_xpu != xpu)
			continue;

		xpu_addr = g_xpu_base_addr_array[i].base_addr + offset;

		ERROR("%s_ear0 0x%x\n", sec_char,
		      mmio_read_32(xpu_addr + XPU3_EAR0_OFFSET));
		ERROR("%s_ear1 0x%x\n", sec_char,
		      mmio_read_32(xpu_addr + XPU3_EAR1_OFFSET));
		ERROR("%s_esr 0x%x\n", sec_char,
		      mmio_read_32(xpu_addr + XPU3_ESR_OFFSET));
		ERROR("%s_esynr0 0x%x\n", sec_char,
		      mmio_read_32(xpu_addr + XPU3_ESYNR0_OFFSET));
		ERROR("%s_esynr1 0x%x\n", sec_char,
		      mmio_read_32(xpu_addr + XPU3_ESYNR1_OFFSET));
		ERROR("%s_esynr2 0x%x\n", sec_char,
		      mmio_read_32(xpu_addr + XPU3_ESYNR2_OFFSET));
		ERROR("%s_esynr3 0x%x\n", sec_char,
		      mmio_read_32(xpu_addr + XPU3_ESYNR3_OFFSET));
		ERROR("%s_esynr4 0x%x\n", sec_char,
		      mmio_read_32(xpu_addr + XPU3_ESYNR4_OFFSET));

		/* Clear the error (SRRESTORE), as XPU errors are non-fatal */
		mmio_write_32(xpu_addr + XPU3_SRRESTORE_OFFSET, 0);
		break;
	}
}

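/*
 * Interrupt handler entry point: read the interrupt status registers, map
 * each asserted bit to an XPU instance and dump that instance's error
 * record. ctx carries the error type (secure or non-secure context).
 */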
void xpu_print_log(void *ctx)
{
	uint32_t err_bitmask[ACC_XPU_ERR_INT_REG_NUM] = { 0 };
	const struct xpu_intr_reg_dtls *p = NULL;
	int err_type = *(int *)ctx;
	uintptr_t addr;
	uint32_t mask;

	ERROR("xpu: ISR\n");
	switch (err_type) {
	case XPU_ERR_SEC_CTX:
		ERROR("XPU ERROR: secure\n");
		p = xpu_sec_intr_status_reg;
		break;
	case XPU_ERR_NON_SEC_CTX:
		ERROR("XPU ERROR: non secure\n");
		p = xpu_non_sec_intr_status_reg;
		break;
	default:
		return;
	}

	for (size_t i = 0; i < ACC_XPU_ERR_INT_REG_NUM; i++, p++) {
		addr = p->xpu_intr_reg_addr;
		mask = p->xpu_intr_reg_mask;

		err_bitmask[i] = mmio_read_32(addr) & mask;
	}

	if (ACC_XPU_ERR_INT_REG_NUM == 1) {
		ERROR("XPU INTR 0 >> %08x\n", err_bitmask[0]);
	} else {
		ERROR("XPU INTR 0:1 >> %08x:%08x\n", err_bitmask[0],
		      err_bitmask[1]);
	}

	for (size_t reg = 0; reg < ACC_XPU_ERR_INT_REG_NUM; reg++) {
		struct xpu_err_pos_to_hal_map *row;

		mask = err_bitmask[reg];
		if (!mask)
			continue;

		row = xpu_err_pos_to_hal_map[reg];

		for (size_t i = 0; row[i].bit_mask != 0; i++) {
			const struct xpu_err_pos_to_hal_map *m;

			if (i >= ACC_XPU_ERR_NUM_PER_REG)
				break;

			m = &row[i];

			if (!(m->bit_mask & mask))
				continue;

			if (m->xpu >= XPU_TYPE_COUNT)
				continue;

			if (m->xpu == XPU_TYPE_NONE)
				continue;

			dump_log(m->xpu, err_type);
		}
	}
}

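/* IDR0.NRG encodes the number of resource groups minus one. */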
static uint32_t get_idr0_nrg(struct xpu_instance *xpu)
{
	uint32_t id_r0 = mmio_read_32(xpu->xpu_base_addr + XPU3_IDR0_OFFSET);

	return FIELD_GET(XPU3_IDR0_NRG_BMSK, id_r0) + 1;
}

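/*
 * One-time per-instance setup: enable domain checking and error interrupts
 * on the main instance and on both QADs, unless GCR0 has already been
 * programmed, possibly by an earlier boot stage.
 */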
static void enable_domain(struct xpu_instance *xpu)
{
	uint32_t gcr0_cfg = XPU3_GCR0_DOMAIN_ENABLE;
	uint32_t gcr0_val;
	uint32_t rev;

	/* Already initialized from this context */
	if ((xpu->flag & XPU_INITIALIZED) != 0U)
		return;

	xpu->flag |= XPU_INITIALIZED;

	/* If GCR0 is already programmed */
	gcr0_val = mmio_read_32(xpu->xpu_base_addr + XPU3_GCR0_OFFSET);
	if (gcr0_val != 0U)
		return;

	/* HW revision check: enable log-mode disable on newer revisions */
	rev = mmio_read_32(xpu->xpu_base_addr + XPU3_REV_OFFSET);
	rev >>= XPU3_REV_SHFT;

	if (rev > 0x3002U) {
		mmio_write_32(xpu->xpu_base_addr + XPU3_LOG_MODE_DIS_OFFSET, 1);
		gcr0_cfg |= XPU3_GCR0_LOG_MODE_DISABLE;
	}

	/* Main XPU instance */
	mmio_write_32(xpu->xpu_base_addr + XPU3_GCR0_OFFSET, gcr0_cfg);
	mmio_write_32(xpu->xpu_base_addr + XPU3_CR0_OFFSET,
		      XPU3_CR0_INTR_ENABLE);

	/* QAD0 */
	mmio_write_32(xpu->xpu_base_addr + XPU3_QAD0_CR0_OFFSET,
		      XPU3_CR0_INTR_ENABLE);
	mmio_write_32(xpu->xpu_base_addr + XPU3_QAD0_GCR0_OFFSET, gcr0_cfg);

	/* QAD1 */
	mmio_write_32(xpu->xpu_base_addr + XPU3_QAD1_CR0_OFFSET,
		      XPU3_CR0_INTR_ENABLE);
	mmio_write_32(xpu->xpu_base_addr + XPU3_QAD1_GCR0_OFFSET, gcr0_cfg);

	dmbsy();
	isb();
}

static inline bool vmid_supported(const struct xpu_instance *xpu)
{
	uint32_t idr2;

	idr2 = mmio_read_32(xpu->xpu_base_addr + XPU3_IDR2_OFFSET);

	return ((idr2 & XPU_IDR2_VMID_SUPPORT_BITMASK) >>
		XPU_IDR2_VMID_SUPPORT_BITSHIFT) != 0U;
}

static void set_mpu_permissions(struct xpu_instance *xpu, uint32_t rg_num)
{
	struct rg_partition_range *range = xpu->partition_range;
	struct rg_domain_ownership *owner = xpu->rg_owner;
	uintptr_t base = xpu->xpu_base_addr;
	uintptr_t cr0, cr1, cr2, cr3;
	uintptr_t racr, racw;
	bool no_perms;
	uint32_t tmp;

	if (get_xpu_type(xpu) != XPU_TYPE_MPU)
		return;

	for (size_t i = 0; i < xpu->part_range_arr_size; i++, range++, owner++) {
		if (rg_num != XPU_RG_ALL && range->rg_num != rg_num)
			continue;

		if (i >= xpu->owner_arr_size)
			goto out;

		no_perms = (owner->perm_r == 0 && owner->perm_w == 0);

		if (owner->rg_num == XPU_UMR_RG || no_perms)
			continue;

		cr1 = base + XPU3_RGn_CR1_OFFSET +
		      XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		cr3 = base + XPU3_RGn_CR3_OFFSET +
		      XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		cr0 = base + XPU3_RGn_CR0_OFFSET +
		      XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		cr2 = base + XPU3_RGn_CR2_OFFSET +
		      XPU3_RGn_REG_SPACE_SIZE * range->rg_num;

		/* Set permissions */
		mmio_write_32(cr1, owner->perm_r);
		mmio_write_32(cr3, owner->perm_w);

		if (owner->perm_r & APPS_S_DOMAIN)
			mmio_write_32(cr0, 1);

		if (owner->perm_w & APPS_S_DOMAIN)
			mmio_write_32(cr2, 1);

		if (vmid_supported(xpu)) {
			/* RACR */
			racr = base + XPU3_RGn_RACR_OFFSET +
				XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
			tmp = (owner->perm_r & APPS_NS_DOMAIN) ?
				DEFAULT_VMID_0 : 0;
			mmio_write_32(racr, tmp);

			/* WACR */
			racw = base + XPU3_RGn_WACR_OFFSET +
				XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
			tmp = (owner->perm_w & APPS_NS_DOMAIN) ?
				DEFAULT_VMID_0 : 0;
			mmio_write_32(racw, tmp);
		}
	}

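	/*
	 * After the loop, owner points one past the last partition entry;
	 * the board data is assumed to supply the UMR ownership entry there
	 * (i.e. owner_arr_size == part_range_arr_size + 1). If it does not,
	 * the reads below go past the end of the array.
	 */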
	/* Handle UMR region */
	no_perms = (owner->perm_r == 0 && owner->perm_w == 0);

	if ((owner->rg_num == XPU_UMR_RG) && !no_perms) {
		cr1 = base + XPU3_UMR_CR1_OFFSET;
		cr3 = base + XPU3_UMR_CR3_OFFSET;
		cr0 = base + XPU3_UMR_CR0_OFFSET;
		cr2 = base + XPU3_UMR_CR2_OFFSET;

		mmio_write_32(cr1, owner->perm_r);
		mmio_write_32(cr3, owner->perm_w);

		if (owner->perm_r & APPS_S_DOMAIN)
			mmio_write_32(cr0, 1);

		if (owner->perm_w & APPS_S_DOMAIN)
			mmio_write_32(cr2, 1);

		if (vmid_supported(xpu)) {
			/* RACR */
			racr = base + XPU3_UMR_RACR_OFFSET;
			tmp = (owner->perm_r & APPS_NS_DOMAIN) ?
				DEFAULT_VMID_0 : 0;
			mmio_write_32(racr, tmp);

			/* WACR */
			racw = base + XPU3_UMR_WACR_OFFSET;
			tmp = (owner->perm_w & APPS_NS_DOMAIN) ?
				DEFAULT_VMID_0 : 0;
			mmio_write_32(racw, tmp);
		}
	}
out:
	dmbsy();
	isb();
}

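/*
 * Runtime variant of set_mpu_permissions(): rewrite only the read/write
 * permission registers (and the VMID access-control registers when
 * supported) for the selected region, leaving region ownership and
 * address ranges untouched.
 */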
static void set_mpu_dynamic_permissions(struct xpu_instance *xpu,
					uint32_t rg_num, uint32_t perm_r,
					uint32_t perm_w)
{
	struct rg_partition_range *range = xpu->partition_range;
	uintptr_t base = xpu->xpu_base_addr;
	uintptr_t racr, racw;
	uintptr_t cr1, cr3;
	uint32_t tmp;

	if (get_xpu_type(xpu) != XPU_TYPE_MPU)
		return;

	for (size_t i = 0; i < xpu->part_range_arr_size; i++, range++) {
		if (rg_num != XPU_RG_ALL && range->rg_num != rg_num)
			continue;

		cr1 = base + XPU3_RGn_CR1_OFFSET +
		      XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		cr3 = base + XPU3_RGn_CR3_OFFSET +
		      XPU3_RGn_REG_SPACE_SIZE * range->rg_num;

		mmio_write_32(cr1, perm_r);
		mmio_write_32(cr3, perm_w);

		if (!vmid_supported(xpu))
			continue;

		racr = base + XPU3_RGn_RACR_OFFSET +
			XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		tmp = (perm_r & APPS_NS_DOMAIN) ? DEFAULT_VMID_0 : 0;
		mmio_write_32(racr, tmp);

		racw = base + XPU3_RGn_WACR_OFFSET +
			XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		tmp = (perm_w & APPS_NS_DOMAIN) ? DEFAULT_VMID_0 : 0;
		mmio_write_32(racw, tmp);
	}
}

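/*
 * Program each partition's address range. The 64-bit start and end
 * addresses are split across two 32-bit START/END registers per region.
 */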
static void program_mpu_partitions(struct xpu_instance *xpu, uint32_t rg_num)
{
	struct rg_partition_range *range = xpu->partition_range;
	uint32_t start_lo, start_hi, end_lo, end_hi;
	uintptr_t start_0, start_1, end_0, end_1;

	if (get_xpu_type(xpu) != XPU_TYPE_MPU)
		return;

	for (size_t i = 0; i < xpu->part_range_arr_size; i++, range++) {
		if (rg_num != XPU_RG_ALL && range->rg_num != rg_num)
			continue;

		start_lo = (uint32_t)range->start_addr;
		start_hi = (uint32_t)(range->start_addr >> 32);

		end_lo = (uint32_t)range->end_addr;
		end_hi = (uint32_t)(range->end_addr >> 32);

		start_0 = xpu->xpu_base_addr + XPU3_RGn_START0_OFFSET +
			  XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		start_1 = xpu->xpu_base_addr + XPU3_RGn_START1_OFFSET +
			  XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		end_0 = xpu->xpu_base_addr + XPU3_RGn_END0_OFFSET +
			XPU3_RGn_REG_SPACE_SIZE * range->rg_num;
		end_1 = xpu->xpu_base_addr + XPU3_RGn_END1_OFFSET +
			XPU3_RGn_REG_SPACE_SIZE * range->rg_num;

		/* Set the specified address range in the partition */
		mmio_write_32(start_0, start_lo);
		mmio_write_32(start_1, start_hi);
		mmio_write_32(end_0, end_lo);
		mmio_write_32(end_1, end_hi);
	}

	dmbsy();
	isb();
}

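/*
 * Assign an owner domain to each region's GCR0. The unmapped-region (UMR)
 * entry is always programmed, regardless of the rg_num filter.
 */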
static void set_region_ownership(struct xpu_instance *xpu, uint32_t rg_num)
{
	struct rg_domain_ownership *owner = xpu->rg_owner;
	uintptr_t base = xpu->xpu_base_addr;
	uintptr_t gcr0;

	for (size_t i = 0; i < xpu->owner_arr_size; i++, owner++) {
		/* Handle unmapped region ownership */
		if (owner->rg_num == XPU_UMR_RG) {
			gcr0 = base + XPU3_UMR_GCR0_OFFSET;
			mmio_write_32(gcr0, owner->owner_domain);
			continue;
		}

		/* Apply to ALL or specific region */
		if (rg_num != XPU_RG_ALL && owner->rg_num != rg_num)
			continue;

		gcr0 = base + XPU3_RGn_GCR0_OFFSET +
		       XPU3_RGn_REG_SPACE_SIZE * owner->rg_num;

		/* Set the specified domain as the owner */
		mmio_write_32(gcr0, owner->owner_domain);
	}
}

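/*
 * Static lockdown: for every instance flagged for static protection,
 * enable the domain and program partition ranges, ownership and
 * permissions for all regions.
 */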
void xpu_lock_down_assets(struct xpu_instance *xpus, uint8_t xpu_count)
{
	for (size_t i = 0; i < xpu_count; i++, xpus++) {
		if (!(xpus->flag & XPU_PROTECTION_STATIC))
			continue;

		enable_domain(xpus);

		program_mpu_partitions(xpus, XPU_RG_ALL);
		set_region_ownership(xpus, XPU_RG_ALL);
		set_mpu_permissions(xpus, XPU_RG_ALL);
	}
}

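/*
 * Dynamic lockdown of a single instance: locate it by xpu_id and apply the
 * caller-supplied read/write permissions to the selected region.
 * Returns 0 on success, -1 if no instance matches xpu_id.
 */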
int xpu_lock_down_assets_dynamic(struct xpu_instance *xpus, uint8_t xpu_count,
				 uint32_t xpu_id, uint32_t rg_num,
				 uint32_t perm_r, uint32_t perm_w)
{
	uint32_t i;

	for (i = 0; i < xpu_count; i++, xpus++) {
		if (xpus->xpu_id == xpu_id)
			break;
	}

	if (i >= xpu_count)
		return -1;

	enable_domain(xpus);

	program_mpu_partitions(xpus, rg_num);
	set_region_ownership(xpus, rg_num);
	set_mpu_dynamic_permissions(xpus, rg_num, perm_r, perm_w);

	return 0;
}

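/*
 * Park the trailing dev_cnt regions for later dynamic configuration: no
 * owner domain and an invalid address range, so they stay inert until
 * programmed through xpu_lock_down_assets_dynamic().
 */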
static void xpu_init_unused_regions(struct xpu_instance *mpu, uint32_t dev_cnt)
{
	uint32_t part_len = (uint32_t)mpu->part_range_arr_size;
	uint32_t owner_len = (uint32_t)mpu->owner_arr_size;
	uint32_t num_regions = get_idr0_nrg(mpu);

	if ((dev_cnt == 0U) || (dev_cnt > num_regions) ||
	    (dev_cnt > owner_len) || (dev_cnt > part_len)) {
		ERROR("XPU invalid board configuration\n");
		return;
	}

	uint32_t rg_start = num_regions - dev_cnt;

	struct rg_partition_range *part =
		mpu->partition_range + (part_len - dev_cnt);

	struct rg_domain_ownership *owner =
		mpu->rg_owner + (owner_len - dev_cnt);

	/* Prepare un-initialized regions for dynamic configuration */
	for (uint32_t k = 0U; k < dev_cnt; k++) {
		uint32_t rg = rg_start + k;

		owner->owner_domain = NO_DOMAIN;
		owner->rg_num = rg;

		part->start_addr = XPU_INVALID_ADDR;
		part->end_addr = XPU_INVALID_ADDR;
		part->rg_num = rg;

		owner++;
		part++;
	}
}

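/*
 * Initialize every MPU-type instance in a range: reserve the trailing
 * device partitions for dynamic use, then enable the domain.
 */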
static void xpu_init_mpu_instances(struct mpu_ranges *range)
{
	for (uint32_t i = 0; i < range->mpus_count; i++) {
		struct xpu_instance *mpu = &range->mpus[i];

		if (get_xpu_type(mpu) != XPU_TYPE_MPU)
			continue;

		uint32_t dev_cnt = range->device_prtn_cnt;

		xpu_init_unused_regions(mpu, dev_cnt);
		enable_domain(mpu);
	}
}

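/*
 * Boot-time entry point: walk all MPU range tables and initialize their
 * instances.
 */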
void xpu_master_mpu_init(struct mpu_ranges *ranges, const uint32_t range_count)
{
	struct mpu_ranges *range = ranges;

	for (size_t i = 0; i < range_count; i++, range++)
		xpu_init_mpu_instances(range);
}
561