xref: /optee_os/core/arch/arm/kernel/secure_partition.c (revision a1d5c81f8834a9d2c6f4372cce2e59e70e709121)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020, Arm Limited. All rights reserved.
4  * Copyright (c) 2019, Linaro Limited
5  */
6 
7 #include <crypto/crypto.h>
8 #include <ffa.h>
9 #include <kernel/abort.h>
10 #include <kernel/secure_partition.h>
11 #include <kernel/user_mode_ctx.h>
12 #include <mm/fobj.h>
13 #include <mm/mobj.h>
14 #include <mm/tee_mmu.h>
15 #include <pta_stmm.h>
16 #include <tee_api_defines_extensions.h>
17 #include <tee/tee_pobj.h>
18 #include <tee/tee_svc.h>
19 #include <tee/tee_svc_storage.h>
20 #include <zlib.h>
21 
22 #include "thread_private.h"
23 
24 static const TEE_UUID stmm_uuid = PTA_STMM_UUID;
25 
26 /*
27  * Once complete FF-A support is added, these IDs will become
28  * discoverable. Until then they are considered part of the internal
29  * ABI between OP-TEE and StMM.
30  */
31 static const uint16_t stmm_id = 1U;
32 static const uint16_t stmm_pta_id = 2U;
33 static const uint16_t mem_mgr_id = 3U;
34 static const uint16_t ffa_storage_id = 4U;
35 
36 static const unsigned int stmm_stack_size = 4 * SMALL_PAGE_SIZE;
37 static const unsigned int stmm_heap_size = 398 * SMALL_PAGE_SIZE;
38 static const unsigned int stmm_sec_buf_size = SMALL_PAGE_SIZE;
39 static const unsigned int stmm_ns_comm_buf_size = SMALL_PAGE_SIZE;
40 
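/*
 * The compressed StMM image and its size constants are expected to be
 * provided by the build system (e.g. generated from the binary pointed
 * to by CFG_STMM_PATH); the exact generation mechanism is assumed here.
 */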
41 extern unsigned char stmm_image[];
42 extern const unsigned int stmm_image_size;
43 extern const unsigned int stmm_image_uncompressed_size;
44 
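/*
 * Allocate and initialize the StMM secure partition context: set the TA
 * context ops, UUID and flags, initialize the virtual memory info
 * (address space) and take an initial reference.
 */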
45 static struct sec_part_ctx *sec_part_alloc_ctx(const TEE_UUID *uuid)
46 {
47 	TEE_Result res = TEE_SUCCESS;
48 	struct sec_part_ctx *spc = NULL;
49 
50 	spc = calloc(1, sizeof(*spc));
51 	if (!spc)
52 		return NULL;
53 
54 	spc->uctx.ctx.ops = &secure_partition_ops;
55 	spc->uctx.ctx.uuid = *uuid;
56 	spc->uctx.ctx.flags = TA_FLAG_SINGLE_INSTANCE |
57 			      TA_FLAG_INSTANCE_KEEP_ALIVE;
58 
59 	res = vm_info_init(&spc->uctx);
60 	if (res) {
61 		free(spc);
62 		return NULL;
63 	}
64 
65 	spc->uctx.ctx.ref_count = 1;
66 	condvar_init(&spc->uctx.ctx.busy_cv);
67 
68 	return spc;
69 }
70 
71 static void clear_vfp_state(struct sec_part_ctx *spc __maybe_unused)
72 {
73 	if (IS_ENABLED(CFG_WITH_VFP))
74 		thread_user_clear_vfp(&spc->uctx.vfp);
75 }
76 
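/*
 * Enter the secure partition at S-EL0 with the register state in
 * spc->regs. All maskable exceptions are masked and EL0 access to the
 * physical counter is temporarily enabled via CNTKCTL. Returns
 * TEE_ERROR_TARGET_DEAD if StMM panicked.
 */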
77 static TEE_Result sec_part_enter_user_mode(struct sec_part_ctx *spc)
78 {
79 	uint32_t exceptions = 0;
80 	uint32_t panic_code = 0;
81 	uint32_t panicked = 0;
82 	uint64_t cntkctl = 0;
83 
84 	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
85 	cntkctl = read_cntkctl();
86 	write_cntkctl(cntkctl | CNTKCTL_PL0PCTEN);
87 	__thread_enter_user_mode(&spc->regs, &panicked, &panic_code);
88 	write_cntkctl(cntkctl);
89 	thread_unmask_exceptions(exceptions);
90 
91 	clear_vfp_state(spc);
92 
93 	if (panicked) {
94 		abort_print_current_ta();
95 		DMSG("sec_part panicked with code %#"PRIx32, panic_code);
96 		return TEE_ERROR_TARGET_DEAD;
97 	}
98 
99 	return TEE_SUCCESS;
100 }
101 
102 static void init_stmm_regs(struct sec_part_ctx *spc, unsigned long a0,
103 			   unsigned long a1, unsigned long sp, unsigned long pc)
104 {
105 	spc->regs.x[0] = a0;
106 	spc->regs.x[1] = a1;
107 	spc->regs.sp = sp;
108 	spc->regs.pc = pc;
109 }
110 
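/*
 * Allocate page-aligned, fobj-backed secure memory of at least @sz bytes
 * and map it into the partition's address space with protection @prot.
 * The resulting virtual address is returned in @va.
 */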
111 static TEE_Result alloc_and_map_sp_fobj(struct sec_part_ctx *spc, size_t sz,
112 					uint32_t prot, vaddr_t *va)
113 {
114 	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
115 	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
116 	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL);
117 	TEE_Result res = TEE_SUCCESS;
118 
119 	fobj_put(fobj);
120 	if (!mobj)
121 		return TEE_ERROR_OUT_OF_MEMORY;
122 
123 	res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,
124 		     prot, 0, mobj, 0);
125 	if (res)
126 		mobj_put(mobj);
127 
128 	return res;
129 }
130 
131 static void *zalloc(void *opaque __unused, unsigned int items,
132 		    unsigned int size)
133 {
134 	return malloc(items * size);
135 }
136 
137 static void zfree(void *opaque __unused, void *address)
138 {
139 	free(address);
140 }
141 
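/*
 * Decompress the embedded (zlib-compressed) StMM image into the already
 * mapped destination buffer. Any zlib error is fatal at this point,
 * hence the panics.
 */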
142 static void uncompress_image(void *dst, size_t dst_size, void *src,
143 			     size_t src_size)
144 {
145 	z_stream strm = {
146 		.next_in = src,
147 		.avail_in = src_size,
148 		.next_out = dst,
149 		.avail_out = dst_size,
150 		.zalloc = zalloc,
151 		.zfree = zfree,
152 	};
153 
154 	if (inflateInit(&strm) != Z_OK)
155 		panic("inflateInit");
156 
157 	if (inflate(&strm, Z_SYNC_FLUSH) != Z_STREAM_END)
158 		panic("inflate");
159 
160 	if (inflateEnd(&strm) != Z_OK)
161 		panic("inflateEnd");
162 }
163 
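/*
 * Load and start StMM: map one contiguous region for the image, heap,
 * stack and shared (secure) buffer plus a separate non-secure
 * communication buffer, decompress the image, tighten the per-region
 * protections, fill in the boot and MP info consumed by StMM and
 * finally enter the partition so it can initialize itself.
 */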
164 static TEE_Result load_stmm(struct sec_part_ctx *spc)
165 {
166 	struct secure_partition_boot_info *boot_info = NULL;
167 	struct secure_partition_mp_info *mp_info = NULL;
168 	TEE_Result res = TEE_SUCCESS;
169 	vaddr_t sp_addr = 0;
170 	vaddr_t image_addr = 0;
171 	vaddr_t heap_addr = 0;
172 	vaddr_t stack_addr = 0;
173 	vaddr_t sec_buf_addr = 0;
174 	vaddr_t comm_buf_addr = 0;
175 	unsigned int sp_size = 0;
176 	unsigned int uncompressed_size_roundup = 0;
177 
178 	uncompressed_size_roundup = ROUNDUP(stmm_image_uncompressed_size,
179 					    SMALL_PAGE_SIZE);
180 	sp_size = uncompressed_size_roundup + stmm_stack_size +
181 		  stmm_heap_size + stmm_sec_buf_size;
182 	res = alloc_and_map_sp_fobj(spc, sp_size,
183 				    TEE_MATTR_PRW, &sp_addr);
184 	if (res)
185 		return res;
186 
187 	res = alloc_and_map_sp_fobj(spc, stmm_ns_comm_buf_size,
188 				    TEE_MATTR_URW | TEE_MATTR_PRW,
189 				    &comm_buf_addr);
190 	/*
191 	 * We don't need to free the previously mapped regions here; they
192 	 * are all released by the destruction call (sec_part_ctx_destroy()).
193 	 */
194 	if (res)
195 		return res;
196 
197 	image_addr = sp_addr;
198 	heap_addr = image_addr + uncompressed_size_roundup;
199 	stack_addr = heap_addr + stmm_heap_size;
200 	sec_buf_addr = stack_addr + stmm_stack_size;
201 
202 	tee_mmu_set_ctx(&spc->uctx.ctx);
203 	uncompress_image((void *)image_addr, stmm_image_uncompressed_size,
204 			 stmm_image, stmm_image_size);
205 
206 	res = vm_set_prot(&spc->uctx, image_addr, uncompressed_size_roundup,
207 			  TEE_MATTR_URX | TEE_MATTR_PR);
208 	if (res)
209 		return res;
210 
211 	res = vm_set_prot(&spc->uctx, heap_addr, stmm_heap_size,
212 			  TEE_MATTR_URW | TEE_MATTR_PRW);
213 	if (res)
214 		return res;
215 
216 	res = vm_set_prot(&spc->uctx, stack_addr, stmm_stack_size,
217 			  TEE_MATTR_URW | TEE_MATTR_PRW);
218 	if (res)
219 		return res;
220 
221 	res = vm_set_prot(&spc->uctx, sec_buf_addr, stmm_sec_buf_size,
222 			  TEE_MATTR_URW | TEE_MATTR_PRW);
223 	if (res)
224 		return res;
225 
226 	DMSG("stmm load address %#"PRIxVA, image_addr);
227 
228 	boot_info = (struct secure_partition_boot_info *)sec_buf_addr;
229 	mp_info = (struct secure_partition_mp_info *)(boot_info + 1);
230 	*boot_info = (struct secure_partition_boot_info){
231 		.h.type = SP_PARAM_SP_IMAGE_BOOT_INFO,
232 		.h.version = SP_PARAM_VERSION_1,
233 		.h.size = sizeof(struct secure_partition_boot_info),
234 		.h.attr = 0,
235 		.sp_mem_base = sp_addr,
236 		.sp_mem_limit = sp_addr + sp_size,
237 		.sp_image_base = image_addr,
238 		.sp_stack_base = stack_addr,
239 		.sp_heap_base = heap_addr,
240 		.sp_ns_comm_buf_base = comm_buf_addr,
241 		.sp_shared_buf_base = sec_buf_addr,
242 		.sp_image_size = stmm_image_size,
243 		.sp_pcpu_stack_size = stmm_stack_size,
244 		.sp_heap_size = stmm_heap_size,
245 		.sp_ns_comm_buf_size = stmm_ns_comm_buf_size,
246 		.sp_shared_buf_size = stmm_sec_buf_size,
247 		.num_sp_mem_regions = 6,
248 		.num_cpus = 1,
249 		.mp_info = mp_info,
250 	};
251 	mp_info->mpidr = read_mpidr_el1();
252 	mp_info->linear_id = 0;
253 	mp_info->flags = MP_INFO_FLAG_PRIMARY_CPU;
254 	spc->ns_comm_buf_addr = comm_buf_addr;
255 	spc->ns_comm_buf_size = stmm_ns_comm_buf_size;
256 
257 	init_stmm_regs(spc, sec_buf_addr,
258 		       (vaddr_t)(mp_info + 1) - sec_buf_addr,
259 		       stack_addr + stmm_stack_size, image_addr);
260 
261 	return sec_part_enter_user_mode(spc);
262 }
263 
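/*
 * Create the session towards StMM: only the StMM UUID is accepted. The
 * context is registered in tee_ctxes once StMM has initialized
 * successfully.
 */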
264 TEE_Result sec_part_init_session(const TEE_UUID *uuid,
265 				 struct tee_ta_session *sess)
266 {
267 	struct sec_part_ctx *spc = NULL;
268 	TEE_Result res = TEE_SUCCESS;
269 
270 	if (memcmp(uuid, &stmm_uuid, sizeof(*uuid)))
271 		return TEE_ERROR_ITEM_NOT_FOUND;
272 
273 	spc = sec_part_alloc_ctx(uuid);
274 	if (!spc)
275 		return TEE_ERROR_OUT_OF_MEMORY;
276 
277 	spc->is_initializing = true;
278 
279 	sess->ctx = &spc->uctx.ctx;
280 	tee_ta_push_current_session(sess);
281 	res = load_stmm(spc);
282 	tee_ta_pop_current_session();
283 	tee_mmu_set_ctx(NULL);
284 	if (res) {
285 		sess->ctx = NULL;
286 		spc->uctx.ctx.ops->destroy(&spc->uctx.ctx);
287 
288 		return res;
289 	}
290 
291 	spc->is_initializing = false;
292 	TAILQ_INSERT_TAIL(&tee_ctxes, &spc->uctx.ctx, link);
293 
294 	return TEE_SUCCESS;
295 }
296 
297 static TEE_Result stmm_enter_open_session(struct tee_ta_session *s,
298 					  struct tee_ta_param *param,
299 					  TEE_ErrorOrigin *eo)
300 {
301 	struct sec_part_ctx *spc = to_sec_part_ctx(s->ctx);
302 	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
303 						TEE_PARAM_TYPE_NONE,
304 						TEE_PARAM_TYPE_NONE,
305 						TEE_PARAM_TYPE_NONE);
306 
307 	if (param->types != exp_pt)
308 		return TEE_ERROR_BAD_PARAMETERS;
309 
310 	if (spc->is_initializing) {
311 		/* StMM is initialized in sec_part_init_session() */
312 		*eo = TEE_ORIGIN_TEE;
313 		return TEE_ERROR_BAD_STATE;
314 	}
315 
316 	return TEE_SUCCESS;
317 }
318 
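/*
 * Handle PTA_STMM_CMD_COMMUNICATE: copy the client buffer into the
 * partition's non-secure communication buffer, enter StMM with an FF-A
 * direct request and copy the response back. The StMM return code (x4)
 * is reported in the value output parameter.
 */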
319 static TEE_Result stmm_enter_invoke_cmd(struct tee_ta_session *s,
320 					uint32_t cmd,
321 					struct tee_ta_param *param,
322 					TEE_ErrorOrigin *eo __unused)
323 {
324 	struct sec_part_ctx *spc = to_sec_part_ctx(s->ctx);
325 	TEE_Result res = TEE_SUCCESS;
326 	TEE_Result __maybe_unused tmp_res = TEE_SUCCESS;
327 	unsigned int ns_buf_size = 0;
328 	struct param_mem *mem = NULL;
329 	void *va = NULL;
330 	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
331 						TEE_PARAM_TYPE_VALUE_OUTPUT,
332 						TEE_PARAM_TYPE_NONE,
333 						TEE_PARAM_TYPE_NONE);
334 
335 	if (cmd != PTA_STMM_CMD_COMMUNICATE)
336 		return TEE_ERROR_BAD_PARAMETERS;
337 
338 	if (param->types != exp_pt)
339 		return TEE_ERROR_BAD_PARAMETERS;
340 
341 	mem = &param->u[0].mem;
342 	ns_buf_size = mem->size;
343 	if (ns_buf_size > spc->ns_comm_buf_size) {
344 		mem->size = spc->ns_comm_buf_size;
345 		return TEE_ERROR_EXCESS_DATA;
346 	}
347 
348 	res = mobj_inc_map(mem->mobj);
349 	if (res)
350 		return res;
351 
352 	va = mobj_get_va(mem->mobj, mem->offs);
353 	if (!va) {
354 		EMSG("Can't get a valid VA for NS buffer");
355 		res = TEE_ERROR_BAD_PARAMETERS;
356 		goto out_va;
357 	}
358 
359 	spc->regs.x[0] = FFA_MSG_SEND_DIRECT_REQ_64;
360 	spc->regs.x[1] = (stmm_pta_id << 16) | stmm_id;
361 	spc->regs.x[2] = FFA_PARAM_MBZ;
362 	spc->regs.x[3] = spc->ns_comm_buf_addr;
363 	spc->regs.x[4] = ns_buf_size;
364 	spc->regs.x[5] = 0;
365 	spc->regs.x[6] = 0;
366 	spc->regs.x[7] = 0;
367 
368 	tee_ta_push_current_session(s);
369 
370 	memcpy((void *)spc->ns_comm_buf_addr, va, ns_buf_size);
371 
372 	res = sec_part_enter_user_mode(spc);
373 	if (res)
374 		goto out_session;
375 	/*
376 	 * Copy the SPM response from the secure partition back to the
377 	 * non-secure buffer of the client that called us.
378 	 */
379 	param->u[1].val.a = spc->regs.x[4];
380 
381 	memcpy(va, (void *)spc->ns_comm_buf_addr, ns_buf_size);
382 
383 out_session:
384 	tee_ta_pop_current_session();
385 out_va:
386 	tmp_res = mobj_dec_map(mem->mobj);
387 	assert(!tmp_res);
388 
389 	return res;
390 }
391 
392 static void stmm_enter_close_session(struct tee_ta_session *s __unused)
393 {
394 }
395 
396 static void sec_part_dump_state(struct tee_ta_ctx *ctx)
397 {
398 	user_mode_ctx_print_mappings(to_user_mode_ctx(ctx));
399 }
400 
401 static uint32_t sec_part_get_instance_id(struct tee_ta_ctx *ctx)
402 {
403 	return to_sec_part_ctx(ctx)->uctx.vm_info.asid;
404 }
405 
406 static void sec_part_ctx_destroy(struct tee_ta_ctx *ctx)
407 {
408 	struct sec_part_ctx *spc = to_sec_part_ctx(ctx);
409 
410 	tee_pager_rem_um_areas(&spc->uctx);
411 	vm_info_final(&spc->uctx);
412 	free(spc);
413 }
414 
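/*
 * FFA_SVC_MEMORY_ATTRIBUTES_GET_64 handler: return the access/execute
 * permissions of the page at @va in the calling partition's address
 * space, or SP_RET_DENIED on error.
 */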
415 static uint32_t sp_svc_get_mem_attr(vaddr_t va)
416 {
417 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
418 	struct tee_ta_session *sess = NULL;
419 	struct sec_part_ctx *spc = NULL;
420 	uint16_t attrs = 0;
421 	uint16_t perm = 0;
422 
423 	if (!va)
424 		goto err;
425 
426 	res = tee_ta_get_current_session(&sess);
427 	if (res != TEE_SUCCESS)
428 		goto err;
429 
430 	spc = to_sec_part_ctx(sess->ctx);
431 
432 	res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);
433 	if (res)
434 		goto err;
435 
436 	if (attrs & TEE_MATTR_UR)
437 		perm |= SP_MEM_ATTR_ACCESS_RO;
438 	else if (attrs & TEE_MATTR_UW)
439 		perm |= SP_MEM_ATTR_ACCESS_RW;
440 
441 	if (attrs & TEE_MATTR_UX)
442 		perm |= SP_MEM_ATTR_EXEC;
443 
444 	return perm;
445 err:
446 	return SP_RET_DENIED;
447 }
448 
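/*
 * FFA_SVC_MEMORY_ATTRIBUTES_SET_64 handler: change the protection of
 * @nr_pages pages starting at @va according to @perm.
 */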
449 static int sp_svc_set_mem_attr(vaddr_t va, unsigned int nr_pages, uint32_t perm)
450 {
451 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
452 	struct tee_ta_session *sess = NULL;
453 	struct sec_part_ctx *spc = NULL;
454 	size_t sz = 0;
455 	uint32_t prot = 0;
456 
457 	if (!va || !nr_pages || MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz))
458 		return SP_RET_INVALID_PARAM;
459 
460 	if (perm & ~SP_MEM_ATTR_ALL)
461 		return SP_RET_INVALID_PARAM;
462 
463 	res = tee_ta_get_current_session(&sess);
464 	if (res != TEE_SUCCESS)
465 		return SP_RET_DENIED;
466 
467 	spc = to_sec_part_ctx(sess->ctx);
468 
469 	if ((perm & SP_MEM_ATTR_ACCESS_MASK) == SP_MEM_ATTR_ACCESS_RO)
470 		prot |= TEE_MATTR_UR;
471 	else if ((perm & SP_MEM_ATTR_ACCESS_MASK) == SP_MEM_ATTR_ACCESS_RW)
472 		prot |= TEE_MATTR_URW;
473 
474 	if ((perm & SP_MEM_ATTR_EXEC_NEVER) == SP_MEM_ATTR_EXEC)
475 		prot |= TEE_MATTR_UX;
476 
477 	res = vm_set_prot(&spc->uctx, va, sz, prot);
478 	if (res)
479 		return SP_RET_DENIED;
480 
481 	return SP_RET_SUCCESS;
482 }
483 
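/*
 * Exit from StMM back to sec_part_enter_user_mode(). Unless StMM is
 * panicking, the register state is saved so execution can resume at the
 * next request. The panic status and code are passed back through the
 * syscall exit values in x1/x2.
 */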
484 static bool return_helper(bool panic, uint32_t panic_code,
485 			  struct thread_svc_regs *svc_regs)
486 {
487 	if (!panic) {
488 		struct tee_ta_session *sess = NULL;
489 		struct sec_part_ctx *spc = NULL;
490 		size_t n = 0;
491 
492 		tee_ta_get_current_session(&sess);
493 		spc = to_sec_part_ctx(sess->ctx);
494 
495 		/* Save the return values from StMM */
496 		for (n = 0; n <= 7; n++)
497 			spc->regs.x[n] = *(&svc_regs->x0 + n);
498 
499 		spc->regs.sp = svc_regs->sp_el0;
500 		spc->regs.pc = svc_regs->elr;
501 		spc->regs.cpsr = svc_regs->spsr;
502 	}
503 
504 	svc_regs->x0 = 0;
505 	svc_regs->x1 = panic;
506 	svc_regs->x2 = panic_code;
507 
508 	return false;
509 }
510 
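/*
 * Turn the FF-A direct request in @regs into a direct response carrying
 * @ret_val, swapping the source and destination endpoint IDs.
 */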
511 static void service_compose_direct_resp(struct thread_svc_regs *regs,
512 					uint32_t ret_val)
513 {
514 	uint16_t src_id = 0;
515 	uint16_t dst_id = 0;
516 
517 	/* extract from request */
518 	src_id = (regs->x1 >> 16) & UINT16_MAX;
519 	dst_id = regs->x1 & UINT16_MAX;
520 
521 	/* compose message */
522 	regs->x0 = FFA_MSG_SEND_DIRECT_RESP_64;
523 	/* swap endpoint ids */
524 	regs->x1 = SHIFT_U32(dst_id, 16) | src_id;
525 	regs->x2 = FFA_PARAM_MBZ;
526 	regs->x3 = ret_val;
527 	regs->x4 = 0;
528 	regs->x5 = 0;
529 	regs->x6 = 0;
530 	regs->x7 = 0;
531 }
532 
533 /*
534  * Combined read on behalf of the secure partition; this will open, read
535  * and close the file object.
536  */
537 static TEE_Result sec_storage_obj_read(unsigned long storage_id, char *obj_id,
538 				       unsigned long obj_id_len, void *data,
539 				       unsigned long len, unsigned long offset,
540 				       unsigned long flags)
541 {
542 	const struct tee_file_operations *fops = NULL;
543 	TEE_Result res = TEE_ERROR_BAD_STATE;
544 	struct tee_ta_session *sess = NULL;
545 	struct tee_file_handle *fh = NULL;
546 	struct sec_part_ctx *spc = NULL;
547 	struct tee_pobj *po = NULL;
548 	size_t file_size = 0;
549 	size_t read_len = 0;
550 
551 	fops = tee_svc_storage_file_ops(storage_id);
552 	if (!fops)
553 		return TEE_ERROR_ITEM_NOT_FOUND;
554 
555 	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
556 		return TEE_ERROR_BAD_PARAMETERS;
557 
558 	res = tee_ta_get_current_session(&sess);
559 	if (res != TEE_SUCCESS)
560 		return res;
561 
562 	spc = to_sec_part_ctx(sess->ctx);
563 	res = tee_mmu_check_access_rights(&spc->uctx,
564 					  TEE_MEMORY_ACCESS_WRITE |
565 					  TEE_MEMORY_ACCESS_ANY_OWNER,
566 					  (uaddr_t)data, len);
567 	if (res != TEE_SUCCESS)
568 		return res;
569 
570 	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
571 			   false, fops, &po);
572 	if (res != TEE_SUCCESS)
573 		return res;
574 
575 	res = po->fops->open(po, &file_size, &fh);
576 	if (res != TEE_SUCCESS)
577 		goto out;
578 
579 	read_len = len;
580 	res = po->fops->read(fh, offset, data, &read_len);
581 	if (res == TEE_ERROR_CORRUPT_OBJECT) {
582 		EMSG("Object corrupt");
583 		po->fops->remove(po);
584 	} else if (res == TEE_SUCCESS && len != read_len) {
585 		res = TEE_ERROR_CORRUPT_OBJECT;
586 	}
587 
588 	po->fops->close(&fh);
589 
590 out:
591 	tee_pobj_release(po);
592 
593 	return res;
594 }
595 
596 /*
597  * Combined write on behalf of the secure partition; this will create/open,
598  * write and close the file object.
599  */
600 static TEE_Result sec_storage_obj_write(unsigned long storage_id, char *obj_id,
601 					unsigned long obj_id_len, void *data,
602 					unsigned long len, unsigned long offset,
603 					unsigned long flags)
604 
605 {
606 	const struct tee_file_operations *fops = NULL;
607 	struct tee_ta_session *sess = NULL;
608 	struct tee_file_handle *fh = NULL;
609 	struct sec_part_ctx *spc = NULL;
610 	TEE_Result res = TEE_SUCCESS;
611 	struct tee_pobj *po = NULL;
612 
613 	fops = tee_svc_storage_file_ops(storage_id);
614 	if (!fops)
615 		return TEE_ERROR_ITEM_NOT_FOUND;
616 
617 	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
618 		return TEE_ERROR_BAD_PARAMETERS;
619 
620 	res = tee_ta_get_current_session(&sess);
621 	if (res != TEE_SUCCESS)
622 		return res;
623 
624 	spc = to_sec_part_ctx(sess->ctx);
625 	res = tee_mmu_check_access_rights(&spc->uctx,
626 					  TEE_MEMORY_ACCESS_READ |
627 					  TEE_MEMORY_ACCESS_ANY_OWNER,
628 					  (uaddr_t)data, len);
629 	if (res != TEE_SUCCESS)
630 		return res;
631 
632 	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
633 			   false, fops, &po);
634 	if (res != TEE_SUCCESS)
635 		return res;
636 
637 	res = po->fops->open(po, NULL, &fh);
638 	if (res == TEE_ERROR_ITEM_NOT_FOUND)
639 		res = po->fops->create(po, false, NULL, 0, NULL, 0, NULL, 0,
640 				       &fh);
641 	if (res == TEE_SUCCESS) {
642 		res = po->fops->write(fh, offset, data, len);
643 		po->fops->close(&fh);
644 	}
645 
646 	tee_pobj_release(po);
647 
648 	return res;
649 }
650 
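/*
 * Memory manager endpoint: services the memory attribute get/set
 * requests issued by StMM.
 */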
651 static bool stmm_handle_mem_mgr_service(struct thread_svc_regs *regs)
652 {
653 	uint32_t action = regs->x3;
654 	uintptr_t va = regs->x4;
655 	uint32_t nr_pages = regs->x5;
656 	uint32_t perm = regs->x6;
657 
658 	switch (action) {
659 	case FFA_SVC_MEMORY_ATTRIBUTES_GET_64:
660 		service_compose_direct_resp(regs, sp_svc_get_mem_attr(va));
661 		return true;
662 	case FFA_SVC_MEMORY_ATTRIBUTES_SET_64:
663 		service_compose_direct_resp(regs,
664 					    sp_svc_set_mem_attr(va, nr_pages,
665 								perm));
666 		return true;
667 	default:
668 		EMSG("Undefined service id %#"PRIx32, action);
669 		service_compose_direct_resp(regs, SP_RET_INVALID_PARAM);
670 		return true;
671 	}
672 }
673 
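/*
 * Storage endpoint: StMM's EFI variable store is kept as a single RPMB
 * secure storage object named below.
 */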
674 #define FILENAME "EFI_VARS"
675 static bool stmm_handle_storage_service(struct thread_svc_regs *regs)
676 {
677 	uint32_t flags = TEE_DATA_FLAG_ACCESS_READ |
678 			 TEE_DATA_FLAG_ACCESS_WRITE |
679 			 TEE_DATA_FLAG_SHARE_READ |
680 			 TEE_DATA_FLAG_SHARE_WRITE;
681 	uint32_t action = regs->x3;
682 	void *va = (void *)regs->x4;
683 	unsigned long len = regs->x5;
684 	unsigned long offset = regs->x6;
685 	char obj_id[] = FILENAME;
686 	size_t obj_id_len = strlen(obj_id);
687 	TEE_Result res = TEE_SUCCESS;
688 
689 	switch (action) {
690 	case FFA_SVC_RPMB_READ:
691 		res = sec_storage_obj_read(TEE_STORAGE_PRIVATE_RPMB, obj_id,
692 					   obj_id_len, va, len, offset, flags);
693 		service_compose_direct_resp(regs, res);
694 
695 		return true;
696 	case FFA_SVC_RPMB_WRITE:
697 		res = sec_storage_obj_write(TEE_STORAGE_PRIVATE_RPMB, obj_id,
698 					    obj_id_len, va, len, offset, flags);
699 		service_compose_direct_resp(regs, res);
700 
701 		return true;
702 	default:
703 		EMSG("Undefined service id %#"PRIx32, action);
704 		service_compose_direct_resp(regs, SP_RET_INVALID_PARAM);
705 		return true;
706 	}
707 }
708 
709 static bool spm_eret_error(int32_t error_code, struct thread_svc_regs *regs)
710 {
711 	regs->x0 = FFA_ERROR;
712 	regs->x1 = FFA_PARAM_MBZ;
713 	regs->x2 = error_code;
714 	regs->x3 = FFA_PARAM_MBZ;
715 	regs->x4 = FFA_PARAM_MBZ;
716 	regs->x5 = FFA_PARAM_MBZ;
717 	regs->x6 = FFA_PARAM_MBZ;
718 	regs->x7 = FFA_PARAM_MBZ;
719 	return true;
720 }
721 
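/*
 * Route an FF-A direct request from StMM to one of the internal service
 * endpoints (memory manager or storage).
 */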
722 static bool spm_handle_direct_req(struct thread_svc_regs *regs)
723 {
724 	uint16_t dst_id = regs->x1 & UINT16_MAX;
725 
726 	/* Look-up of destination endpoint */
727 	if (dst_id == mem_mgr_id)
728 		return stmm_handle_mem_mgr_service(regs);
729 	else if (dst_id == ffa_storage_id)
730 		return stmm_handle_storage_service(regs);
731 
732 	EMSG("Undefined endpoint id %#"PRIx16, dst_id);
733 	return spm_eret_error(SP_RET_INVALID_PARAM, regs);
734 }
735 
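/*
 * Top level SVC handler for StMM. A direct response ends the current
 * request and returns to the caller, a direct request is handled by one
 * of the internal service endpoints, and anything unknown makes the
 * partition panic. The return value tells the thread framework whether
 * to resume StMM (true) or to exit user mode (false).
 */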
736 static bool spm_handle_svc(struct thread_svc_regs *regs)
737 {
738 	switch (regs->x0) {
739 	case FFA_VERSION:
740 		DMSG("Received FFA version");
741 		regs->x0 = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
742 		return true;
743 	case FFA_MSG_SEND_DIRECT_RESP_64:
744 		DMSG("Received FFA direct response");
745 		return return_helper(false, 0, regs);
746 	case FFA_MSG_SEND_DIRECT_REQ_64:
747 		DMSG("Received FFA direct request");
748 		return spm_handle_direct_req(regs);
749 	default:
750 		EMSG("Undefined syscall %#"PRIx32, (uint32_t)regs->x0);
751 		return return_helper(true /*panic*/, 0xabcd, regs);
752 	}
753 }
754 
755 const struct tee_ta_ops secure_partition_ops __rodata_unpaged = {
756 	.enter_open_session = stmm_enter_open_session,
757 	.enter_invoke_cmd = stmm_enter_invoke_cmd,
758 	.enter_close_session = stmm_enter_close_session,
759 	.dump_state = sec_part_dump_state,
760 	.destroy = sec_part_ctx_destroy,
761 	.get_instance_id = sec_part_get_instance_id,
762 	.handle_svc = spm_handle_svc,
763 };
764