xref: /optee_os/core/arch/arm/kernel/secure_partition.c (revision 89c9728d981ff0f4a8edecc325858537441d721e)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020, Arm Limited. All rights reserved.
 * Copyright (c) 2019, Linaro Limited
 */

#include <crypto/crypto.h>
#include <ffa.h>
#include <kernel/abort.h>
#include <kernel/secure_partition.h>
#include <kernel/user_mode_ctx.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stmm.h>
#include <tee_api_defines_extensions.h>
#include <tee/tee_pobj.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <zlib.h>

#include "thread_private.h"

static const TEE_UUID stmm_uuid = PTA_STMM_UUID;

/*
 * Once a complete FFA spec is added, these will become discoverable.
 * Until then these are considered part of the internal ABI between
 * OP-TEE and StMM.
 */
static const uint16_t stmm_id = 1U;
static const uint16_t stmm_pta_id = 2U;
static const uint16_t mem_mgr_id = 3U;
static const uint16_t ffa_storage_id = 4U;

static const unsigned int stmm_stack_size = 4 * SMALL_PAGE_SIZE;
static const unsigned int stmm_heap_size = 398 * SMALL_PAGE_SIZE;
static const unsigned int stmm_sec_buf_size = SMALL_PAGE_SIZE;
static const unsigned int stmm_ns_comm_buf_size = SMALL_PAGE_SIZE;

extern unsigned char stmm_image[];
extern const unsigned int stmm_image_size;
extern const unsigned int stmm_image_uncompressed_size;

static struct sec_part_ctx *sec_part_alloc_ctx(const TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	struct sec_part_ctx *spc = NULL;

	spc = calloc(1, sizeof(*spc));
	if (!spc)
		return NULL;

	spc->ta_ctx.ts_ctx.ops = &secure_partition_ops;
	spc->ta_ctx.ts_ctx.uuid = *uuid;
	spc->ta_ctx.flags = TA_FLAG_SINGLE_INSTANCE |
			    TA_FLAG_INSTANCE_KEEP_ALIVE;
	spc->uctx.ts_ctx = &spc->ta_ctx.ts_ctx;

	res = vm_info_init(&spc->uctx);
	if (res) {
		free(spc);
		return NULL;
	}

	spc->ta_ctx.ref_count = 1;
	condvar_init(&spc->ta_ctx.busy_cv);

	return spc;
}

static void clear_vfp_state(struct sec_part_ctx *spc __maybe_unused)
{
	if (IS_ENABLED(CFG_WITH_VFP))
		thread_user_clear_vfp(&spc->uctx.vfp);
}

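/*
 * Run the StMM secure partition in user mode. Exceptions are masked while
 * switching, EL0 access to the physical counter is enabled via CNTKCTL for
 * the duration of the run, and any VFP state is cleared afterwards.
 */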
static TEE_Result sec_part_enter_user_mode(struct sec_part_ctx *spc)
{
	uint32_t exceptions = 0;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uint64_t cntkctl = 0;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cntkctl = read_cntkctl();
	write_cntkctl(cntkctl | CNTKCTL_PL0PCTEN);
	__thread_enter_user_mode(&spc->regs, &panicked, &panic_code);
	write_cntkctl(cntkctl);
	thread_unmask_exceptions(exceptions);

	clear_vfp_state(spc);

	if (panicked) {
		abort_print_current_ta();
		DMSG("sec_part panicked with code %#"PRIx32, panic_code);
		return TEE_ERROR_TARGET_DEAD;
	}

	return TEE_SUCCESS;
}

static void init_stmm_regs(struct sec_part_ctx *spc, unsigned long a0,
			   unsigned long a1, unsigned long sp, unsigned long pc)
{
	spc->regs.x[0] = a0;
	spc->regs.x[1] = a1;
	spc->regs.sp = sp;
	spc->regs.pc = pc;
}

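/*
 * Allocate anonymous pages for the partition, wrap them in a mobj and map
 * them into the SP address space with the requested protection. The fobj
 * reference is dropped right away since the mobj is assumed to hold its own
 * reference for as long as the mapping exists.
 */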
static TEE_Result alloc_and_map_sp_fobj(struct sec_part_ctx *spc, size_t sz,
					uint32_t prot, vaddr_t *va)
{
	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL);
	TEE_Result res = TEE_SUCCESS;

	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,
		     prot, 0, mobj, 0);
	if (res)
		mobj_put(mobj);

	return res;
}

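/* Allocator callbacks used by zlib while inflating the StMM image */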
static void *zalloc(void *opaque __unused, unsigned int items,
		    unsigned int size)
{
	return malloc(items * size);
}

static void zfree(void *opaque __unused, void *address)
{
	free(address);
}

static void uncompress_image(void *dst, size_t dst_size, void *src,
			     size_t src_size)
{
	z_stream strm = {
		.next_in = src,
		.avail_in = src_size,
		.next_out = dst,
		.avail_out = dst_size,
		.zalloc = zalloc,
		.zfree = zfree,
	};

	if (inflateInit(&strm) != Z_OK)
		panic("inflateInit");

	if (inflate(&strm, Z_SYNC_FLUSH) != Z_STREAM_END)
		panic("inflate");

	if (inflateEnd(&strm) != Z_OK)
		panic("inflateEnd");
}

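/*
 * Load and start StMM. The secure partition address space is laid out as
 * one contiguous allocation holding, in order: the uncompressed image, the
 * heap, the stack and the secure (shared) buffer carrying the boot
 * information, plus a separately allocated buffer used for communication
 * with the normal world.
 */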
static TEE_Result load_stmm(struct sec_part_ctx *spc)
{
	struct secure_partition_boot_info *boot_info = NULL;
	struct secure_partition_mp_info *mp_info = NULL;
	TEE_Result res = TEE_SUCCESS;
	vaddr_t sp_addr = 0;
	vaddr_t image_addr = 0;
	vaddr_t heap_addr = 0;
	vaddr_t stack_addr = 0;
	vaddr_t sec_buf_addr = 0;
	vaddr_t comm_buf_addr = 0;
	unsigned int sp_size = 0;
	unsigned int uncompressed_size_roundup = 0;

	uncompressed_size_roundup = ROUNDUP(stmm_image_uncompressed_size,
					    SMALL_PAGE_SIZE);
	sp_size = uncompressed_size_roundup + stmm_stack_size +
		  stmm_heap_size + stmm_sec_buf_size;
	res = alloc_and_map_sp_fobj(spc, sp_size,
				    TEE_MATTR_PRW, &sp_addr);
	if (res)
		return res;

	res = alloc_and_map_sp_fobj(spc, stmm_ns_comm_buf_size,
				    TEE_MATTR_URW | TEE_MATTR_PRW,
				    &comm_buf_addr);
	/*
	 * We don't need to free the previous allocations here; they'll all
	 * be handled when the context is destroyed (sec_part_ctx_destroy()).
	 */
	if (res)
		return res;

	image_addr = sp_addr;
	heap_addr = image_addr + uncompressed_size_roundup;
	stack_addr = heap_addr + stmm_heap_size;
	sec_buf_addr = stack_addr + stmm_stack_size;

	vm_set_ctx(&spc->ta_ctx.ts_ctx);
	uncompress_image((void *)image_addr, stmm_image_uncompressed_size,
			 stmm_image, stmm_image_size);

	res = vm_set_prot(&spc->uctx, image_addr, uncompressed_size_roundup,
			  TEE_MATTR_URX | TEE_MATTR_PR);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, heap_addr, stmm_heap_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, stack_addr, stmm_stack_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, sec_buf_addr, stmm_sec_buf_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	DMSG("stmm load address %#"PRIxVA, image_addr);

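	/*
	 * Fill in the boot information and the MP information (a single,
	 * primary CPU) at the start of the secure buffer; its address and
	 * size are what StMM receives in x0/x1 on first entry.
	 */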
	boot_info = (struct secure_partition_boot_info *)sec_buf_addr;
	mp_info = (struct secure_partition_mp_info *)(boot_info + 1);
	*boot_info = (struct secure_partition_boot_info){
		.h.type = SP_PARAM_SP_IMAGE_BOOT_INFO,
		.h.version = SP_PARAM_VERSION_1,
		.h.size = sizeof(struct secure_partition_boot_info),
		.h.attr = 0,
		.sp_mem_base = sp_addr,
		.sp_mem_limit = sp_addr + sp_size,
		.sp_image_base = image_addr,
		.sp_stack_base = stack_addr,
		.sp_heap_base = heap_addr,
		.sp_ns_comm_buf_base = comm_buf_addr,
		.sp_shared_buf_base = sec_buf_addr,
		.sp_image_size = stmm_image_size,
		.sp_pcpu_stack_size = stmm_stack_size,
		.sp_heap_size = stmm_heap_size,
		.sp_ns_comm_buf_size = stmm_ns_comm_buf_size,
		.sp_shared_buf_size = stmm_sec_buf_size,
		.num_sp_mem_regions = 6,
		.num_cpus = 1,
		.mp_info = mp_info,
	};
	mp_info->mpidr = read_mpidr_el1();
	mp_info->linear_id = 0;
	mp_info->flags = MP_INFO_FLAG_PRIMARY_CPU;
	spc->ns_comm_buf_addr = comm_buf_addr;
	spc->ns_comm_buf_size = stmm_ns_comm_buf_size;

	init_stmm_regs(spc, sec_buf_addr,
		       (vaddr_t)(mp_info + 1) - sec_buf_addr,
		       stack_addr + stmm_stack_size, image_addr);

	return sec_part_enter_user_mode(spc);
}

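/*
 * Create and initialize the StMM secure partition context for a new
 * session. Only the StMM UUID is accepted; any other UUID is rejected with
 * TEE_ERROR_ITEM_NOT_FOUND.
 */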
TEE_Result sec_part_init_session(const TEE_UUID *uuid,
				 struct tee_ta_session *sess)
{
	struct sec_part_ctx *spc = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (memcmp(uuid, &stmm_uuid, sizeof(*uuid)))
		return TEE_ERROR_ITEM_NOT_FOUND;

	spc = sec_part_alloc_ctx(uuid);
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	spc->is_initializing = true;

	mutex_lock(&tee_ta_mutex);
	sess->ts_sess.ctx = &spc->ta_ctx.ts_ctx;
	mutex_unlock(&tee_ta_mutex);

	ts_push_current_session(&sess->ts_sess);
	res = load_stmm(spc);
	ts_pop_current_session();
	vm_set_ctx(NULL);
	if (res) {
		sess->ts_sess.ctx = NULL;
		spc->ta_ctx.ts_ctx.ops->destroy(&spc->ta_ctx.ts_ctx);

		return res;
	}

	mutex_lock(&tee_ta_mutex);
	spc->is_initializing = false;
	TAILQ_INSERT_TAIL(&tee_ctxes, &spc->ta_ctx, link);
	mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result stmm_enter_open_session(struct ts_session *s)
{
	struct sec_part_ctx *spc = to_sec_part_ctx(s->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(s);
	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE);

	if (ta_sess->param->types != exp_pt)
		return TEE_ERROR_BAD_PARAMETERS;

	if (spc->is_initializing) {
		/* StMM is initialized in sec_part_init_session() */
		ta_sess->err_origin = TEE_ORIGIN_TEE;
		return TEE_ERROR_BAD_STATE;
	}

	return TEE_SUCCESS;
}

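/*
 * PTA_STMM_CMD_COMMUNICATE: copy the caller's buffer into the partition's
 * communication buffer, issue an FF-A direct request to StMM and, unless it
 * panicked, copy the response back and report the returned value in the
 * second (value output) parameter.
 */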
static TEE_Result stmm_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	struct sec_part_ctx *spc = to_sec_part_ctx(s->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(s);
	TEE_Result res = TEE_SUCCESS;
	TEE_Result __maybe_unused tmp_res = TEE_SUCCESS;
	unsigned int ns_buf_size = 0;
	struct param_mem *mem = NULL;
	void *va = NULL;
	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
						TEE_PARAM_TYPE_VALUE_OUTPUT,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE);

	if (cmd != PTA_STMM_CMD_COMMUNICATE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ta_sess->param->types != exp_pt)
		return TEE_ERROR_BAD_PARAMETERS;

	mem = &ta_sess->param->u[0].mem;
	ns_buf_size = mem->size;
	if (ns_buf_size > spc->ns_comm_buf_size) {
		mem->size = spc->ns_comm_buf_size;
		return TEE_ERROR_EXCESS_DATA;
	}

	res = mobj_inc_map(mem->mobj);
	if (res)
		return res;

	va = mobj_get_va(mem->mobj, mem->offs);
	if (!va) {
		EMSG("Can't get a valid VA for NS buffer");
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out_va;
	}

	spc->regs.x[0] = FFA_MSG_SEND_DIRECT_REQ_64;
	spc->regs.x[1] = (stmm_pta_id << 16) | stmm_id;
	spc->regs.x[2] = FFA_PARAM_MBZ;
	spc->regs.x[3] = spc->ns_comm_buf_addr;
	spc->regs.x[4] = ns_buf_size;
	spc->regs.x[5] = 0;
	spc->regs.x[6] = 0;
	spc->regs.x[7] = 0;

	ts_push_current_session(s);

	memcpy((void *)spc->ns_comm_buf_addr, va, ns_buf_size);

	res = sec_part_enter_user_mode(spc);
	if (res)
		goto out_session;
	/*
	 * Copy the SPM response from secure partition back to the non-secure
	 * buffer of the client that called us.
	 */
	ta_sess->param->u[1].val.a = spc->regs.x[4];

	memcpy(va, (void *)spc->ns_comm_buf_addr, ns_buf_size);

out_session:
	ts_pop_current_session();
out_va:
	tmp_res = mobj_dec_map(mem->mobj);
	assert(!tmp_res);

	return res;
}

static void stmm_enter_close_session(struct ts_session *s __unused)
{
}

static void sec_part_dump_state(struct ts_ctx *ctx)
{
	user_mode_ctx_print_mappings(to_user_mode_ctx(ctx));
}

static uint32_t sec_part_get_instance_id(struct ts_ctx *ctx)
{
	return to_sec_part_ctx(ctx)->uctx.vm_info.asid;
}

static void sec_part_ctx_destroy(struct ts_ctx *ctx)
{
	struct sec_part_ctx *spc = to_sec_part_ctx(ctx);

	tee_pager_rem_um_areas(&spc->uctx);
	vm_info_final(&spc->uctx);
	free(spc);
}

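/*
 * The following SVC-backed services are exposed to StMM: a memory manager
 * endpoint for querying and changing the permissions of its own pages, and
 * a storage endpoint backed by RPMB secure storage (see the handlers
 * further down).
 */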
static uint32_t sp_svc_get_mem_attr(vaddr_t va)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct ts_session *sess = NULL;
	struct sec_part_ctx *spc = NULL;
	uint16_t attrs = 0;
	uint16_t perm = 0;

	if (!va)
		goto err;

	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);

	res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);
	if (res)
		goto err;

	/* A writable page implies read access, so report RW before RO */
	if (attrs & TEE_MATTR_UW)
		perm |= SP_MEM_ATTR_ACCESS_RW;
	else if (attrs & TEE_MATTR_UR)
		perm |= SP_MEM_ATTR_ACCESS_RO;

	if (attrs & TEE_MATTR_UX)
		perm |= SP_MEM_ATTR_EXEC;

	return perm;
err:
	return SP_RET_DENIED;
}

static int sp_svc_set_mem_attr(vaddr_t va, unsigned int nr_pages, uint32_t perm)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct ts_session *sess = NULL;
	struct sec_part_ctx *spc = NULL;
	size_t sz = 0;
	uint32_t prot = 0;

	if (!va || !nr_pages || MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz))
		return SP_RET_INVALID_PARAM;

	if (perm & ~SP_MEM_ATTR_ALL)
		return SP_RET_INVALID_PARAM;

	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);

	if ((perm & SP_MEM_ATTR_ACCESS_MASK) == SP_MEM_ATTR_ACCESS_RO)
		prot |= TEE_MATTR_UR;
	else if ((perm & SP_MEM_ATTR_ACCESS_MASK) == SP_MEM_ATTR_ACCESS_RW)
		prot |= TEE_MATTR_URW;

	if ((perm & SP_MEM_ATTR_EXEC_NEVER) == SP_MEM_ATTR_EXEC)
		prot |= TEE_MATTR_UX;

	res = vm_set_prot(&spc->uctx, va, sz, prot);
	if (res)
		return SP_RET_DENIED;

	return SP_RET_SUCCESS;
}

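/*
 * Common exit path for SVCs issued by StMM. Unless the partition panicked,
 * its x0-x7, SP, PC and SPSR at the point of the SVC are saved so that it
 * can be resumed later. Returning false ends the current entry into user
 * mode; the panic flag and code are passed back in x1/x2.
 */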
static bool return_helper(bool panic, uint32_t panic_code,
			  struct thread_svc_regs *svc_regs)
{
	if (!panic) {
		struct ts_session *sess = ts_get_current_session();
		struct sec_part_ctx *spc = to_sec_part_ctx(sess->ctx);
		size_t n = 0;

		/* Save the return values from StMM */
		for (n = 0; n <= 7; n++)
			spc->regs.x[n] = *(&svc_regs->x0 + n);

		spc->regs.sp = svc_regs->sp_el0;
		spc->regs.pc = svc_regs->elr;
		spc->regs.cpsr = svc_regs->spsr;
	}

	svc_regs->x0 = 0;
	svc_regs->x1 = panic;
	svc_regs->x2 = panic_code;

	return false;
}

static void service_compose_direct_resp(struct thread_svc_regs *regs,
					uint32_t ret_val)
{
	uint16_t src_id = 0;
	uint16_t dst_id = 0;

	/* extract from request */
	src_id = (regs->x1 >> 16) & UINT16_MAX;
	dst_id = regs->x1 & UINT16_MAX;

	/* compose message */
	regs->x0 = FFA_MSG_SEND_DIRECT_RESP_64;
	/* swap endpoint ids */
	regs->x1 = SHIFT_U32(dst_id, 16) | src_id;
	regs->x2 = FFA_PARAM_MBZ;
	regs->x3 = ret_val;
	regs->x4 = 0;
	regs->x5 = 0;
	regs->x6 = 0;
	regs->x7 = 0;
}

/*
 * Combined read for the secure partition: this will open, read and close
 * the file object.
 */
static TEE_Result sec_storage_obj_read(unsigned long storage_id, char *obj_id,
				       unsigned long obj_id_len, void *data,
				       unsigned long len, unsigned long offset,
				       unsigned long flags)
{
	const struct tee_file_operations *fops = NULL;
	TEE_Result res = TEE_ERROR_BAD_STATE;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	struct sec_part_ctx *spc = NULL;
	struct tee_pobj *po = NULL;
	size_t file_size = 0;
	size_t read_len = 0;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);
	res = vm_check_access_rights(&spc->uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)data, len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   false, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	res = po->fops->open(po, &file_size, &fh);
	if (res != TEE_SUCCESS)
		goto out;

	read_len = len;
	res = po->fops->read(fh, offset, data, &read_len);
	if (res == TEE_ERROR_CORRUPT_OBJECT) {
		EMSG("Object corrupt");
		po->fops->remove(po);
	} else if (res == TEE_SUCCESS && len != read_len) {
		res = TEE_ERROR_CORRUPT_OBJECT;
	}

	po->fops->close(&fh);

out:
	tee_pobj_release(po);

	return res;
}

/*
 * Combined write for the secure partition: this will create/open, write and
 * close the file object.
 */
static TEE_Result sec_storage_obj_write(unsigned long storage_id, char *obj_id,
					unsigned long obj_id_len, void *data,
					unsigned long len, unsigned long offset,
					unsigned long flags)
{
	const struct tee_file_operations *fops = NULL;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	struct sec_part_ctx *spc = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct tee_pobj *po = NULL;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);
	res = vm_check_access_rights(&spc->uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)data, len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   false, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	res = po->fops->open(po, NULL, &fh);
	if (res == TEE_ERROR_ITEM_NOT_FOUND)
		res = po->fops->create(po, false, NULL, 0, NULL, 0, NULL, 0,
				       &fh);
	if (res == TEE_SUCCESS) {
		res = po->fops->write(fh, offset, data, len);
		po->fops->close(&fh);
	}

	tee_pobj_release(po);

	return res;
}

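/*
 * Handle a direct request aimed at the memory manager endpoint: StMM uses
 * this to query (FFA_SVC_MEMORY_ATTRIBUTES_GET_64) or change
 * (FFA_SVC_MEMORY_ATTRIBUTES_SET_64) the permissions of its own pages.
 */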
static bool stmm_handle_mem_mgr_service(struct thread_svc_regs *regs)
{
	uint32_t action = regs->x3;
	uintptr_t va = regs->x4;
	uint32_t nr_pages = regs->x5;
	uint32_t perm = regs->x6;

	switch (action) {
	case FFA_SVC_MEMORY_ATTRIBUTES_GET_64:
		service_compose_direct_resp(regs, sp_svc_get_mem_attr(va));
		return true;
	case FFA_SVC_MEMORY_ATTRIBUTES_SET_64:
		service_compose_direct_resp(regs,
					    sp_svc_set_mem_attr(va, nr_pages,
								perm));
		return true;
	default:
		EMSG("Undefined service id %#"PRIx32, action);
		service_compose_direct_resp(regs, SP_RET_INVALID_PARAM);
		return true;
	}
}

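/*
 * Storage endpoint: reads and writes of the "EFI_VARS" object in RPMB
 * secure storage on behalf of StMM, which presumably uses it to persist
 * the UEFI variable store.
 */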
#define FILENAME "EFI_VARS"
static bool stmm_handle_storage_service(struct thread_svc_regs *regs)
{
	uint32_t flags = TEE_DATA_FLAG_ACCESS_READ |
			 TEE_DATA_FLAG_ACCESS_WRITE |
			 TEE_DATA_FLAG_SHARE_READ |
			 TEE_DATA_FLAG_SHARE_WRITE;
	uint32_t action = regs->x3;
	void *va = (void *)regs->x4;
	unsigned long len = regs->x5;
	unsigned long offset = regs->x6;
	char obj_id[] = FILENAME;
	size_t obj_id_len = strlen(obj_id);
	TEE_Result res = TEE_SUCCESS;

	switch (action) {
	case FFA_SVC_RPMB_READ:
		res = sec_storage_obj_read(TEE_STORAGE_PRIVATE_RPMB, obj_id,
					   obj_id_len, va, len, offset, flags);
		service_compose_direct_resp(regs, res);

		return true;
	case FFA_SVC_RPMB_WRITE:
		res = sec_storage_obj_write(TEE_STORAGE_PRIVATE_RPMB, obj_id,
					    obj_id_len, va, len, offset, flags);
		service_compose_direct_resp(regs, res);

		return true;
	default:
		EMSG("Undefined service id %#"PRIx32, action);
		service_compose_direct_resp(regs, SP_RET_INVALID_PARAM);
		return true;
	}
}

static bool spm_eret_error(int32_t error_code, struct thread_svc_regs *regs)
{
	regs->x0 = FFA_ERROR;
	regs->x1 = FFA_PARAM_MBZ;
	regs->x2 = error_code;
	regs->x3 = FFA_PARAM_MBZ;
	regs->x4 = FFA_PARAM_MBZ;
	regs->x5 = FFA_PARAM_MBZ;
	regs->x6 = FFA_PARAM_MBZ;
	regs->x7 = FFA_PARAM_MBZ;
	return true;
}

static bool spm_handle_direct_req(struct thread_svc_regs *regs)
{
	uint16_t dst_id = regs->x1 & UINT16_MAX;

	/* Look-up of destination endpoint */
	if (dst_id == mem_mgr_id)
		return stmm_handle_mem_mgr_service(regs);
	else if (dst_id == ffa_storage_id)
		return stmm_handle_storage_service(regs);

	EMSG("Undefined endpoint id %#"PRIx16, dst_id);
	return spm_eret_error(SP_RET_INVALID_PARAM, regs);
}

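/*
 * Top-level handler for SVCs trapped from StMM (wired up via
 * secure_partition_ops.handle_svc). FF-A version queries and direct
 * requests are serviced here; a direct response means StMM has finished
 * the current request, so control is handed back to the caller.
 */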
static bool spm_handle_svc(struct thread_svc_regs *regs)
{
	switch (regs->x0) {
	case FFA_VERSION:
		DMSG("Received FFA version");
		regs->x0 = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					    FFA_VERSION_MINOR);
		return true;
	case FFA_MSG_SEND_DIRECT_RESP_64:
		DMSG("Received FFA direct response");
		return return_helper(false, 0, regs);
	case FFA_MSG_SEND_DIRECT_REQ_64:
		DMSG("Received FFA direct request");
		return spm_handle_direct_req(regs);
	default:
		EMSG("Undefined syscall %#"PRIx32, (uint32_t)regs->x0);
		return return_helper(true /*panic*/, 0xabcd, regs);
	}
}

const struct ts_ops secure_partition_ops __rodata_unpaged = {
	.enter_open_session = stmm_enter_open_session,
	.enter_invoke_cmd = stmm_enter_invoke_cmd,
	.enter_close_session = stmm_enter_close_session,
	.dump_state = sec_part_dump_state,
	.destroy = sec_part_ctx_destroy,
	.get_instance_id = sec_part_get_instance_id,
	.handle_svc = spm_handle_svc,
};
753