xref: /optee_os/core/arch/arm/kernel/stmm_sp.c (revision 5118efbe82358fd69fda6e0158a30e59f59ba09d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  * Copyright (c) 2020, Arm Limited.
5  */
6 
7 #include <crypto/crypto.h>
8 #include <ffa.h>
9 #include <kernel/abort.h>
10 #include <kernel/stmm_sp.h>
11 #include <kernel/user_mode_ctx.h>
12 #include <mm/fobj.h>
13 #include <mm/mobj.h>
14 #include <mm/vm.h>
15 #include <pta_stmm.h>
16 #include <tee_api_defines_extensions.h>
17 #include <tee/tee_pobj.h>
18 #include <tee/tee_svc.h>
19 #include <tee/tee_svc_storage.h>
20 #include <zlib.h>
21 
22 #include "thread_private.h"
23 
24 static const TEE_UUID stmm_uuid = PTA_STMM_UUID;
25 
26 /*
27  * Once a complete FFA spec is added, these will become discoverable.
28  * Until then, these are considered part of the internal ABI between
29  * OP-TEE and StMM.
30  */
31 static const uint16_t stmm_id = 1U;
32 static const uint16_t stmm_pta_id = 2U;
33 static const uint16_t mem_mgr_id = 3U;
34 static const uint16_t ffa_storage_id = 4U;
35 
36 static const unsigned int stmm_stack_size = 4 * SMALL_PAGE_SIZE;
37 static const unsigned int stmm_heap_size = 398 * SMALL_PAGE_SIZE;
38 static const unsigned int stmm_sec_buf_size = SMALL_PAGE_SIZE;
39 static const unsigned int stmm_ns_comm_buf_size = SMALL_PAGE_SIZE;
40 
41 extern unsigned char stmm_image[];
42 extern const unsigned int stmm_image_size;
43 extern const unsigned int stmm_image_uncompressed_size;
44 
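/*
 * Allocate and initialize the StMM secure partition context: a
 * single-instance, keep-alive TA context with a fresh user mode VM
 * context and an initial reference count of one.
 */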
45 static struct stmm_ctx *stmm_alloc_ctx(const TEE_UUID *uuid)
46 {
47 	TEE_Result res = TEE_SUCCESS;
48 	struct stmm_ctx *spc = NULL;
49 
50 	spc = calloc(1, sizeof(*spc));
51 	if (!spc)
52 		return NULL;
53 
54 	spc->ta_ctx.ts_ctx.ops = &stmm_sp_ops;
55 	spc->ta_ctx.ts_ctx.uuid = *uuid;
56 	spc->ta_ctx.flags = TA_FLAG_SINGLE_INSTANCE |
57 			    TA_FLAG_INSTANCE_KEEP_ALIVE;
58 	spc->uctx.ts_ctx = &spc->ta_ctx.ts_ctx;
59 
60 	res = vm_info_init(&spc->uctx);
61 	if (res) {
62 		free(spc);
63 		return NULL;
64 	}
65 
66 	spc->ta_ctx.ref_count = 1;
67 	condvar_init(&spc->ta_ctx.busy_cv);
68 
69 	return spc;
70 }
71 
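/*
 * Run StMM in user mode until it returns or panics. Exceptions are
 * masked around the switch and CNTKCTL is temporarily modified so that
 * StMM can read the physical counter (CNTKCTL_PL0PCTEN). A panic is
 * reported as TEE_ERROR_TARGET_DEAD.
 */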
72 static TEE_Result stmm_enter_user_mode(struct stmm_ctx *spc)
73 {
74 	uint32_t exceptions = 0;
75 	uint32_t panic_code = 0;
76 	uint32_t panicked = 0;
77 	uint64_t cntkctl = 0;
78 
79 	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
80 	cntkctl = read_cntkctl();
81 	write_cntkctl(cntkctl | CNTKCTL_PL0PCTEN);
82 	__thread_enter_user_mode(&spc->regs, &panicked, &panic_code);
83 	write_cntkctl(cntkctl);
84 	thread_unmask_exceptions(exceptions);
85 
86 	thread_user_clear_vfp(&spc->uctx);
87 
88 	if (panicked) {
89 		abort_print_current_ta();
90 		DMSG("stmm panicked with code %#"PRIx32, panic_code);
91 		return TEE_ERROR_TARGET_DEAD;
92 	}
93 
94 	return TEE_SUCCESS;
95 }
96 
97 static void init_stmm_regs(struct stmm_ctx *spc, unsigned long a0,
98 			   unsigned long a1, unsigned long sp, unsigned long pc)
99 {
100 	spc->regs.x[0] = a0;
101 	spc->regs.x[1] = a1;
102 	spc->regs.sp = sp;
103 	spc->regs.pc = pc;
104 }
105 
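/*
 * Allocate page-granular TA memory backed by a fobj and map it into the
 * secure partition address space with the requested protection. The fobj
 * reference is dropped once the mobj has taken ownership of it.
 */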
106 static TEE_Result alloc_and_map_sp_fobj(struct stmm_ctx *spc, size_t sz,
107 					uint32_t prot, vaddr_t *va)
108 {
109 	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
110 	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
111 	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL);
112 	TEE_Result res = TEE_SUCCESS;
113 
114 	fobj_put(fobj);
115 	if (!mobj)
116 		return TEE_ERROR_OUT_OF_MEMORY;
117 
118 	res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,
119 		     prot, 0, mobj, 0);
120 	if (res)
121 		mobj_put(mobj);
122 
123 	return res;
124 }
125 
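/* zlib allocation hooks backed by the core heap */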
126 static void *zalloc(void *opaque __unused, unsigned int items,
127 		    unsigned int size)
128 {
129 	return malloc(items * size);
130 }
131 
132 static void zfree(void *opaque __unused, void *address)
133 {
134 	free(address);
135 }
136 
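/*
 * Inflate the zlib-compressed StMM image into its destination mapping.
 * Any decompression failure at this point is fatal.
 */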
137 static void uncompress_image(void *dst, size_t dst_size, void *src,
138 			     size_t src_size)
139 {
140 	z_stream strm = {
141 		.next_in = src,
142 		.avail_in = src_size,
143 		.next_out = dst,
144 		.avail_out = dst_size,
145 		.zalloc = zalloc,
146 		.zfree = zfree,
147 	};
148 
149 	if (inflateInit(&strm) != Z_OK)
150 		panic("inflateInit");
151 
152 	if (inflate(&strm, Z_SYNC_FLUSH) != Z_STREAM_END)
153 		panic("inflate");
154 
155 	if (inflateEnd(&strm) != Z_OK)
156 		panic("inflateEnd");
157 }
158 
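/*
 * Build the secure partition address space:
 *
 *   [ image | heap | stack | shared (secure) buffer ] + NS comm buffer
 *
 * The boot information consumed by StMM is written at the start of the
 * shared buffer, followed by the MP info. Execution starts at the image
 * base with x0 pointing at the boot info, x1 holding its size and SP at
 * the top of the stack.
 */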
159 static TEE_Result load_stmm(struct stmm_ctx *spc)
160 {
161 	struct stmm_boot_info *boot_info = NULL;
162 	struct stmm_mp_info *mp_info = NULL;
163 	TEE_Result res = TEE_SUCCESS;
164 	vaddr_t sp_addr = 0;
165 	vaddr_t image_addr = 0;
166 	vaddr_t heap_addr = 0;
167 	vaddr_t stack_addr = 0;
168 	vaddr_t sec_buf_addr = 0;
169 	vaddr_t comm_buf_addr = 0;
170 	unsigned int sp_size = 0;
171 	unsigned int uncompressed_size_roundup = 0;
172 
173 	uncompressed_size_roundup = ROUNDUP(stmm_image_uncompressed_size,
174 					    SMALL_PAGE_SIZE);
175 	sp_size = uncompressed_size_roundup + stmm_stack_size +
176 		  stmm_heap_size + stmm_sec_buf_size;
177 	res = alloc_and_map_sp_fobj(spc, sp_size,
178 				    TEE_MATTR_PRW, &sp_addr);
179 	if (res)
180 		return res;
181 
182 	res = alloc_and_map_sp_fobj(spc, stmm_ns_comm_buf_size,
183 				    TEE_MATTR_URW | TEE_MATTR_PRW,
184 				    &comm_buf_addr);
185 	/*
186 	 * We don't need to free the previous allocations on error here; they
187 	 * are all released by the destruction call (stmm_ctx_destroy()).
188 	 */
189 	if (res)
190 		return res;
191 
192 	image_addr = sp_addr;
193 	heap_addr = image_addr + uncompressed_size_roundup;
194 	stack_addr = heap_addr + stmm_heap_size;
195 	sec_buf_addr = stack_addr + stmm_stack_size;
196 
197 	vm_set_ctx(&spc->ta_ctx.ts_ctx);
198 	uncompress_image((void *)image_addr, stmm_image_uncompressed_size,
199 			 stmm_image, stmm_image_size);
200 
201 	res = vm_set_prot(&spc->uctx, image_addr, uncompressed_size_roundup,
202 			  TEE_MATTR_URX | TEE_MATTR_PR);
203 	if (res)
204 		return res;
205 
206 	res = vm_set_prot(&spc->uctx, heap_addr, stmm_heap_size,
207 			  TEE_MATTR_URW | TEE_MATTR_PRW);
208 	if (res)
209 		return res;
210 
211 	res = vm_set_prot(&spc->uctx, stack_addr, stmm_stack_size,
212 			  TEE_MATTR_URW | TEE_MATTR_PRW);
213 	if (res)
214 		return res;
215 
216 	res = vm_set_prot(&spc->uctx, sec_buf_addr, stmm_sec_buf_size,
217 			  TEE_MATTR_URW | TEE_MATTR_PRW);
218 	if (res)
219 		return res;
220 
221 	DMSG("stmm load address %#"PRIxVA, image_addr);
222 
223 	boot_info = (struct stmm_boot_info *)sec_buf_addr;
224 	mp_info = (struct stmm_mp_info *)(boot_info + 1);
225 	*boot_info = (struct stmm_boot_info){
226 		.h.type = STMM_PARAM_SP_IMAGE_BOOT_INFO,
227 		.h.version = STMM_PARAM_VERSION_1,
228 		.h.size = sizeof(struct stmm_boot_info),
229 		.h.attr = 0,
230 		.sp_mem_base = sp_addr,
231 		.sp_mem_limit = sp_addr + sp_size,
232 		.sp_image_base = image_addr,
233 		.sp_stack_base = stack_addr,
234 		.sp_heap_base = heap_addr,
235 		.sp_ns_comm_buf_base = comm_buf_addr,
236 		.sp_shared_buf_base = sec_buf_addr,
237 		.sp_image_size = stmm_image_size,
238 		.sp_pcpu_stack_size = stmm_stack_size,
239 		.sp_heap_size = stmm_heap_size,
240 		.sp_ns_comm_buf_size = stmm_ns_comm_buf_size,
241 		.sp_shared_buf_size = stmm_sec_buf_size,
242 		.num_sp_mem_regions = 6,
243 		.num_cpus = 1,
244 		.mp_info = mp_info,
245 	};
246 	mp_info->mpidr = read_mpidr_el1();
247 	mp_info->linear_id = 0;
248 	mp_info->flags = MP_INFO_FLAG_PRIMARY_CPU;
249 	spc->ns_comm_buf_addr = comm_buf_addr;
250 	spc->ns_comm_buf_size = stmm_ns_comm_buf_size;
251 
252 	init_stmm_regs(spc, sec_buf_addr,
253 		       (vaddr_t)(mp_info + 1) - sec_buf_addr,
254 		       stack_addr + stmm_stack_size, image_addr);
255 
256 	return stmm_enter_user_mode(spc);
257 }
258 
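/*
 * Create the single StMM session: only the fixed StMM UUID is accepted,
 * the image is loaded and booted in the context of the new session and,
 * on success, the context is added to the global TA context list.
 */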
259 TEE_Result stmm_init_session(const TEE_UUID *uuid, struct tee_ta_session *sess)
260 {
261 	struct stmm_ctx *spc = NULL;
262 	TEE_Result res = TEE_SUCCESS;
263 
264 	if (memcmp(uuid, &stmm_uuid, sizeof(*uuid)))
265 		return TEE_ERROR_ITEM_NOT_FOUND;
266 
267 	spc = stmm_alloc_ctx(uuid);
268 	if (!spc)
269 		return TEE_ERROR_OUT_OF_MEMORY;
270 
271 	spc->is_initializing = true;
272 
273 	mutex_lock(&tee_ta_mutex);
274 	sess->ts_sess.ctx = &spc->ta_ctx.ts_ctx;
275 	mutex_unlock(&tee_ta_mutex);
276 
277 	ts_push_current_session(&sess->ts_sess);
278 	res = load_stmm(spc);
279 	ts_pop_current_session();
280 	vm_set_ctx(NULL);
281 	if (res) {
282 		sess->ts_sess.ctx = NULL;
283 		spc->ta_ctx.ts_ctx.ops->destroy(&spc->ta_ctx.ts_ctx);
284 
285 		return res;
286 	}
287 
288 	mutex_lock(&tee_ta_mutex);
289 	spc->is_initializing = false;
290 	TAILQ_INSERT_TAIL(&tee_ctxes, &spc->ta_ctx, link);
291 	mutex_unlock(&tee_ta_mutex);
292 
293 	return TEE_SUCCESS;
294 }
295 
296 static TEE_Result stmm_enter_open_session(struct ts_session *s)
297 {
298 	struct stmm_ctx *spc = to_stmm_ctx(s->ctx);
299 	struct tee_ta_session *ta_sess = to_ta_session(s);
300 	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
301 						TEE_PARAM_TYPE_NONE,
302 						TEE_PARAM_TYPE_NONE,
303 						TEE_PARAM_TYPE_NONE);
304 
305 	if (ta_sess->param->types != exp_pt)
306 		return TEE_ERROR_BAD_PARAMETERS;
307 
308 	if (spc->is_initializing) {
309 		/* StMM is initialized in stmm_init_session() */
310 		ta_sess->err_origin = TEE_ORIGIN_TEE;
311 		return TEE_ERROR_BAD_STATE;
312 	}
313 
314 	return TEE_SUCCESS;
315 }
316 
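/*
 * PTA_STMM_CMD_COMMUNICATE: copy the caller's buffer into the NS
 * communication buffer, pass it to StMM with an FFA direct request and
 * copy the updated buffer back. The value parameter returns StMM's
 * status from x4.
 */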
317 static TEE_Result stmm_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
318 {
319 	struct stmm_ctx *spc = to_stmm_ctx(s->ctx);
320 	struct tee_ta_session *ta_sess = to_ta_session(s);
321 	TEE_Result res = TEE_SUCCESS;
322 	TEE_Result __maybe_unused tmp_res = TEE_SUCCESS;
323 	unsigned int ns_buf_size = 0;
324 	struct param_mem *mem = NULL;
325 	void *va = NULL;
326 	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
327 						TEE_PARAM_TYPE_VALUE_OUTPUT,
328 						TEE_PARAM_TYPE_NONE,
329 						TEE_PARAM_TYPE_NONE);
330 
331 	if (cmd != PTA_STMM_CMD_COMMUNICATE)
332 		return TEE_ERROR_BAD_PARAMETERS;
333 
334 	if (ta_sess->param->types != exp_pt)
335 		return TEE_ERROR_BAD_PARAMETERS;
336 
337 	mem = &ta_sess->param->u[0].mem;
338 	ns_buf_size = mem->size;
339 	if (ns_buf_size > spc->ns_comm_buf_size) {
340 		mem->size = spc->ns_comm_buf_size;
341 		return TEE_ERROR_EXCESS_DATA;
342 	}
343 
344 	res = mobj_inc_map(mem->mobj);
345 	if (res)
346 		return res;
347 
348 	va = mobj_get_va(mem->mobj, mem->offs);
349 	if (!va) {
350 		EMSG("Can't get a valid VA for NS buffer");
351 		res = TEE_ERROR_BAD_PARAMETERS;
352 		goto out_va;
353 	}
354 
355 	spc->regs.x[0] = FFA_MSG_SEND_DIRECT_REQ_64;
356 	spc->regs.x[1] = (stmm_pta_id << 16) | stmm_id;
357 	spc->regs.x[2] = FFA_PARAM_MBZ;
358 	spc->regs.x[3] = spc->ns_comm_buf_addr;
359 	spc->regs.x[4] = ns_buf_size;
360 	spc->regs.x[5] = 0;
361 	spc->regs.x[6] = 0;
362 	spc->regs.x[7] = 0;
363 
364 	ts_push_current_session(s);
365 
366 	memcpy((void *)spc->ns_comm_buf_addr, va, ns_buf_size);
367 
368 	res = stmm_enter_user_mode(spc);
369 	if (res)
370 		goto out_session;
371 	/*
372 	 * Copy the SPM response from the secure partition back to the non-secure
373 	 * buffer of the client that called us.
374 	 */
375 	ta_sess->param->u[1].val.a = spc->regs.x[4];
376 
377 	memcpy(va, (void *)spc->ns_comm_buf_addr, ns_buf_size);
378 
379 out_session:
380 	ts_pop_current_session();
381 out_va:
382 	tmp_res = mobj_dec_map(mem->mobj);
383 	assert(!tmp_res);
384 
385 	return res;
386 }
387 
388 static void stmm_enter_close_session(struct ts_session *s __unused)
389 {
390 }
391 
392 static void stmm_dump_state(struct ts_ctx *ctx)
393 {
394 	user_mode_ctx_print_mappings(to_user_mode_ctx(ctx));
395 }
396 
397 static uint32_t stmm_get_instance_id(struct ts_ctx *ctx)
398 {
399 	return to_stmm_ctx(ctx)->uctx.vm_info.asid;
400 }
401 
402 static void stmm_ctx_destroy(struct ts_ctx *ctx)
403 {
404 	struct stmm_ctx *spc = to_stmm_ctx(ctx);
405 
406 	tee_pager_rem_um_areas(&spc->uctx);
407 	vm_info_final(&spc->uctx);
408 	free(spc);
409 }
410 
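/*
 * Translate the mapping attributes of the page at @va in the current
 * StMM context into STMM_MEM_ATTR_* permission bits.
 */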
411 static uint32_t sp_svc_get_mem_attr(vaddr_t va)
412 {
413 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
414 	struct ts_session *sess = NULL;
415 	struct stmm_ctx *spc = NULL;
416 	uint16_t attrs = 0;
417 	uint16_t perm = 0;
418 
419 	if (!va)
420 		goto err;
421 
422 	sess = ts_get_current_session();
423 	spc = to_stmm_ctx(sess->ctx);
424 
425 	res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);
426 	if (res)
427 		goto err;
428 
429 	if (attrs & TEE_MATTR_UR)
430 		perm |= STMM_MEM_ATTR_ACCESS_RO;
431 	else if (attrs & TEE_MATTR_UW)
432 		perm |= STMM_MEM_ATTR_ACCESS_RW;
433 
434 	if (attrs & TEE_MATTR_UX)
435 		perm |= STMM_MEM_ATTR_EXEC;
436 
437 	return perm;
438 err:
439 	return STMM_RET_DENIED;
440 }
441 
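/*
 * Apply the STMM_MEM_ATTR_* permission bits in @perm to @nr_pages pages
 * starting at @va in the current StMM context.
 */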
442 static int sp_svc_set_mem_attr(vaddr_t va, unsigned int nr_pages, uint32_t perm)
443 {
444 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
445 	struct ts_session *sess = NULL;
446 	struct stmm_ctx *spc = NULL;
447 	size_t sz = 0;
448 	uint32_t prot = 0;
449 
450 	if (!va || !nr_pages || MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz))
451 		return STMM_RET_INVALID_PARAM;
452 
453 	if (perm & ~STMM_MEM_ATTR_ALL)
454 		return STMM_RET_INVALID_PARAM;
455 
456 	sess = ts_get_current_session();
457 	spc = to_stmm_ctx(sess->ctx);
458 
459 	if ((perm & STMM_MEM_ATTR_ACCESS_MASK) == STMM_MEM_ATTR_ACCESS_RO)
460 		prot |= TEE_MATTR_UR;
461 	else if ((perm & STMM_MEM_ATTR_ACCESS_MASK) == STMM_MEM_ATTR_ACCESS_RW)
462 		prot |= TEE_MATTR_URW;
463 
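	/* Instruction access: a cleared EXEC_NEVER bit means executable */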
464 	if ((perm & STMM_MEM_ATTR_EXEC_NEVER) == STMM_MEM_ATTR_EXEC)
465 		prot |= TEE_MATTR_UX;
466 
467 	res = vm_set_prot(&spc->uctx, va, sz, prot);
468 	if (res)
469 		return STMM_RET_DENIED;
470 
471 	return STMM_RET_SUCCESS;
472 }
473 
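/*
 * Common exit path from the SVC handler back to stmm_enter_user_mode().
 * On a normal exit the StMM register state is saved so execution can
 * resume on the next direct request. The x0-x2 values written here are
 * what the caller of __thread_enter_user_mode() sees on return.
 */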
474 static bool return_helper(bool panic, uint32_t panic_code,
475 			  struct thread_svc_regs *svc_regs)
476 {
477 	if (!panic) {
478 		struct ts_session *sess = ts_get_current_session();
479 		struct stmm_ctx *spc = to_stmm_ctx(sess->ctx);
480 		size_t n = 0;
481 
482 		/* Save the return values from StMM */
483 		for (n = 0; n <= 7; n++)
484 			spc->regs.x[n] = *(&svc_regs->x0 + n);
485 
486 		spc->regs.sp = svc_regs->sp_el0;
487 		spc->regs.pc = svc_regs->elr;
488 		spc->regs.cpsr = svc_regs->spsr;
489 	}
490 
491 	svc_regs->x0 = 0;
492 	svc_regs->x1 = panic;
493 	svc_regs->x2 = panic_code;
494 
495 	return false;
496 }
497 
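/*
 * Turn the direct request in @regs into a direct response carrying
 * @ret_val, with the source and destination endpoint IDs swapped.
 */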
498 static void service_compose_direct_resp(struct thread_svc_regs *regs,
499 					uint32_t ret_val)
500 {
501 	uint16_t src_id = 0;
502 	uint16_t dst_id = 0;
503 
504 	/* extract from request */
505 	src_id = (regs->x1 >> 16) & UINT16_MAX;
506 	dst_id = regs->x1 & UINT16_MAX;
507 
508 	/* compose message */
509 	regs->x0 = FFA_MSG_SEND_DIRECT_RESP_64;
510 	/* swap endpoint ids */
511 	regs->x1 = SHIFT_U32(dst_id, 16) | src_id;
512 	regs->x2 = FFA_PARAM_MBZ;
513 	regs->x3 = ret_val;
514 	regs->x4 = 0;
515 	regs->x5 = 0;
516 	regs->x6 = 0;
517 	regs->x7 = 0;
518 }
519 
520 /*
521  * Combined read for the secure partition: this will open, read and
522  * close the file object.
523  */
524 static TEE_Result sec_storage_obj_read(unsigned long storage_id, char *obj_id,
525 				       unsigned long obj_id_len, void *data,
526 				       unsigned long len, unsigned long offset,
527 				       unsigned long flags)
528 {
529 	const struct tee_file_operations *fops = NULL;
530 	TEE_Result res = TEE_ERROR_BAD_STATE;
531 	struct ts_session *sess = NULL;
532 	struct tee_file_handle *fh = NULL;
533 	struct stmm_ctx *spc = NULL;
534 	struct tee_pobj *po = NULL;
535 	size_t file_size = 0;
536 	size_t read_len = 0;
537 
538 	fops = tee_svc_storage_file_ops(storage_id);
539 	if (!fops)
540 		return TEE_ERROR_ITEM_NOT_FOUND;
541 
542 	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
543 		return TEE_ERROR_BAD_PARAMETERS;
544 
545 	sess = ts_get_current_session();
546 	spc = to_stmm_ctx(sess->ctx);
547 	res = vm_check_access_rights(&spc->uctx,
548 				     TEE_MEMORY_ACCESS_WRITE |
549 				     TEE_MEMORY_ACCESS_ANY_OWNER,
550 				     (uaddr_t)data, len);
551 	if (res != TEE_SUCCESS)
552 		return res;
553 
554 	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
555 			   false, fops, &po);
556 	if (res != TEE_SUCCESS)
557 		return res;
558 
559 	res = po->fops->open(po, &file_size, &fh);
560 	if (res != TEE_SUCCESS)
561 		goto out;
562 
563 	read_len = len;
564 	res = po->fops->read(fh, offset, data, &read_len);
565 	if (res == TEE_ERROR_CORRUPT_OBJECT) {
566 		EMSG("Object corrupt");
567 		po->fops->remove(po);
568 	} else if (res == TEE_SUCCESS && len != read_len) {
569 		res = TEE_ERROR_CORRUPT_OBJECT;
570 	}
571 
572 	po->fops->close(&fh);
573 
574 out:
575 	tee_pobj_release(po);
576 
577 	return res;
578 }
579 
580 /*
581  * Combined write for the secure partition: this will create/open, write and
582  * close the file object.
583  */
584 static TEE_Result sec_storage_obj_write(unsigned long storage_id, char *obj_id,
585 					unsigned long obj_id_len, void *data,
586 					unsigned long len, unsigned long offset,
587 					unsigned long flags)
589 {
590 	const struct tee_file_operations *fops = NULL;
591 	struct ts_session *sess = NULL;
592 	struct tee_file_handle *fh = NULL;
593 	struct stmm_ctx *spc = NULL;
594 	TEE_Result res = TEE_SUCCESS;
595 	struct tee_pobj *po = NULL;
596 
597 	fops = tee_svc_storage_file_ops(storage_id);
598 	if (!fops)
599 		return TEE_ERROR_ITEM_NOT_FOUND;
600 
601 	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
602 		return TEE_ERROR_BAD_PARAMETERS;
603 
604 	sess = ts_get_current_session();
605 	spc = to_stmm_ctx(sess->ctx);
606 	res = vm_check_access_rights(&spc->uctx,
607 				     TEE_MEMORY_ACCESS_READ |
608 				     TEE_MEMORY_ACCESS_ANY_OWNER,
609 				     (uaddr_t)data, len);
610 	if (res != TEE_SUCCESS)
611 		return res;
612 
613 	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
614 			   false, fops, &po);
615 	if (res != TEE_SUCCESS)
616 		return res;
617 
618 	res = po->fops->open(po, NULL, &fh);
619 	if (res == TEE_ERROR_ITEM_NOT_FOUND)
620 		res = po->fops->create(po, false, NULL, 0, NULL, 0, NULL, 0,
621 				       &fh);
622 	if (res == TEE_SUCCESS) {
623 		res = po->fops->write(fh, offset, data, len);
624 		po->fops->close(&fh);
625 	}
626 
627 	tee_pobj_release(po);
628 
629 	return res;
630 }
631 
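/*
 * Memory manager endpoint: get or set memory attributes on behalf of
 * StMM and always answer with a direct response.
 */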
632 static bool stmm_handle_mem_mgr_service(struct thread_svc_regs *regs)
633 {
634 	uint32_t action = regs->x3;
635 	uintptr_t va = regs->x4;
636 	uint32_t nr_pages = regs->x5;
637 	uint32_t perm = regs->x6;
638 
639 	switch (action) {
640 	case FFA_SVC_MEMORY_ATTRIBUTES_GET_64:
641 		service_compose_direct_resp(regs, sp_svc_get_mem_attr(va));
642 		return true;
643 	case FFA_SVC_MEMORY_ATTRIBUTES_SET_64:
644 		service_compose_direct_resp(regs,
645 					    sp_svc_set_mem_attr(va, nr_pages,
646 								perm));
647 		return true;
648 	default:
649 		EMSG("Undefined service id %#"PRIx32, action);
650 		service_compose_direct_resp(regs, STMM_RET_INVALID_PARAM);
651 		return true;
652 	}
653 }
654 
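/*
 * Storage endpoint: StMM's variable store is kept in a single RPMB-backed
 * persistent object named by FILENAME, read and written at the offsets
 * requested by StMM.
 */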
655 #define FILENAME "EFI_VARS"
656 static bool stmm_handle_storage_service(struct thread_svc_regs *regs)
657 {
658 	uint32_t flags = TEE_DATA_FLAG_ACCESS_READ |
659 			 TEE_DATA_FLAG_ACCESS_WRITE |
660 			 TEE_DATA_FLAG_SHARE_READ |
661 			 TEE_DATA_FLAG_SHARE_WRITE;
662 	uint32_t action = regs->x3;
663 	void *va = (void *)regs->x4;
664 	unsigned long len = regs->x5;
665 	unsigned long offset = regs->x6;
666 	char obj_id[] = FILENAME;
667 	size_t obj_id_len = strlen(obj_id);
668 	TEE_Result res = TEE_SUCCESS;
669 
670 	switch (action) {
671 	case FFA_SVC_RPMB_READ:
672 		res = sec_storage_obj_read(TEE_STORAGE_PRIVATE_RPMB, obj_id,
673 					   obj_id_len, va, len, offset, flags);
674 		service_compose_direct_resp(regs, res);
675 
676 		return true;
677 	case FFA_SVC_RPMB_WRITE:
678 		res = sec_storage_obj_write(TEE_STORAGE_PRIVATE_RPMB, obj_id,
679 					    obj_id_len, va, len, offset, flags);
680 		service_compose_direct_resp(regs, res);
681 
682 		return true;
683 	default:
684 		EMSG("Undefined service id %#"PRIx32, action);
685 		service_compose_direct_resp(regs, STMM_RET_INVALID_PARAM);
686 		return true;
687 	}
688 }
689 
690 static bool spm_eret_error(int32_t error_code, struct thread_svc_regs *regs)
691 {
692 	regs->x0 = FFA_ERROR;
693 	regs->x1 = FFA_PARAM_MBZ;
694 	regs->x2 = error_code;
695 	regs->x3 = FFA_PARAM_MBZ;
696 	regs->x4 = FFA_PARAM_MBZ;
697 	regs->x5 = FFA_PARAM_MBZ;
698 	regs->x6 = FFA_PARAM_MBZ;
699 	regs->x7 = FFA_PARAM_MBZ;
700 	return true;
701 }
702 
703 static bool spm_handle_direct_req(struct thread_svc_regs *regs)
704 {
705 	uint16_t dst_id = regs->x1 & UINT16_MAX;
706 
707 	/* Look-up of destination endpoint */
708 	if (dst_id == mem_mgr_id)
709 		return stmm_handle_mem_mgr_service(regs);
710 	else if (dst_id == ffa_storage_id)
711 		return stmm_handle_storage_service(regs);
712 
713 	EMSG("Undefined endpoint id %#"PRIx16, dst_id);
714 	return spm_eret_error(STMM_RET_INVALID_PARAM, regs);
715 }
716 
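/*
 * Top-level handler for SVCs (FFA calls) issued by StMM. Returning true
 * resumes StMM with the updated @regs, returning false leaves user mode
 * and hands control back to stmm_enter_user_mode().
 */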
717 static bool spm_handle_svc(struct thread_svc_regs *regs)
718 {
719 	switch (regs->x0) {
720 	case FFA_VERSION:
721 		DMSG("Received FFA version");
722 		regs->x0 = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
723 					    FFA_VERSION_MINOR);
724 		return true;
725 	case FFA_MSG_SEND_DIRECT_RESP_64:
726 		DMSG("Received FFA direct response");
727 		return return_helper(false, 0, regs);
728 	case FFA_MSG_SEND_DIRECT_REQ_64:
729 		DMSG("Received FFA direct request");
730 		return spm_handle_direct_req(regs);
731 	default:
732 		EMSG("Undefined syscall %#"PRIx32, (uint32_t)regs->x0);
733 		return return_helper(true /*panic*/, 0xabcd, regs);
734 	}
735 }
736 
737 const struct ts_ops stmm_sp_ops __rodata_unpaged = {
738 	.enter_open_session = stmm_enter_open_session,
739 	.enter_invoke_cmd = stmm_enter_invoke_cmd,
740 	.enter_close_session = stmm_enter_close_session,
741 	.dump_state = stmm_dump_state,
742 	.destroy = stmm_ctx_destroy,
743 	.get_instance_id = stmm_get_instance_id,
744 	.handle_svc = spm_handle_svc,
745 };
746