xref: /optee_os/core/arch/arm/kernel/secure_partition.c (revision 82061b8d7b34f09553f2526b9661036a69b5bb84)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020, Arm Limited. All rights reserved.
4  * Copyright (c) 2019, Linaro Limited
5  */
6 
7 #include <crypto/crypto.h>
8 #include <ffa.h>
9 #include <kernel/abort.h>
10 #include <kernel/secure_partition.h>
11 #include <kernel/user_mode_ctx.h>
12 #include <mm/fobj.h>
13 #include <mm/mobj.h>
14 #include <mm/tee_mmu.h>
15 #include <pta_stmm.h>
16 #include <tee_api_defines_extensions.h>
17 #include <tee/tee_pobj.h>
18 #include <tee/tee_svc.h>
19 #include <tee/tee_svc_storage.h>
20 #include <zlib.h>
21 
22 #include "thread_private.h"
23 
/* UUID of the StMM pseudo-TA this file implements the backend for */
static const TEE_UUID stmm_uuid = PTA_STMM_UUID;

/*
 * Once a complete FFA spec is added, these will become discoverable.
 * Until then these are considered part of the internal ABI between
 * OP-TEE and StMM.
 */
static const uint16_t stmm_id = 1U;
static const uint16_t stmm_pta_id = 2U;
static const uint16_t mem_mgr_id = 3U;
static const uint16_t ffa_storage_id = 4U;

/* Sizes of the regions mapped for StMM, see load_stmm() for the layout */
static const unsigned int stmm_stack_size = 4 * SMALL_PAGE_SIZE;
static const unsigned int stmm_heap_size = 398 * SMALL_PAGE_SIZE;
static const unsigned int stmm_sec_buf_size = SMALL_PAGE_SIZE;
static const unsigned int stmm_ns_comm_buf_size = SMALL_PAGE_SIZE;

/* zlib-compressed StMM image linked into the OP-TEE binary */
extern unsigned char stmm_image[];
extern const unsigned int stmm_image_size;
extern const unsigned int stmm_image_uncompressed_size;
44 
45 static struct sec_part_ctx *sec_part_alloc_ctx(const TEE_UUID *uuid)
46 {
47 	TEE_Result res = TEE_SUCCESS;
48 	struct sec_part_ctx *spc = NULL;
49 
50 	spc = calloc(1, sizeof(*spc));
51 	if (!spc)
52 		return NULL;
53 
54 	spc->uctx.ctx.ts_ctx.ops = &secure_partition_ops;
55 	spc->uctx.ctx.ts_ctx.uuid = *uuid;
56 	spc->uctx.ctx.flags = TA_FLAG_SINGLE_INSTANCE |
57 			      TA_FLAG_INSTANCE_KEEP_ALIVE;
58 
59 	res = vm_info_init(&spc->uctx);
60 	if (res) {
61 		free(spc);
62 		return NULL;
63 	}
64 
65 	spc->uctx.ctx.ref_count = 1;
66 	condvar_init(&spc->uctx.ctx.busy_cv);
67 
68 	return spc;
69 }
70 
71 static void clear_vfp_state(struct sec_part_ctx *spc __maybe_unused)
72 {
73 	if (IS_ENABLED(CFG_WITH_VFP))
74 		thread_user_clear_vfp(&spc->uctx.vfp);
75 }
76 
/*
 * Enter StMM in user mode with the register state saved in spc->regs
 * and block until it exits again. Returns TEE_ERROR_TARGET_DEAD if the
 * partition panicked, TEE_SUCCESS otherwise.
 */
static TEE_Result sec_part_enter_user_mode(struct sec_part_ctx *spc)
{
	uint32_t exceptions = 0;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uint64_t cntkctl = 0;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/* Grant EL0 access to the physical counter while StMM runs */
	cntkctl = read_cntkctl();
	write_cntkctl(cntkctl | CNTKCTL_PL0PCTEN);
	__thread_enter_user_mode(&spc->regs, &panicked, &panic_code);
	/* Restore the previous counter access configuration */
	write_cntkctl(cntkctl);
	thread_unmask_exceptions(exceptions);

	clear_vfp_state(spc);

	if (panicked) {
		abort_print_current_ta();
		DMSG("sec_part panicked with code %#"PRIx32, panic_code);
		return TEE_ERROR_TARGET_DEAD;
	}

	return TEE_SUCCESS;
}
101 
102 static void init_stmm_regs(struct sec_part_ctx *spc, unsigned long a0,
103 			   unsigned long a1, unsigned long sp, unsigned long pc)
104 {
105 	spc->regs.x[0] = a0;
106 	spc->regs.x[1] = a1;
107 	spc->regs.sp = sp;
108 	spc->regs.pc = pc;
109 }
110 
111 static TEE_Result alloc_and_map_sp_fobj(struct sec_part_ctx *spc, size_t sz,
112 					uint32_t prot, vaddr_t *va)
113 {
114 	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
115 	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
116 	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL);
117 	TEE_Result res = TEE_SUCCESS;
118 
119 	fobj_put(fobj);
120 	if (!mobj)
121 		return TEE_ERROR_OUT_OF_MEMORY;
122 
123 	res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,
124 		     prot, 0, mobj, 0);
125 	if (res)
126 		mobj_put(mobj);
127 
128 	return TEE_SUCCESS;
129 }
130 
/*
 * zlib allocator callback. calloc() is used instead of the previous
 * bare malloc(items * size): the multiplication was done in
 * unsigned int and could wrap, handing zlib a too-small buffer.
 * calloc() checks the product and returns NULL (Z_NULL) on overflow.
 */
static void *zalloc(void *opaque, unsigned int items, unsigned int size)
{
	(void)opaque;

	return calloc(items, size);
}
136 
/* zlib deallocator callback, thin wrapper around free() */
static void zfree(void *opaque __unused, void *address)
{
	free(address);
}
141 
142 static void uncompress_image(void *dst, size_t dst_size, void *src,
143 			     size_t src_size)
144 {
145 	z_stream strm = {
146 		.next_in = src,
147 		.avail_in = src_size,
148 		.next_out = dst,
149 		.avail_out = dst_size,
150 		.zalloc = zalloc,
151 		.zfree = zfree,
152 	};
153 
154 	if (inflateInit(&strm) != Z_OK)
155 		panic("inflateInit");
156 
157 	if (inflate(&strm, Z_SYNC_FLUSH) != Z_STREAM_END)
158 		panic("inflate");
159 
160 	if (inflateEnd(&strm) != Z_OK)
161 		panic("inflateEnd");
162 }
163 
/*
 * Map and boot the StMM image inside the partition context @spc.
 *
 * One contiguous allocation holds, low to high:
 *   image | heap | stack | secure (shared) buffer
 * plus a separate user- and kernel-writable page used as the
 * non-secure communication buffer. The boot information consumed by
 * StMM is written into the secure buffer and StMM is then entered in
 * user mode to run its initialization.
 */
static TEE_Result load_stmm(struct sec_part_ctx *spc)
{
	struct secure_partition_boot_info *boot_info = NULL;
	struct secure_partition_mp_info *mp_info = NULL;
	TEE_Result res = TEE_SUCCESS;
	vaddr_t sp_addr = 0;
	vaddr_t image_addr = 0;
	vaddr_t heap_addr = 0;
	vaddr_t stack_addr = 0;
	vaddr_t sec_buf_addr = 0;
	vaddr_t comm_buf_addr = 0;
	unsigned int sp_size = 0;
	unsigned int uncompressed_size_roundup = 0;

	uncompressed_size_roundup = ROUNDUP(stmm_image_uncompressed_size,
					    SMALL_PAGE_SIZE);
	sp_size = uncompressed_size_roundup + stmm_stack_size +
		  stmm_heap_size + stmm_sec_buf_size;
	/* Initially all kernel-RW so the image can be decompressed into it */
	res = alloc_and_map_sp_fobj(spc, sp_size,
				    TEE_MATTR_PRW, &sp_addr);
	if (res)
		return res;

	res = alloc_and_map_sp_fobj(spc, stmm_ns_comm_buf_size,
				    TEE_MATTR_URW | TEE_MATTR_PRW,
				    &comm_buf_addr);
	/*
	 * We don't need to free the previous instance here, they'll all be
	 * handled during the destruction call (sec_part_ctx_destroy())
	 */
	if (res)
		return res;

	/* Carve the single allocation into its regions */
	image_addr = sp_addr;
	heap_addr = image_addr + uncompressed_size_roundup;
	stack_addr = heap_addr + stmm_heap_size;
	sec_buf_addr = stack_addr + stmm_stack_size;

	/* Activate the partition's mappings before writing through them */
	tee_mmu_set_ctx(&spc->uctx.ctx.ts_ctx);
	uncompress_image((void *)image_addr, stmm_image_uncompressed_size,
			 stmm_image, stmm_image_size);

	/* Image becomes user-executable, read-only */
	res = vm_set_prot(&spc->uctx, image_addr, uncompressed_size_roundup,
			  TEE_MATTR_URX | TEE_MATTR_PR);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, heap_addr, stmm_heap_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, stack_addr, stmm_stack_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, sec_buf_addr, stmm_sec_buf_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	DMSG("stmm load address %#"PRIxVA, image_addr);

	/* Boot info lives at the start of the secure buffer, MP info after */
	boot_info = (struct secure_partition_boot_info *)sec_buf_addr;
	mp_info = (struct secure_partition_mp_info *)(boot_info + 1);
	*boot_info = (struct secure_partition_boot_info){
		.h.type = SP_PARAM_SP_IMAGE_BOOT_INFO,
		.h.version = SP_PARAM_VERSION_1,
		.h.size = sizeof(struct secure_partition_boot_info),
		.h.attr = 0,
		.sp_mem_base = sp_addr,
		.sp_mem_limit = sp_addr + sp_size,
		.sp_image_base = image_addr,
		.sp_stack_base = stack_addr,
		.sp_heap_base = heap_addr,
		.sp_ns_comm_buf_base = comm_buf_addr,
		.sp_shared_buf_base = sec_buf_addr,
		.sp_image_size = stmm_image_size,
		.sp_pcpu_stack_size = stmm_stack_size,
		.sp_heap_size = stmm_heap_size,
		.sp_ns_comm_buf_size = stmm_ns_comm_buf_size,
		.sp_shared_buf_size = stmm_sec_buf_size,
		/* NOTE(review): region/CPU counts expected by StMM — confirm */
		.num_sp_mem_regions = 6,
		.num_cpus = 1,
		.mp_info = mp_info,
	};
	mp_info->mpidr = read_mpidr_el1();
	mp_info->linear_id = 0;
	mp_info->flags = MP_INFO_FLAG_PRIMARY_CPU;
	spc->ns_comm_buf_addr = comm_buf_addr;
	spc->ns_comm_buf_size = stmm_ns_comm_buf_size;

	/* x0 = boot info, x1 = its size, stack grows down from the top */
	init_stmm_regs(spc, sec_buf_addr,
		       (vaddr_t)(mp_info + 1) - sec_buf_addr,
		       stack_addr + stmm_stack_size, image_addr);

	return sec_part_enter_user_mode(spc);
}
263 
/*
 * Create and boot the StMM secure partition for @sess. Only the StMM
 * UUID is recognized. On success the context is published on
 * tee_ctxes; on failure it is destroyed and the error returned.
 */
TEE_Result sec_part_init_session(const TEE_UUID *uuid,
				 struct tee_ta_session *sess)
{
	struct sec_part_ctx *spc = NULL;
	TEE_Result res = TEE_SUCCESS;

	/* This backend serves only the StMM pseudo-TA */
	if (memcmp(uuid, &stmm_uuid, sizeof(*uuid)))
		return TEE_ERROR_ITEM_NOT_FOUND;

	spc = sec_part_alloc_ctx(uuid);
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Rejects concurrent opens until boot completes, see
	 * stmm_enter_open_session() */
	spc->is_initializing = true;

	mutex_lock(&tee_ta_mutex);
	sess->ts_sess.ctx = &spc->uctx.ctx.ts_ctx;
	mutex_unlock(&tee_ta_mutex);

	/* Boot StMM with this session as the current one */
	ts_push_current_session(&sess->ts_sess);
	res = load_stmm(spc);
	ts_pop_current_session();
	/* load_stmm() switched to the partition's mappings; switch back */
	tee_mmu_set_ctx(NULL);
	if (res) {
		sess->ts_sess.ctx = NULL;
		spc->uctx.ctx.ts_ctx.ops->destroy(&spc->uctx.ctx.ts_ctx);

		return res;
	}

	mutex_lock(&tee_ta_mutex);
	spc->is_initializing = false;
	TAILQ_INSERT_TAIL(&tee_ctxes, &spc->uctx.ctx, link);
	mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}
301 
302 static TEE_Result stmm_enter_open_session(struct ts_session *s)
303 {
304 	struct sec_part_ctx *spc = to_sec_part_ctx(s->ctx);
305 	struct tee_ta_session *ta_sess = to_ta_session(s);
306 	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
307 						TEE_PARAM_TYPE_NONE,
308 						TEE_PARAM_TYPE_NONE,
309 						TEE_PARAM_TYPE_NONE);
310 
311 	if (ta_sess->param->types != exp_pt)
312 		return TEE_ERROR_BAD_PARAMETERS;
313 
314 	if (spc->is_initializing) {
315 		/* StMM is initialized in sec_part_init_session() */
316 		ta_sess->err_origin = TEE_ORIGIN_TEE;
317 		return TEE_ERROR_BAD_STATE;
318 	}
319 
320 	return TEE_SUCCESS;
321 }
322 
/*
 * Invoke-command entry for the StMM pseudo-TA. The only command,
 * PTA_STMM_CMD_COMMUNICATE, forwards the client's memref buffer to
 * StMM through the non-secure communication buffer and copies StMM's
 * answer back, returning x4 of the response in the value parameter.
 */
static TEE_Result stmm_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	struct sec_part_ctx *spc = to_sec_part_ctx(s->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(s);
	TEE_Result res = TEE_SUCCESS;
	TEE_Result __maybe_unused tmp_res = TEE_SUCCESS;
	unsigned int ns_buf_size = 0;
	struct param_mem *mem = NULL;
	void *va = NULL;
	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
						TEE_PARAM_TYPE_VALUE_OUTPUT,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE);

	if (cmd != PTA_STMM_CMD_COMMUNICATE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ta_sess->param->types != exp_pt)
		return TEE_ERROR_BAD_PARAMETERS;

	mem = &ta_sess->param->u[0].mem;
	ns_buf_size = mem->size;
	/* Too large: report the maximum size we can handle and bail out */
	if (ns_buf_size > spc->ns_comm_buf_size) {
		mem->size = spc->ns_comm_buf_size;
		return TEE_ERROR_EXCESS_DATA;
	}

	/* Keep the client buffer mapped while we copy from/to it */
	res = mobj_inc_map(mem->mobj);
	if (res)
		return res;

	va = mobj_get_va(mem->mobj, mem->offs);
	if (!va) {
		EMSG("Can't get a valid VA for NS buffer");
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out_va;
	}

	/* FFA direct request from the PTA endpoint to StMM, payload in x3/x4 */
	spc->regs.x[0] = FFA_MSG_SEND_DIRECT_REQ_64;
	spc->regs.x[1] = (stmm_pta_id << 16) | stmm_id;
	spc->regs.x[2] = FFA_PARAM_MBZ;
	spc->regs.x[3] = spc->ns_comm_buf_addr;
	spc->regs.x[4] = ns_buf_size;
	spc->regs.x[5] = 0;
	spc->regs.x[6] = 0;
	spc->regs.x[7] = 0;

	ts_push_current_session(s);

	/* Stage the request in the NS communication buffer */
	memcpy((void *)spc->ns_comm_buf_addr, va, ns_buf_size);

	res = sec_part_enter_user_mode(spc);
	if (res)
		goto out_session;
	/*
	 * Copy the SPM response from secure partition back to the non-secure
	 * buffer of the client that called us.
	 */
	ta_sess->param->u[1].val.a = spc->regs.x[4];

	memcpy(va, (void *)spc->ns_comm_buf_addr, ns_buf_size);

out_session:
	ts_pop_current_session();
out_va:
	tmp_res = mobj_dec_map(mem->mobj);
	assert(!tmp_res);

	return res;
}
393 
/*
 * Nothing to do on close: the single StMM instance stays resident
 * (TA_FLAG_INSTANCE_KEEP_ALIVE, see sec_part_alloc_ctx()).
 */
static void stmm_enter_close_session(struct ts_session *s __unused)
{
}
397 
/* Print the partition's user mode mappings, used in state dumps */
static void sec_part_dump_state(struct ts_ctx *ctx)
{
	struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);

	user_mode_ctx_print_mappings(uctx);
}
402 
403 static uint32_t sec_part_get_instance_id(struct ts_ctx *ctx)
404 {
405 	return to_sec_part_ctx(ctx)->uctx.vm_info.asid;
406 }
407 
408 static void sec_part_ctx_destroy(struct ts_ctx *ctx)
409 {
410 	struct sec_part_ctx *spc = to_sec_part_ctx(ctx);
411 
412 	tee_pager_rem_um_areas(&spc->uctx);
413 	vm_info_final(&spc->uctx);
414 	free(spc);
415 }
416 
417 static uint32_t sp_svc_get_mem_attr(vaddr_t va)
418 {
419 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
420 	struct ts_session *sess = NULL;
421 	struct sec_part_ctx *spc = NULL;
422 	uint16_t attrs = 0;
423 	uint16_t perm = 0;
424 
425 	if (!va)
426 		goto err;
427 
428 	sess = ts_get_current_session();
429 	spc = to_sec_part_ctx(sess->ctx);
430 
431 	res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);
432 	if (res)
433 		goto err;
434 
435 	if (attrs & TEE_MATTR_UR)
436 		perm |= SP_MEM_ATTR_ACCESS_RO;
437 	else if (attrs & TEE_MATTR_UW)
438 		perm |= SP_MEM_ATTR_ACCESS_RW;
439 
440 	if (attrs & TEE_MATTR_UX)
441 		perm |= SP_MEM_ATTR_EXEC;
442 
443 	return perm;
444 err:
445 	return SP_RET_DENIED;
446 }
447 
/*
 * StMM SVC: change the protection of @nr_pages pages starting at @va
 * according to @perm (SP_MEM_ATTR_* bits). Returns SP_RET_SUCCESS,
 * SP_RET_INVALID_PARAM or SP_RET_DENIED.
 */
static int sp_svc_set_mem_attr(vaddr_t va, unsigned int nr_pages, uint32_t perm)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct ts_session *sess = NULL;
	struct sec_part_ctx *spc = NULL;
	size_t sz = 0;
	uint32_t prot = 0;

	/* Reject NULL, zero-sized and size-overflowing requests */
	if (!va || !nr_pages || MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz))
		return SP_RET_INVALID_PARAM;

	/* No permission bits outside the defined set */
	if (perm & ~SP_MEM_ATTR_ALL)
		return SP_RET_INVALID_PARAM;

	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);

	if ((perm & SP_MEM_ATTR_ACCESS_MASK) == SP_MEM_ATTR_ACCESS_RO)
		prot |= TEE_MATTR_UR;
	else if ((perm & SP_MEM_ATTR_ACCESS_MASK) == SP_MEM_ATTR_ACCESS_RW)
		prot |= TEE_MATTR_URW;

	/*
	 * Executable only when the EXEC_NEVER field of @perm equals the
	 * SP_MEM_ATTR_EXEC encoding, i.e. the never-execute value is not
	 * set.
	 */
	if ((perm & SP_MEM_ATTR_EXEC_NEVER) == SP_MEM_ATTR_EXEC)
		prot |= TEE_MATTR_UX;

	res = vm_set_prot(&spc->uctx, va, sz, prot);
	if (res)
		return SP_RET_DENIED;

	return SP_RET_SUCCESS;
}
479 
/*
 * Common tail for SVC handling that hands control back from StMM.
 * Unless StMM panicked, its user mode register state (x0-x7, SP, PC,
 * CPSR) is saved so the partition can be re-entered later via
 * sec_part_enter_user_mode(). x0-x2 of @svc_regs carry the return
 * code, the panic flag and the panic code back to the kernel-side
 * caller of __thread_enter_user_mode(). Always returns false —
 * NOTE(review): unlike the service handlers which return true, this
 * appears to mean "do not resume user mode"; confirm against the
 * thread SVC dispatch code.
 */
static bool return_helper(bool panic, uint32_t panic_code,
			  struct thread_svc_regs *svc_regs)
{
	if (!panic) {
		struct ts_session *sess = ts_get_current_session();
		struct sec_part_ctx *spc = to_sec_part_ctx(sess->ctx);
		size_t n = 0;

		/* Save the return values from StMM */
		/* Relies on x0..x7 being laid out contiguously in the struct */
		for (n = 0; n <= 7; n++)
			spc->regs.x[n] = *(&svc_regs->x0 + n);

		spc->regs.sp = svc_regs->sp_el0;
		spc->regs.pc = svc_regs->elr;
		spc->regs.cpsr = svc_regs->spsr;
	}

	svc_regs->x0 = 0;
	svc_regs->x1 = panic;
	svc_regs->x2 = panic_code;

	return false;
}
503 
504 static void service_compose_direct_resp(struct thread_svc_regs *regs,
505 					uint32_t ret_val)
506 {
507 	uint16_t src_id = 0;
508 	uint16_t dst_id = 0;
509 
510 	/* extract from request */
511 	src_id = (regs->x1 >> 16) & UINT16_MAX;
512 	dst_id = regs->x1 & UINT16_MAX;
513 
514 	/* compose message */
515 	regs->x0 = FFA_MSG_SEND_DIRECT_RESP_64;
516 	/* swap endpoint ids */
517 	regs->x1 = SHIFT_U32(dst_id, 16) | src_id;
518 	regs->x2 = FFA_PARAM_MBZ;
519 	regs->x3 = ret_val;
520 	regs->x4 = 0;
521 	regs->x5 = 0;
522 	regs->x6 = 0;
523 	regs->x7 = 0;
524 }
525 
/*
 * Combined read from secure partition, this will open, read and
 * close the file object.
 *
 * @storage_id: TEE_STORAGE_* backend selector
 * @obj_id/@obj_id_len: object identifier, at most TEE_OBJECT_ID_MAX_LEN
 * @data/@len: destination buffer in the partition's address space
 * @offset: byte offset into the object
 * @flags: TEE_DATA_FLAG_* access flags passed to tee_pobj_get()
 */
static TEE_Result sec_storage_obj_read(unsigned long storage_id, char *obj_id,
				       unsigned long obj_id_len, void *data,
				       unsigned long len, unsigned long offset,
				       unsigned long flags)
{
	const struct tee_file_operations *fops = NULL;
	TEE_Result res = TEE_ERROR_BAD_STATE;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	struct sec_part_ctx *spc = NULL;
	struct tee_pobj *po = NULL;
	size_t file_size = 0;
	size_t read_len = 0;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	/* The destination buffer must be writable by the partition */
	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);
	res = tee_mmu_check_access_rights(&spc->uctx,
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)data, len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   false, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	res = po->fops->open(po, &file_size, &fh);
	if (res != TEE_SUCCESS)
		goto out;

	read_len = len;
	res = po->fops->read(fh, offset, data, &read_len);
	if (res == TEE_ERROR_CORRUPT_OBJECT) {
		EMSG("Object corrupt");
		po->fops->remove(po);
	} else if (res == TEE_SUCCESS && len != read_len) {
		/* A short read is treated the same as corruption */
		res = TEE_ERROR_CORRUPT_OBJECT;
	}

	po->fops->close(&fh);

out:
	tee_pobj_release(po);

	return res;
}
585 
/*
 * Combined write from secure partition, this will create/open, write and
 * close the file object.
 *
 * @storage_id: TEE_STORAGE_* backend selector
 * @obj_id/@obj_id_len: object identifier, at most TEE_OBJECT_ID_MAX_LEN
 * @data/@len: source buffer in the partition's address space
 * @offset: byte offset into the object
 * @flags: TEE_DATA_FLAG_* access flags passed to tee_pobj_get()
 */
static TEE_Result sec_storage_obj_write(unsigned long storage_id, char *obj_id,
					unsigned long obj_id_len, void *data,
					unsigned long len, unsigned long offset,
					unsigned long flags)

{
	const struct tee_file_operations *fops = NULL;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	struct sec_part_ctx *spc = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct tee_pobj *po = NULL;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	/* The source buffer must be readable by the partition */
	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);
	res = tee_mmu_check_access_rights(&spc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)data, len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   false, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	/* Create the object on first use */
	res = po->fops->open(po, NULL, &fh);
	if (res == TEE_ERROR_ITEM_NOT_FOUND)
		res = po->fops->create(po, false, NULL, 0, NULL, 0, NULL, 0,
				       &fh);
	if (res == TEE_SUCCESS) {
		res = po->fops->write(fh, offset, data, len);
		po->fops->close(&fh);
	}

	tee_pobj_release(po);

	return res;
}
637 
638 static bool stmm_handle_mem_mgr_service(struct thread_svc_regs *regs)
639 {
640 	uint32_t action = regs->x3;
641 	uintptr_t va = regs->x4;
642 	uint32_t nr_pages = regs->x5;
643 	uint32_t perm = regs->x6;
644 
645 	switch (action) {
646 	case FFA_SVC_MEMORY_ATTRIBUTES_GET_64:
647 		service_compose_direct_resp(regs, sp_svc_get_mem_attr(va));
648 		return true;
649 	case FFA_SVC_MEMORY_ATTRIBUTES_SET_64:
650 		service_compose_direct_resp(regs,
651 					    sp_svc_set_mem_attr(va, nr_pages,
652 								perm));
653 		return true;
654 	default:
655 		EMSG("Undefined service id %#"PRIx32, action);
656 		service_compose_direct_resp(regs, SP_RET_INVALID_PARAM);
657 		return true;
658 	}
659 }
660 
661 #define FILENAME "EFI_VARS"
662 static bool stmm_handle_storage_service(struct thread_svc_regs *regs)
663 {
664 	uint32_t flags = TEE_DATA_FLAG_ACCESS_READ |
665 			 TEE_DATA_FLAG_ACCESS_WRITE |
666 			 TEE_DATA_FLAG_SHARE_READ |
667 			 TEE_DATA_FLAG_SHARE_WRITE;
668 	uint32_t action = regs->x3;
669 	void *va = (void *)regs->x4;
670 	unsigned long len = regs->x5;
671 	unsigned long offset = regs->x6;
672 	char obj_id[] = FILENAME;
673 	size_t obj_id_len = strlen(obj_id);
674 	TEE_Result res = TEE_SUCCESS;
675 
676 	switch (action) {
677 	case FFA_SVC_RPMB_READ:
678 		res = sec_storage_obj_read(TEE_STORAGE_PRIVATE_RPMB, obj_id,
679 					   obj_id_len, va, len, offset, flags);
680 		service_compose_direct_resp(regs, res);
681 
682 		return true;
683 	case FFA_SVC_RPMB_WRITE:
684 		res = sec_storage_obj_write(TEE_STORAGE_PRIVATE_RPMB, obj_id,
685 					    obj_id_len, va, len, offset, flags);
686 		service_compose_direct_resp(regs, res);
687 
688 		return true;
689 	default:
690 		EMSG("Undefined service id %#"PRIx32, action);
691 		service_compose_direct_resp(regs, SP_RET_INVALID_PARAM);
692 		return true;
693 	}
694 }
695 
696 static bool spm_eret_error(int32_t error_code, struct thread_svc_regs *regs)
697 {
698 	regs->x0 = FFA_ERROR;
699 	regs->x1 = FFA_PARAM_MBZ;
700 	regs->x2 = error_code;
701 	regs->x3 = FFA_PARAM_MBZ;
702 	regs->x4 = FFA_PARAM_MBZ;
703 	regs->x5 = FFA_PARAM_MBZ;
704 	regs->x6 = FFA_PARAM_MBZ;
705 	regs->x7 = FFA_PARAM_MBZ;
706 	return true;
707 }
708 
709 static bool spm_handle_direct_req(struct thread_svc_regs *regs)
710 {
711 	uint16_t dst_id = regs->x1 & UINT16_MAX;
712 
713 	/* Look-up of destination endpoint */
714 	if (dst_id == mem_mgr_id)
715 		return stmm_handle_mem_mgr_service(regs);
716 	else if (dst_id == ffa_storage_id)
717 		return stmm_handle_storage_service(regs);
718 
719 	EMSG("Undefined endpoint id %#"PRIx16, dst_id);
720 	return spm_eret_error(SP_RET_INVALID_PARAM, regs);
721 }
722 
723 static bool spm_handle_svc(struct thread_svc_regs *regs)
724 {
725 	switch (regs->x0) {
726 	case FFA_VERSION:
727 		DMSG("Received FFA version");
728 		regs->x0 = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
729 					    FFA_VERSION_MINOR);
730 		return true;
731 	case FFA_MSG_SEND_DIRECT_RESP_64:
732 		DMSG("Received FFA direct response");
733 		return return_helper(false, 0, regs);
734 	case FFA_MSG_SEND_DIRECT_REQ_64:
735 		DMSG("Received FFA direct request");
736 		return spm_handle_direct_req(regs);
737 	default:
738 		EMSG("Undefined syscall %#"PRIx32, (uint32_t)regs->x0);
739 		return return_helper(true /*panic*/, 0xabcd, regs);
740 	}
741 }
742 
/* Hooks the StMM secure partition into the trusted-services framework */
const struct ts_ops secure_partition_ops __rodata_unpaged = {
	.enter_open_session = stmm_enter_open_session,
	.enter_invoke_cmd = stmm_enter_invoke_cmd,
	.enter_close_session = stmm_enter_close_session,
	.dump_state = sec_part_dump_state,
	.destroy = sec_part_ctx_destroy,
	.get_instance_id = sec_part_get_instance_id,
	.handle_svc = spm_handle_svc,
};
752