xref: /optee_os/core/arch/arm/kernel/secure_partition.c (revision ab2422916fc822d6c8ffc5769cf01d4984113c14)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020, Arm Limited. All rights reserved.
4  * Copyright (c) 2019, Linaro Limited
5  */
6 
7 #include <crypto/crypto.h>
8 #include <ffa.h>
9 #include <kernel/abort.h>
10 #include <kernel/secure_partition.h>
11 #include <kernel/user_mode_ctx.h>
12 #include <mm/fobj.h>
13 #include <mm/mobj.h>
14 #include <mm/tee_mmu.h>
15 #include <pta_stmm.h>
16 #include <tee_api_defines_extensions.h>
17 #include <tee/tee_pobj.h>
18 #include <tee/tee_svc.h>
19 #include <tee/tee_svc_storage.h>
20 #include <zlib.h>
21 
22 #include "thread_private.h"
23 
/* Only the StMM pseudo-TA UUID is accepted by sec_part_init_session() */
static const TEE_UUID stmm_uuid = PTA_STMM_UUID;

/*
 * Once a complete FFA spec is added, these will become discoverable.
 * Until then these are considered part of the internal ABI between
 * OP-TEE and StMM.
 */
static const uint16_t stmm_id = 1U;		/* StMM endpoint */
static const uint16_t stmm_pta_id = 2U;		/* StMM pseudo-TA endpoint */
static const uint16_t mem_mgr_id = 3U;		/* memory attributes service */
static const uint16_t ffa_storage_id = 4U;	/* RPMB storage service */

/* Sizes, in bytes, of the regions mapped for StMM in load_stmm() */
static const unsigned int stmm_stack_size = 4 * SMALL_PAGE_SIZE;
static const unsigned int stmm_heap_size = 398 * SMALL_PAGE_SIZE;
static const unsigned int stmm_sec_buf_size = SMALL_PAGE_SIZE;
static const unsigned int stmm_ns_comm_buf_size = SMALL_PAGE_SIZE;

/* zlib-compressed StMM image embedded into the OP-TEE binary at build time */
extern unsigned char stmm_image[];
extern const unsigned int stmm_image_size;
extern const unsigned int stmm_image_uncompressed_size;
44 
45 static struct sec_part_ctx *sec_part_alloc_ctx(const TEE_UUID *uuid)
46 {
47 	TEE_Result res = TEE_SUCCESS;
48 	struct sec_part_ctx *spc = NULL;
49 
50 	spc = calloc(1, sizeof(*spc));
51 	if (!spc)
52 		return NULL;
53 
54 	spc->uctx.ctx.ts_ctx.ops = &secure_partition_ops;
55 	spc->uctx.ctx.ts_ctx.uuid = *uuid;
56 	spc->uctx.ctx.flags = TA_FLAG_SINGLE_INSTANCE |
57 			      TA_FLAG_INSTANCE_KEEP_ALIVE;
58 
59 	res = vm_info_init(&spc->uctx);
60 	if (res) {
61 		free(spc);
62 		return NULL;
63 	}
64 
65 	spc->uctx.ctx.ref_count = 1;
66 	condvar_init(&spc->uctx.ctx.busy_cv);
67 
68 	return spc;
69 }
70 
71 static void clear_vfp_state(struct sec_part_ctx *spc __maybe_unused)
72 {
73 	if (IS_ENABLED(CFG_WITH_VFP))
74 		thread_user_clear_vfp(&spc->uctx.vfp);
75 }
76 
/*
 * Enter StMM in user mode with the register state saved in spc->regs
 * and block until it returns to the kernel. Returns
 * TEE_ERROR_TARGET_DEAD if the partition panicked, TEE_SUCCESS
 * otherwise.
 */
static TEE_Result sec_part_enter_user_mode(struct sec_part_ctx *spc)
{
	uint32_t exceptions = 0;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uint64_t cntkctl = 0;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/* Grant EL0 access to the physical counter while StMM executes */
	cntkctl = read_cntkctl();
	write_cntkctl(cntkctl | CNTKCTL_PL0PCTEN);
	__thread_enter_user_mode(&spc->regs, &panicked, &panic_code);
	/* Restore the previous counter access configuration */
	write_cntkctl(cntkctl);
	thread_unmask_exceptions(exceptions);

	clear_vfp_state(spc);

	if (panicked) {
		abort_print_current_ta();
		DMSG("sec_part panicked with code %#"PRIx32, panic_code);
		return TEE_ERROR_TARGET_DEAD;
	}

	return TEE_SUCCESS;
}
101 
102 static void init_stmm_regs(struct sec_part_ctx *spc, unsigned long a0,
103 			   unsigned long a1, unsigned long sp, unsigned long pc)
104 {
105 	spc->regs.x[0] = a0;
106 	spc->regs.x[1] = a1;
107 	spc->regs.sp = sp;
108 	spc->regs.pc = pc;
109 }
110 
111 static TEE_Result alloc_and_map_sp_fobj(struct sec_part_ctx *spc, size_t sz,
112 					uint32_t prot, vaddr_t *va)
113 {
114 	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
115 	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
116 	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL);
117 	TEE_Result res = TEE_SUCCESS;
118 
119 	fobj_put(fobj);
120 	if (!mobj)
121 		return TEE_ERROR_OUT_OF_MEMORY;
122 
123 	res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,
124 		     prot, 0, mobj, 0);
125 	if (res)
126 		mobj_put(mobj);
127 
128 	return TEE_SUCCESS;
129 }
130 
131 static void *zalloc(void *opaque __unused, unsigned int items,
132 		    unsigned int size)
133 {
134 	return malloc(items * size);
135 }
136 
/* Deallocation callback handed to zlib through z_stream.zfree */
static void zfree(void *opaque __unused, void *address)
{
	free(address);
}
141 
/*
 * Inflate the zlib-compressed StMM image from @src (@src_size bytes)
 * into @dst (@dst_size bytes). Any decompression failure is fatal:
 * OP-TEE cannot continue without a valid StMM image, so we panic.
 */
static void uncompress_image(void *dst, size_t dst_size, void *src,
			     size_t src_size)
{
	z_stream strm = {
		.next_in = src,
		.avail_in = src_size,
		.next_out = dst,
		.avail_out = dst_size,
		.zalloc = zalloc,
		.zfree = zfree,
	};

	if (inflateInit(&strm) != Z_OK)
		panic("inflateInit");

	/* The whole image must inflate in this single call */
	if (inflate(&strm, Z_SYNC_FLUSH) != Z_STREAM_END)
		panic("inflate");

	if (inflateEnd(&strm) != Z_OK)
		panic("inflateEnd");
}
163 
/*
 * Build the StMM address space, decompress the image into it, fill in
 * the boot information and run StMM's own initialization.
 *
 * The main secure partition allocation is laid out as:
 *   [ image | heap | stack | secure (boot info/shared) buffer ]
 * plus a separately mapped buffer used for communication with the
 * non-secure side.
 */
static TEE_Result load_stmm(struct sec_part_ctx *spc)
{
	struct secure_partition_boot_info *boot_info = NULL;
	struct secure_partition_mp_info *mp_info = NULL;
	TEE_Result res = TEE_SUCCESS;
	vaddr_t sp_addr = 0;
	vaddr_t image_addr = 0;
	vaddr_t heap_addr = 0;
	vaddr_t stack_addr = 0;
	vaddr_t sec_buf_addr = 0;
	vaddr_t comm_buf_addr = 0;
	unsigned int sp_size = 0;
	unsigned int uncompressed_size_roundup = 0;

	uncompressed_size_roundup = ROUNDUP(stmm_image_uncompressed_size,
					    SMALL_PAGE_SIZE);
	sp_size = uncompressed_size_roundup + stmm_stack_size +
		  stmm_heap_size + stmm_sec_buf_size;
	/* Map everything kernel-writable first so the image can be loaded */
	res = alloc_and_map_sp_fobj(spc, sp_size,
				    TEE_MATTR_PRW, &sp_addr);
	if (res)
		return res;

	res = alloc_and_map_sp_fobj(spc, stmm_ns_comm_buf_size,
				    TEE_MATTR_URW | TEE_MATTR_PRW,
				    &comm_buf_addr);
	/*
	 * We don't need to free the previous instance here, they'll all be
	 * handled during the destruction call (sec_part_ctx_destroy())
	 */
	if (res)
		return res;

	/* Carve the single allocation into its regions, in order */
	image_addr = sp_addr;
	heap_addr = image_addr + uncompressed_size_roundup;
	stack_addr = heap_addr + stmm_heap_size;
	sec_buf_addr = stack_addr + stmm_stack_size;

	/* Activate the partition's mappings so we can write through them */
	tee_mmu_set_ctx(&spc->uctx.ctx.ts_ctx);
	uncompress_image((void *)image_addr, stmm_image_uncompressed_size,
			 stmm_image, stmm_image_size);

	/* Image becomes read-only and executable for user mode */
	res = vm_set_prot(&spc->uctx, image_addr, uncompressed_size_roundup,
			  TEE_MATTR_URX | TEE_MATTR_PR);
	if (res)
		return res;

	/* Remaining regions become user-mode read-write */
	res = vm_set_prot(&spc->uctx, heap_addr, stmm_heap_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, stack_addr, stmm_stack_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, sec_buf_addr, stmm_sec_buf_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	DMSG("stmm load address %#"PRIxVA, image_addr);

	/* Boot info, followed by per-CPU info, at start of the secure buffer */
	boot_info = (struct secure_partition_boot_info *)sec_buf_addr;
	mp_info = (struct secure_partition_mp_info *)(boot_info + 1);
	*boot_info = (struct secure_partition_boot_info){
		.h.type = SP_PARAM_SP_IMAGE_BOOT_INFO,
		.h.version = SP_PARAM_VERSION_1,
		.h.size = sizeof(struct secure_partition_boot_info),
		.h.attr = 0,
		.sp_mem_base = sp_addr,
		.sp_mem_limit = sp_addr + sp_size,
		.sp_image_base = image_addr,
		.sp_stack_base = stack_addr,
		.sp_heap_base = heap_addr,
		.sp_ns_comm_buf_base = comm_buf_addr,
		.sp_shared_buf_base = sec_buf_addr,
		.sp_image_size = stmm_image_size,
		.sp_pcpu_stack_size = stmm_stack_size,
		.sp_heap_size = stmm_heap_size,
		.sp_ns_comm_buf_size = stmm_ns_comm_buf_size,
		.sp_shared_buf_size = stmm_sec_buf_size,
		.num_sp_mem_regions = 6,
		.num_cpus = 1,
		.mp_info = mp_info,
	};
	mp_info->mpidr = read_mpidr_el1();
	mp_info->linear_id = 0;
	mp_info->flags = MP_INFO_FLAG_PRIMARY_CPU;
	spc->ns_comm_buf_addr = comm_buf_addr;
	spc->ns_comm_buf_size = stmm_ns_comm_buf_size;

	/*
	 * x0 = boot info address, x1 = size of the boot info blob
	 * (header + per-CPU info), sp = top of stack (grows down),
	 * pc = image entry point.
	 */
	init_stmm_regs(spc, sec_buf_addr,
		       (vaddr_t)(mp_info + 1) - sec_buf_addr,
		       stack_addr + stmm_stack_size, image_addr);

	return sec_part_enter_user_mode(spc);
}
263 
/*
 * Create and initialize the StMM context for @sess. Only the StMM UUID
 * is recognized. On success the fully initialized context is published
 * on the global tee_ctxes list; on failure it is destroyed.
 */
TEE_Result sec_part_init_session(const TEE_UUID *uuid,
				 struct tee_ta_session *sess)
{
	struct sec_part_ctx *spc = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (memcmp(uuid, &stmm_uuid, sizeof(*uuid)))
		return TEE_ERROR_ITEM_NOT_FOUND;

	spc = sec_part_alloc_ctx(uuid);
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Makes stmm_enter_open_session() fail until loading has finished */
	spc->is_initializing = true;

	mutex_lock(&tee_ta_mutex);
	sess->ts_sess.ctx = &spc->uctx.ctx.ts_ctx;
	mutex_unlock(&tee_ta_mutex);

	/* Run StMM's initialization with this session as current */
	ts_push_current_session(&sess->ts_sess);
	res = load_stmm(spc);
	ts_pop_current_session();
	/* load_stmm() activated the partition's mappings; deactivate them */
	tee_mmu_set_ctx(NULL);
	if (res) {
		sess->ts_sess.ctx = NULL;
		spc->uctx.ctx.ts_ctx.ops->destroy(&spc->uctx.ctx.ts_ctx);

		return res;
	}

	mutex_lock(&tee_ta_mutex);
	spc->is_initializing = false;
	TAILQ_INSERT_TAIL(&tee_ctxes, &spc->uctx.ctx, link);
	mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}
301 
302 static TEE_Result stmm_enter_open_session(struct ts_session *s,
303 					  struct tee_ta_param *param,
304 					  TEE_ErrorOrigin *eo)
305 {
306 	struct sec_part_ctx *spc = to_sec_part_ctx(s->ctx);
307 	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
308 						TEE_PARAM_TYPE_NONE,
309 						TEE_PARAM_TYPE_NONE,
310 						TEE_PARAM_TYPE_NONE);
311 
312 	if (param->types != exp_pt)
313 		return TEE_ERROR_BAD_PARAMETERS;
314 
315 	if (spc->is_initializing) {
316 		/* StMM is initialized in sec_part_init_session() */
317 		*eo = TEE_ORIGIN_TEE;
318 		return TEE_ERROR_BAD_STATE;
319 	}
320 
321 	return TEE_SUCCESS;
322 }
323 
/*
 * Handle PTA_STMM_CMD_COMMUNICATE: forward a client buffer to StMM and
 * return its response.
 *
 * Expected parameters: params[0] memref inout carrying the request and
 * receiving the response, params[1] value output receiving StMM's
 * return code (from x4). The payload is bounced through the
 * partition's pre-mapped non-secure communication buffer in both
 * directions.
 */
static TEE_Result stmm_enter_invoke_cmd(struct ts_session *s, uint32_t cmd,
					struct tee_ta_param *param,
					TEE_ErrorOrigin *eo __unused)
{
	struct sec_part_ctx *spc = to_sec_part_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	TEE_Result __maybe_unused tmp_res = TEE_SUCCESS;
	unsigned int ns_buf_size = 0;
	struct param_mem *mem = NULL;
	void *va = NULL;
	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
						TEE_PARAM_TYPE_VALUE_OUTPUT,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE);

	if (cmd != PTA_STMM_CMD_COMMUNICATE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (param->types != exp_pt)
		return TEE_ERROR_BAD_PARAMETERS;

	mem = &param->u[0].mem;
	ns_buf_size = mem->size;
	/* Report the maximum supported size if the request doesn't fit */
	if (ns_buf_size > spc->ns_comm_buf_size) {
		mem->size = spc->ns_comm_buf_size;
		return TEE_ERROR_EXCESS_DATA;
	}

	/* Keep the client buffer mapped while we copy in and out */
	res = mobj_inc_map(mem->mobj);
	if (res)
		return res;

	va = mobj_get_va(mem->mobj, mem->offs);
	if (!va) {
		EMSG("Can't get a valid VA for NS buffer");
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out_va;
	}

	/* Compose an FFA direct request from the StMM PTA endpoint to StMM */
	spc->regs.x[0] = FFA_MSG_SEND_DIRECT_REQ_64;
	spc->regs.x[1] = (stmm_pta_id << 16) | stmm_id;
	spc->regs.x[2] = FFA_PARAM_MBZ;
	spc->regs.x[3] = spc->ns_comm_buf_addr;
	spc->regs.x[4] = ns_buf_size;
	spc->regs.x[5] = 0;
	spc->regs.x[6] = 0;
	spc->regs.x[7] = 0;

	ts_push_current_session(s);

	/* Stage the request in the partition's communication buffer */
	memcpy((void *)spc->ns_comm_buf_addr, va, ns_buf_size);

	res = sec_part_enter_user_mode(spc);
	if (res)
		goto out_session;
	/*
	 * Copy the SPM response from secure partition back to the non-secure
	 * buffer of the client that called us.
	 */
	param->u[1].val.a = spc->regs.x[4];

	memcpy(va, (void *)spc->ns_comm_buf_addr, ns_buf_size);

out_session:
	ts_pop_current_session();
out_va:
	tmp_res = mobj_dec_map(mem->mobj);
	assert(!tmp_res);

	return res;
}
395 
/* Nothing to do on close: the StMM instance is kept alive */
static void stmm_enter_close_session(struct ts_session *s __unused)
{
}
399 
/* Dump the partition's address space mappings for debugging */
static void sec_part_dump_state(struct ts_ctx *ctx)
{
	user_mode_ctx_print_mappings(to_user_mode_ctx(ctx));
}
404 
/* The address space ID (ASID) serves as the unique instance ID */
static uint32_t sec_part_get_instance_id(struct ts_ctx *ctx)
{
	return to_sec_part_ctx(ctx)->uctx.vm_info.asid;
}
409 
/*
 * Tear down a partition context: release pager-managed areas first,
 * then the virtual memory bookkeeping, finally the context itself.
 */
static void sec_part_ctx_destroy(struct ts_ctx *ctx)
{
	struct sec_part_ctx *spc = to_sec_part_ctx(ctx);

	tee_pager_rem_um_areas(&spc->uctx);
	vm_info_final(&spc->uctx);
	free(spc);
}
418 
419 static uint32_t sp_svc_get_mem_attr(vaddr_t va)
420 {
421 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
422 	struct ts_session *sess = NULL;
423 	struct sec_part_ctx *spc = NULL;
424 	uint16_t attrs = 0;
425 	uint16_t perm = 0;
426 
427 	if (!va)
428 		goto err;
429 
430 	sess = ts_get_current_session();
431 	spc = to_sec_part_ctx(sess->ctx);
432 
433 	res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);
434 	if (res)
435 		goto err;
436 
437 	if (attrs & TEE_MATTR_UR)
438 		perm |= SP_MEM_ATTR_ACCESS_RO;
439 	else if (attrs & TEE_MATTR_UW)
440 		perm |= SP_MEM_ATTR_ACCESS_RW;
441 
442 	if (attrs & TEE_MATTR_UX)
443 		perm |= SP_MEM_ATTR_EXEC;
444 
445 	return perm;
446 err:
447 	return SP_RET_DENIED;
448 }
449 
/*
 * Change the user-mode protection of @nr_pages pages starting at @va
 * according to the SP_MEM_ATTR_* bits in @perm. Returns an SP_RET_*
 * status code.
 */
static int sp_svc_set_mem_attr(vaddr_t va, unsigned int nr_pages, uint32_t perm)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct ts_session *sess = NULL;
	struct sec_part_ctx *spc = NULL;
	size_t sz = 0;
	uint32_t prot = 0;

	/* Reject zero-sized requests and size overflow */
	if (!va || !nr_pages || MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz))
		return SP_RET_INVALID_PARAM;

	if (perm & ~SP_MEM_ATTR_ALL)
		return SP_RET_INVALID_PARAM;

	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);

	if ((perm & SP_MEM_ATTR_ACCESS_MASK) == SP_MEM_ATTR_ACCESS_RO)
		prot |= TEE_MATTR_UR;
	else if ((perm & SP_MEM_ATTR_ACCESS_MASK) == SP_MEM_ATTR_ACCESS_RW)
		prot |= TEE_MATTR_URW;

	/* Executable is encoded as the exec-never bit being cleared */
	if ((perm & SP_MEM_ATTR_EXEC_NEVER) == SP_MEM_ATTR_EXEC)
		prot |= TEE_MATTR_UX;

	res = vm_set_prot(&spc->uctx, va, sz, prot);
	if (res)
		return SP_RET_DENIED;

	return SP_RET_SUCCESS;
}
481 
/*
 * Common return path out of StMM back to the kernel. On a normal
 * (non-panic) return the StMM register state (x0-x7, sp, pc, cpsr) is
 * captured so execution can be resumed later; on panic it is left as
 * is. x0-x2 of the svc frame carry (0, panic flag, panic code) back to
 * __thread_enter_user_mode(). Returns false — in contrast to the
 * service handlers, which return true to resume StMM.
 */
static bool return_helper(bool panic, uint32_t panic_code,
			  struct thread_svc_regs *svc_regs)
{
	if (!panic) {
		struct ts_session *sess = ts_get_current_session();
		struct sec_part_ctx *spc = to_sec_part_ctx(sess->ctx);
		size_t n = 0;

		/* Save the return values from StMM */
		for (n = 0; n <= 7; n++)
			spc->regs.x[n] = *(&svc_regs->x0 + n);

		spc->regs.sp = svc_regs->sp_el0;
		spc->regs.pc = svc_regs->elr;
		spc->regs.cpsr = svc_regs->spsr;
	}

	svc_regs->x0 = 0;
	svc_regs->x1 = panic;
	svc_regs->x2 = panic_code;

	return false;
}
505 
506 static void service_compose_direct_resp(struct thread_svc_regs *regs,
507 					uint32_t ret_val)
508 {
509 	uint16_t src_id = 0;
510 	uint16_t dst_id = 0;
511 
512 	/* extract from request */
513 	src_id = (regs->x1 >> 16) & UINT16_MAX;
514 	dst_id = regs->x1 & UINT16_MAX;
515 
516 	/* compose message */
517 	regs->x0 = FFA_MSG_SEND_DIRECT_RESP_64;
518 	/* swap endpoint ids */
519 	regs->x1 = SHIFT_U32(dst_id, 16) | src_id;
520 	regs->x2 = FFA_PARAM_MBZ;
521 	regs->x3 = ret_val;
522 	regs->x4 = 0;
523 	regs->x5 = 0;
524 	regs->x6 = 0;
525 	regs->x7 = 0;
526 }
527 
/*
 * Combined read from secure partition, this will open, read and
 * close the file object.
 *
 * @storage_id selects the backing store, @obj_id/@obj_id_len name the
 * object, and @data/@len/@offset describe the destination range, which
 * must be writable by the partition. A corrupt object is removed; a
 * short read is reported as TEE_ERROR_CORRUPT_OBJECT.
 */
static TEE_Result sec_storage_obj_read(unsigned long storage_id, char *obj_id,
				       unsigned long obj_id_len, void *data,
				       unsigned long len, unsigned long offset,
				       unsigned long flags)
{
	const struct tee_file_operations *fops = NULL;
	TEE_Result res = TEE_ERROR_BAD_STATE;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	struct sec_part_ctx *spc = NULL;
	struct tee_pobj *po = NULL;
	size_t file_size = 0;
	size_t read_len = 0;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	/* The destination buffer must be writable by the partition */
	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);
	res = tee_mmu_check_access_rights(&spc->uctx,
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)data, len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   false, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	res = po->fops->open(po, &file_size, &fh);
	if (res != TEE_SUCCESS)
		goto out;

	read_len = len;
	res = po->fops->read(fh, offset, data, &read_len);
	if (res == TEE_ERROR_CORRUPT_OBJECT) {
		EMSG("Object corrupt");
		po->fops->remove(po);
	} else if (res == TEE_SUCCESS && len != read_len) {
		/* A short read is treated as object corruption */
		res = TEE_ERROR_CORRUPT_OBJECT;
	}

	po->fops->close(&fh);

out:
	tee_pobj_release(po);

	return res;
}
587 
/*
 * Combined write from secure partition, this will create/open, write and
 * close the file object.
 *
 * @storage_id selects the backing store, @obj_id/@obj_id_len name the
 * object, and @data/@len/@offset describe the source range, which must
 * be readable by the partition. The object is created if it does not
 * exist yet.
 */
static TEE_Result sec_storage_obj_write(unsigned long storage_id, char *obj_id,
					unsigned long obj_id_len, void *data,
					unsigned long len, unsigned long offset,
					unsigned long flags)

{
	const struct tee_file_operations *fops = NULL;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	struct sec_part_ctx *spc = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct tee_pobj *po = NULL;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	/* The source buffer must be readable by the partition */
	sess = ts_get_current_session();
	spc = to_sec_part_ctx(sess->ctx);
	res = tee_mmu_check_access_rights(&spc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)data, len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   false, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	/* Create the object on first use */
	res = po->fops->open(po, NULL, &fh);
	if (res == TEE_ERROR_ITEM_NOT_FOUND)
		res = po->fops->create(po, false, NULL, 0, NULL, 0, NULL, 0,
				       &fh);
	if (res == TEE_SUCCESS) {
		res = po->fops->write(fh, offset, data, len);
		po->fops->close(&fh);
	}

	tee_pobj_release(po);

	return res;
}
639 
640 static bool stmm_handle_mem_mgr_service(struct thread_svc_regs *regs)
641 {
642 	uint32_t action = regs->x3;
643 	uintptr_t va = regs->x4;
644 	uint32_t nr_pages = regs->x5;
645 	uint32_t perm = regs->x6;
646 
647 	switch (action) {
648 	case FFA_SVC_MEMORY_ATTRIBUTES_GET_64:
649 		service_compose_direct_resp(regs, sp_svc_get_mem_attr(va));
650 		return true;
651 	case FFA_SVC_MEMORY_ATTRIBUTES_SET_64:
652 		service_compose_direct_resp(regs,
653 					    sp_svc_set_mem_attr(va, nr_pages,
654 								perm));
655 		return true;
656 	default:
657 		EMSG("Undefined service id %#"PRIx32, action);
658 		service_compose_direct_resp(regs, SP_RET_INVALID_PARAM);
659 		return true;
660 	}
661 }
662 
663 #define FILENAME "EFI_VARS"
664 static bool stmm_handle_storage_service(struct thread_svc_regs *regs)
665 {
666 	uint32_t flags = TEE_DATA_FLAG_ACCESS_READ |
667 			 TEE_DATA_FLAG_ACCESS_WRITE |
668 			 TEE_DATA_FLAG_SHARE_READ |
669 			 TEE_DATA_FLAG_SHARE_WRITE;
670 	uint32_t action = regs->x3;
671 	void *va = (void *)regs->x4;
672 	unsigned long len = regs->x5;
673 	unsigned long offset = regs->x6;
674 	char obj_id[] = FILENAME;
675 	size_t obj_id_len = strlen(obj_id);
676 	TEE_Result res = TEE_SUCCESS;
677 
678 	switch (action) {
679 	case FFA_SVC_RPMB_READ:
680 		res = sec_storage_obj_read(TEE_STORAGE_PRIVATE_RPMB, obj_id,
681 					   obj_id_len, va, len, offset, flags);
682 		service_compose_direct_resp(regs, res);
683 
684 		return true;
685 	case FFA_SVC_RPMB_WRITE:
686 		res = sec_storage_obj_write(TEE_STORAGE_PRIVATE_RPMB, obj_id,
687 					    obj_id_len, va, len, offset, flags);
688 		service_compose_direct_resp(regs, res);
689 
690 		return true;
691 	default:
692 		EMSG("Undefined service id %#"PRIx32, action);
693 		service_compose_direct_resp(regs, SP_RET_INVALID_PARAM);
694 		return true;
695 	}
696 }
697 
698 static bool spm_eret_error(int32_t error_code, struct thread_svc_regs *regs)
699 {
700 	regs->x0 = FFA_ERROR;
701 	regs->x1 = FFA_PARAM_MBZ;
702 	regs->x2 = error_code;
703 	regs->x3 = FFA_PARAM_MBZ;
704 	regs->x4 = FFA_PARAM_MBZ;
705 	regs->x5 = FFA_PARAM_MBZ;
706 	regs->x6 = FFA_PARAM_MBZ;
707 	regs->x7 = FFA_PARAM_MBZ;
708 	return true;
709 }
710 
711 static bool spm_handle_direct_req(struct thread_svc_regs *regs)
712 {
713 	uint16_t dst_id = regs->x1 & UINT16_MAX;
714 
715 	/* Look-up of destination endpoint */
716 	if (dst_id == mem_mgr_id)
717 		return stmm_handle_mem_mgr_service(regs);
718 	else if (dst_id == ffa_storage_id)
719 		return stmm_handle_storage_service(regs);
720 
721 	EMSG("Undefined endpoint id %#"PRIx16, dst_id);
722 	return spm_eret_error(SP_RET_INVALID_PARAM, regs);
723 }
724 
/*
 * Top-level svc handler for StMM. Returning true resumes StMM with the
 * updated register state; the return_helper() paths return false to
 * leave StMM instead.
 */
static bool spm_handle_svc(struct thread_svc_regs *regs)
{
	switch (regs->x0) {
	case FFA_VERSION:
		DMSG("Received FFA version");
		regs->x0 = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					    FFA_VERSION_MINOR);
		return true;
	case FFA_MSG_SEND_DIRECT_RESP_64:
		/* StMM answers a request from OP-TEE: hand back control */
		DMSG("Received FFA direct response");
		return return_helper(false, 0, regs);
	case FFA_MSG_SEND_DIRECT_REQ_64:
		/* StMM requests a service (memory attributes or storage) */
		DMSG("Received FFA direct request");
		return spm_handle_direct_req(regs);
	default:
		EMSG("Undefined syscall %#"PRIx32, (uint32_t)regs->x0);
		return return_helper(true /*panic*/, 0xabcd, regs);
	}
}
744 
/* Trusted service operations backing the StMM secure partition */
const struct ts_ops secure_partition_ops __rodata_unpaged = {
	.enter_open_session = stmm_enter_open_session,
	.enter_invoke_cmd = stmm_enter_invoke_cmd,
	.enter_close_session = stmm_enter_close_session,
	.dump_state = sec_part_dump_state,
	.destroy = sec_part_ctx_destroy,
	.get_instance_id = sec_part_get_instance_id,
	.handle_svc = spm_handle_svc,
};
754