// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <crypto/crypto.h>
#include <efi/hob.h>
#include <ffa.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx.h>
#include <mempool.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stmm.h>
#include <tee_api_defines_extensions.h>
#include <tee/tee_pobj.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <zlib.h>

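/*
 * Accessors for the FF-A call arguments in the saved register set:
 * arguments a0-a7 map to x0-x7 on AArch64 and to r0-r7 on AArch32, and
 * the 32-bit or 64-bit variants of the FF-A function IDs are selected to
 * match the build.
 */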
#ifdef ARM64
#define SVC_REGS_A0(_regs)	((_regs)->x0)
#define SVC_REGS_A1(_regs)	((_regs)->x1)
#define SVC_REGS_A2(_regs)	((_regs)->x2)
#define SVC_REGS_A3(_regs)	((_regs)->x3)
#define SVC_REGS_A4(_regs)	((_regs)->x4)
#define SVC_REGS_A5(_regs)	((_regs)->x5)
#define SVC_REGS_A6(_regs)	((_regs)->x6)
#define SVC_REGS_A7(_regs)	((_regs)->x7)
#define __FFA_SVC_RPMB_READ		FFA_SVC_RPMB_READ
#define __FFA_SVC_RPMB_WRITE		FFA_SVC_RPMB_WRITE
#define __FFA_MSG_SEND_DIRECT_RESP	FFA_MSG_SEND_DIRECT_RESP_64
#define __FFA_MSG_SEND_DIRECT_REQ	FFA_MSG_SEND_DIRECT_REQ_64
#define __FFA_MEM_PERM_GET		FFA_MEM_PERM_GET_64
#define __FFA_MEM_PERM_SET		FFA_MEM_PERM_SET_64
#endif
#ifdef ARM32
#define SVC_REGS_A0(_regs)	((_regs)->r0)
#define SVC_REGS_A1(_regs)	((_regs)->r1)
#define SVC_REGS_A2(_regs)	((_regs)->r2)
#define SVC_REGS_A3(_regs)	((_regs)->r3)
#define SVC_REGS_A4(_regs)	((_regs)->r4)
#define SVC_REGS_A5(_regs)	((_regs)->r5)
#define SVC_REGS_A6(_regs)	((_regs)->r6)
#define SVC_REGS_A7(_regs)	((_regs)->r7)
#define __FFA_SVC_RPMB_READ		FFA_SVC_RPMB_READ_32
#define __FFA_SVC_RPMB_WRITE		FFA_SVC_RPMB_WRITE_32
#define __FFA_MSG_SEND_DIRECT_RESP	FFA_MSG_SEND_DIRECT_RESP_32
#define __FFA_MSG_SEND_DIRECT_REQ	FFA_MSG_SEND_DIRECT_REQ_32
#define __FFA_MEM_PERM_GET		FFA_MEM_PERM_GET_32
#define __FFA_MEM_PERM_SET		FFA_MEM_PERM_SET_32
#endif

static const TEE_UUID stmm_uuid = PTA_STMM_UUID;
static TEE_UUID ns_buf_guid = MM_NS_BUFFER_GUID;
static TEE_UUID mmram_resv_guid = MM_PEI_MMRAM_MEMORY_RESERVE_GUID;

/*
 * Once a complete FFA spec is added, these will become discoverable.
 * Until then these are considered part of the internal ABI between
 * OP-TEE and StMM.
 */
static const uint16_t stmm_id = 1U;
static const uint16_t stmm_pta_id = 2U;
static const uint16_t ffa_storage_id = 4U;

static const unsigned int stmm_heap_size = 402 * SMALL_PAGE_SIZE;
static const unsigned int stmm_sec_buf_size = 4 * SMALL_PAGE_SIZE;
static const unsigned int stmm_ns_comm_buf_size = 4 * SMALL_PAGE_SIZE;

extern unsigned char stmm_image[];
extern const unsigned int stmm_image_size;
extern const unsigned int stmm_image_uncompressed_size;

static vaddr_t stmm_image_addr;
static vaddr_t stmm_heap_addr;
static vaddr_t stmm_ns_comm_buf_addr;
static vaddr_t stmm_sec_buf_addr;

const TEE_UUID *stmm_get_uuid(void)
{
	return &stmm_uuid;
}

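/*
 * Allocate and initialize the StMM secure partition context as a
 * single-instance, keep-alive pseudo TA context with its own user mode
 * address space.
 */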
static struct stmm_ctx *stmm_alloc_ctx(const TEE_UUID *uuid)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct stmm_ctx *spc = NULL;

	spc = calloc(1, sizeof(*spc));
	if (!spc)
		return NULL;

	spc->ta_ctx.ts_ctx.ops = &stmm_sp_ops;
	spc->ta_ctx.ts_ctx.uuid = *uuid;
	spc->ta_ctx.flags = TA_FLAG_SINGLE_INSTANCE |
			    TA_FLAG_INSTANCE_KEEP_ALIVE;

	res = vm_info_init(&spc->uctx, &spc->ta_ctx.ts_ctx);
	if (res) {
		free(spc);
		return NULL;
	}

	spc->ta_ctx.ref_count = 1;
	condvar_init(&spc->ta_ctx.busy_cv);

	return spc;
}

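/*
 * Enter StMM in user mode with the register state saved in @spc. EL0
 * access to the physical counter is enabled around the call, and a panic
 * reported by the partition is turned into TEE_ERROR_TARGET_DEAD.
 */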
static TEE_Result stmm_enter_user_mode(struct stmm_ctx *spc)
{
	uint32_t exceptions = 0;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uint64_t cntkctl = 0;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cntkctl = read_cntkctl();
	write_cntkctl(cntkctl | CNTKCTL_PL0PCTEN);

#ifdef ARM32
	/* Handle usr_lr in place of __thread_enter_user_mode() */
	thread_set_usr_lr(spc->regs.usr_lr);
#endif

	__thread_enter_user_mode(&spc->regs, &panicked, &panic_code);

#ifdef ARM32
	spc->regs.usr_lr = thread_get_usr_lr();
#endif

	write_cntkctl(cntkctl);
	thread_unmask_exceptions(exceptions);

	thread_user_clear_vfp(&spc->uctx);

	if (panicked) {
		abort_print_current_ts();
		DMSG("stmm panicked with code %#"PRIx32, panic_code);
		return TEE_ERROR_TARGET_DEAD;
	}

	return TEE_SUCCESS;
}

#ifdef ARM64
static void init_stmm_regs(struct stmm_ctx *spc, unsigned long a0,
			   unsigned long a1, unsigned long a2,
			   unsigned long a3, unsigned long pc)
{
	spc->regs.x[0] = a0;
	spc->regs.x[1] = a1;
	spc->regs.x[2] = a2;
	spc->regs.x[3] = a3;
	spc->regs.pc = pc;
}
#endif

#ifdef ARM32
static uint32_t __maybe_unused get_spsr(void)
{
	uint32_t s = 0;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | ARM32_CPSR_IT_MASK);
	s |= CPSR_MODE_USR;

	return s;
}

static void init_stmm_regs(struct stmm_ctx *spc, unsigned long a0,
			   unsigned long a1, unsigned long a2,
			   unsigned long a3, unsigned long pc)
{
	spc->regs.r0 = a0;
	spc->regs.r1 = a1;
	spc->regs.r2 = a2;
	spc->regs.r3 = a3;
	spc->regs.cpsr = get_spsr();
	spc->regs.pc = pc;
}
#endif

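/*
 * Allocate @sz bytes (rounded up to whole pages) of fobj-backed TA memory
 * and map it into the partition's address space with protection @prot.
 * The virtual address chosen by vm_map() is returned through @va.
 */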
static TEE_Result alloc_and_map_sp_fobj(struct stmm_ctx *spc, size_t sz,
					uint32_t prot, vaddr_t *va)
{
	size_t num_pgs = ROUNDUP_DIV(sz, SMALL_PAGE_SIZE);
	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;

	mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,
		     prot, 0, mobj, 0);
	if (res)
		mobj_put(mobj);

	/* Propagate the vm_map() result instead of always returning success */
	return res;
}

static void *zalloc(void *opaque __unused, unsigned int items,
		    unsigned int size)
{
	return mempool_alloc(mempool_default, items * size);
}

static void zfree(void *opaque __unused, void *address)
{
	mempool_free(mempool_default, address);
}

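/*
 * Decompress the zlib-compressed StMM image embedded in the TEE binary
 * into the destination buffer, using the default mempool for zlib's
 * temporary allocations. Any inflate error is fatal.
 */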
static void uncompress_image(void *dst, size_t dst_size, void *src,
			     size_t src_size)
{
	z_stream strm = {
		.next_in = src,
		.avail_in = src_size,
		.next_out = dst,
		.avail_out = dst_size,
		.zalloc = zalloc,
		.zfree = zfree,
	};

	if (inflateInit(&strm) != Z_OK)
		panic("inflateInit");

	if (inflate(&strm, Z_SYNC_FLUSH) != Z_STREAM_END)
		panic("inflate");

	if (inflateEnd(&strm) != Z_OK)
		panic("inflateEnd");
}

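/*
 * Build the UEFI HOB list handed over to StMM at boot. The secure
 * allocation is laid out as image | heap | secure buffer:
 *
 *   sp_addr (stmm_image_addr): decompressed StMM image, rounded up to
 *                              whole pages
 *   stmm_heap_addr:            StMM heap
 *   stmm_sec_buf_addr:         FF-A boot info header and descriptor,
 *                              followed by the HOB list
 *
 * The HOB list describes the firmware volume, the non-secure
 * communication buffer and the reserved MMRAM regions listed above.
 */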
static struct efi_hob_handoff_info_table *
build_stmm_boot_hob_list(vaddr_t sp_addr,
			 uint32_t sp_size, uint32_t *hob_table_size)
{
	struct efi_hob_handoff_info_table *hob_table = NULL;
	unsigned int uncompressed_size_roundup = 0;
	struct efi_mmram_descriptor *mmram_desc_data = NULL;
	struct efi_mmram_hob_descriptor_block *mmram_resv_data = NULL;
	uint16_t mmram_resv_data_size = 0;
	TEE_Result ret = TEE_ERROR_GENERIC;
	uint32_t hob_table_offset = 0;
	void *guid_hob_data = NULL;

	uncompressed_size_roundup = ROUNDUP(stmm_image_uncompressed_size,
					    SMALL_PAGE_SIZE);
	stmm_image_addr = sp_addr;
	stmm_heap_addr = stmm_image_addr + uncompressed_size_roundup;
	stmm_sec_buf_addr = stmm_heap_addr + stmm_heap_size;
	hob_table_offset = sizeof(struct ffa_boot_info_header_1_1) +
			   sizeof(struct ffa_boot_info_1_1);

	hob_table = efi_create_hob_list(sp_addr, sp_size,
					stmm_sec_buf_addr + hob_table_offset,
					stmm_sec_buf_size - hob_table_offset);
	if (!hob_table) {
		EMSG("Failed to create hob_table.");
		return NULL;
	}

	ret = efi_create_fv_hob(hob_table, sp_addr, uncompressed_size_roundup);
	if (ret) {
		EMSG("Failed to create fv hob.");
		return NULL;
	}

	ret = efi_create_guid_hob(hob_table, &ns_buf_guid,
				  sizeof(struct efi_mmram_descriptor),
				  &guid_hob_data);
	if (ret) {
		EMSG("Failed to create ns buffer hob.");
		return NULL;
	}

	mmram_desc_data = guid_hob_data;
	mmram_desc_data->physical_start = stmm_ns_comm_buf_addr;
	mmram_desc_data->physical_size = stmm_ns_comm_buf_size;
	mmram_desc_data->cpu_start = stmm_ns_comm_buf_addr;
	mmram_desc_data->region_state = EFI_CACHEABLE | EFI_ALLOCATED;

	mmram_resv_data_size = sizeof(struct efi_mmram_hob_descriptor_block) +
			       sizeof(struct efi_mmram_descriptor) * 5;

	ret = efi_create_guid_hob(hob_table, &mmram_resv_guid,
				  mmram_resv_data_size, &guid_hob_data);
	if (ret) {
		EMSG("Failed to create mm range hob");
		return NULL;
	}

	mmram_resv_data = guid_hob_data;
	mmram_resv_data->number_of_mm_reserved_regions = 4;
	mmram_desc_data = &mmram_resv_data->descriptor[0];

	mmram_desc_data[0].physical_start = stmm_image_addr;
	mmram_desc_data[0].physical_size = uncompressed_size_roundup;
	mmram_desc_data[0].cpu_start = stmm_image_addr;
	mmram_desc_data[0].region_state = EFI_CACHEABLE | EFI_ALLOCATED;

	mmram_desc_data[1].physical_start = stmm_sec_buf_addr;
	mmram_desc_data[1].physical_size = stmm_sec_buf_size;
	mmram_desc_data[1].cpu_start = stmm_sec_buf_addr;
	mmram_desc_data[1].region_state = EFI_CACHEABLE | EFI_ALLOCATED;

	mmram_desc_data[2].physical_start = stmm_ns_comm_buf_addr;
	mmram_desc_data[2].physical_size = stmm_ns_comm_buf_size;
	mmram_desc_data[2].cpu_start = stmm_ns_comm_buf_addr;
	mmram_desc_data[2].region_state = EFI_CACHEABLE | EFI_ALLOCATED;

	mmram_desc_data[3].physical_start = stmm_heap_addr;
	mmram_desc_data[3].physical_size = stmm_heap_size;
	mmram_desc_data[3].cpu_start = stmm_heap_addr;
	mmram_desc_data[3].region_state = EFI_CACHEABLE;

	*hob_table_size = hob_table->efi_free_memory_bottom -
			  (efi_physical_address_t)hob_table;

	return hob_table;
}

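/*
 * Load the StMM secure partition: map secure memory for the image, heap
 * and boot information, map the non-secure communication buffer, build
 * the FF-A boot information blob and HOB list, decompress the image,
 * apply the final memory permissions and enter StMM for its
 * initialization run.
 */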
static TEE_Result load_stmm(struct stmm_ctx *spc)
{
	struct ffa_boot_info_header_1_1 *hdr = NULL;
	struct ffa_boot_info_1_1 *desc = NULL;
	struct efi_hob_handoff_info_table *hob_table = NULL;
	uint32_t hob_table_size = 0;
	vaddr_t sp_addr = 0;
	unsigned int sp_size = 0;
	unsigned int uncompressed_size_roundup = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	uncompressed_size_roundup = ROUNDUP(stmm_image_uncompressed_size,
					    SMALL_PAGE_SIZE);
	sp_size = uncompressed_size_roundup + stmm_heap_size +
		  stmm_sec_buf_size;
	res = alloc_and_map_sp_fobj(spc, sp_size,
				    TEE_MATTR_PRW, &sp_addr);
	if (res)
		return res;

	res = alloc_and_map_sp_fobj(spc, stmm_ns_comm_buf_size,
				    TEE_MATTR_URW | TEE_MATTR_PRW,
				    &stmm_ns_comm_buf_addr);
	/*
	 * We don't need to free the previous allocations here, they'll all
	 * be handled during the destruction call (stmm_ctx_destroy())
	 */
	if (res)
		return res;

	hob_table = build_stmm_boot_hob_list(sp_addr, sp_size,
					     &hob_table_size);
	if (!hob_table)
		return TEE_ERROR_NO_DATA;

	hdr = (void *)stmm_sec_buf_addr;

	hdr->signature = FFA_BOOT_INFO_SIGNATURE;
	hdr->version = FFA_VERSION_1_2;
	hdr->desc_size = sizeof(struct ffa_boot_info_1_1);
	hdr->desc_count = 1;
	hdr->desc_offset = sizeof(struct ffa_boot_info_header_1_1);
	hdr->reserved = 0;
	hdr->blob_size = hdr->desc_size * hdr->desc_count + hdr->desc_offset;

	desc = (void *)(stmm_sec_buf_addr + hdr->desc_offset);

	memset(desc->name, 0, FFA_BOOT_INFO_NAME_LEN);
	desc->type = FFA_BOOT_INFO_TYPE_ID_HOB;
	desc->flags = FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID |
		      (FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR <<
		       FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT);
	desc->size = hob_table_size;
	desc->contents = (vaddr_t)hob_table;

	vm_set_ctx(&spc->ta_ctx.ts_ctx);
	uncompress_image((void *)stmm_image_addr, stmm_image_uncompressed_size,
			 stmm_image, stmm_image_size);

	res = vm_set_prot(&spc->uctx, stmm_image_addr,
			  uncompressed_size_roundup,
			  TEE_MATTR_URX | TEE_MATTR_PR);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, stmm_heap_addr, stmm_heap_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, stmm_sec_buf_addr, stmm_sec_buf_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	DMSG("stmm load address %#"PRIxVA, stmm_image_addr);

	spc->ns_comm_buf_addr = stmm_ns_comm_buf_addr;
	spc->ns_comm_buf_size = stmm_ns_comm_buf_size;

	init_stmm_regs(spc, (unsigned long)hdr, 0, 0, 0, stmm_image_addr);

	return stmm_enter_user_mode(spc);
}

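/*
 * Session setup is split in two: stmm_init_session() runs with
 * tee_ta_mutex held and only allocates the context, while
 * stmm_complete_session() loads and boots StMM and, on success,
 * publishes the context on the global context list.
 */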
TEE_Result stmm_init_session(const TEE_UUID *uuid, struct tee_ta_session *sess)
{
	struct stmm_ctx *spc = NULL;

	/* Caller is expected to hold tee_ta_mutex for safe changes in @sess */
	assert(mutex_is_locked(&tee_ta_mutex));

	if (memcmp(uuid, &stmm_uuid, sizeof(*uuid)))
		return TEE_ERROR_ITEM_NOT_FOUND;

	spc = stmm_alloc_ctx(uuid);
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	spc->ta_ctx.is_initializing = true;

	sess->ts_sess.ctx = &spc->ta_ctx.ts_ctx;
	sess->ts_sess.handle_scall = sess->ts_sess.ctx->ops->handle_scall;

	return TEE_SUCCESS;
}

TEE_Result stmm_complete_session(struct tee_ta_session *sess)
{
	struct stmm_ctx *spc = to_stmm_ctx(sess->ts_sess.ctx);
	TEE_Result res = TEE_ERROR_GENERIC;

	ts_push_current_session(&sess->ts_sess);
	res = load_stmm(spc);
	ts_pop_current_session();
	vm_set_ctx(NULL);
	if (res) {
		sess->ts_sess.ctx = NULL;
		spc->ta_ctx.ts_ctx.ops->destroy(&spc->ta_ctx.ts_ctx);

		return res;
	}

	mutex_lock(&tee_ta_mutex);
	spc->ta_ctx.is_initializing = false;
	TAILQ_INSERT_TAIL(&tee_ctxes, &spc->ta_ctx, link);
	mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result stmm_enter_open_session(struct ts_session *s)
{
	struct stmm_ctx *spc = to_stmm_ctx(s->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(s);
	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE);

	if (ta_sess->param->types != exp_pt)
		return TEE_ERROR_BAD_PARAMETERS;

	if (spc->ta_ctx.is_initializing) {
		/* StMM is initialized in stmm_init_session() */
		ta_sess->err_origin = TEE_ORIGIN_TEE;
		return TEE_ERROR_BAD_STATE;
	}

	return TEE_SUCCESS;
}

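/*
 * PTA_STMM_CMD_COMMUNICATE: copy the client's buffer into the non-secure
 * communication buffer, send an FF-A direct request to StMM and copy the
 * response back. The StMM return code is reported through the value
 * output parameter.
 */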
static TEE_Result stmm_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	struct stmm_ctx *spc = to_stmm_ctx(s->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(s);
	TEE_Result res = TEE_SUCCESS;
	TEE_Result __maybe_unused tmp_res = TEE_SUCCESS;
	unsigned int ns_buf_size = 0;
	struct param_mem *mem = NULL;
	void *va = NULL;
	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
						TEE_PARAM_TYPE_VALUE_OUTPUT,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE);

	if (cmd != PTA_STMM_CMD_COMMUNICATE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ta_sess->param->types != exp_pt)
		return TEE_ERROR_BAD_PARAMETERS;

	mem = &ta_sess->param->u[0].mem;
	ns_buf_size = mem->size;
	if (ns_buf_size > spc->ns_comm_buf_size) {
		mem->size = spc->ns_comm_buf_size;
		return TEE_ERROR_EXCESS_DATA;
	}

	res = mobj_inc_map(mem->mobj);
	if (res)
		return res;

	va = mobj_get_va(mem->mobj, mem->offs, mem->size);
	if (!va) {
		EMSG("Can't get a valid VA for NS buffer");
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out_va;
	}

#ifdef ARM64
	spc->regs.x[0] = __FFA_MSG_SEND_DIRECT_REQ;
	spc->regs.x[1] = (stmm_pta_id << 16) | stmm_id;
	spc->regs.x[2] = FFA_PARAM_MBZ;
	spc->regs.x[3] = spc->ns_comm_buf_addr;
	spc->regs.x[4] = ns_buf_size;
	spc->regs.x[5] = 0;
	spc->regs.x[6] = 0;
	spc->regs.x[7] = 0;
#endif
#ifdef ARM32
	spc->regs.r0 = __FFA_MSG_SEND_DIRECT_REQ;
	spc->regs.r1 = (stmm_pta_id << 16) | stmm_id;
	spc->regs.r2 = FFA_PARAM_MBZ;
	spc->regs.r3 = spc->ns_comm_buf_addr;
	spc->regs.r4 = ns_buf_size;
	spc->regs.r5 = 0;
	spc->regs.r6 = 0;
	spc->regs.r7 = 0;
#endif

	ts_push_current_session(s);

	memcpy((void *)spc->ns_comm_buf_addr, va, ns_buf_size);

	res = stmm_enter_user_mode(spc);
	if (res)
		goto out_session;
	/*
	 * Copy the SPM response from secure partition back to the non-secure
	 * buffer of the client that called us.
	 */
#ifdef ARM64
	ta_sess->param->u[1].val.a = spc->regs.x[4];
#endif
#ifdef ARM32
	ta_sess->param->u[1].val.a = spc->regs.r4;
#endif

	memcpy(va, (void *)spc->ns_comm_buf_addr, ns_buf_size);

out_session:
	ts_pop_current_session();
out_va:
	tmp_res = mobj_dec_map(mem->mobj);
	assert(!tmp_res);

	return res;
}

static void stmm_enter_close_session(struct ts_session *s __unused)
{
}

static void stmm_dump_state(struct ts_ctx *ctx)
{
	user_mode_ctx_print_mappings(to_user_mode_ctx(ctx));
}
DECLARE_KEEP_PAGER(stmm_dump_state);

static uint32_t stmm_get_instance_id(struct ts_ctx *ctx)
{
	return to_stmm_ctx(ctx)->uctx.vm_info.asid;
}

static void stmm_ctx_destroy(struct ts_ctx *ctx)
{
	struct stmm_ctx *spc = to_stmm_ctx(ctx);

	vm_info_final(&spc->uctx);
	free(spc);
}

#ifdef ARM64
static void save_sp_ctx(struct stmm_ctx *spc,
			struct thread_scall_regs *regs)
{
	size_t n = 0;

	/* Save the return values from StMM */
	for (n = 0; n <= 7; n++)
		spc->regs.x[n] = *(&regs->x0 + n);

	spc->regs.sp = regs->sp_el0;
	spc->regs.pc = regs->elr;
	spc->regs.cpsr = regs->spsr;
}
#endif

#ifdef ARM32
static void save_sp_ctx(struct stmm_ctx *spc,
			struct thread_scall_regs *regs)
{
	spc->regs.r0 = regs->r0;
	spc->regs.r1 = regs->r1;
	spc->regs.r2 = regs->r2;
	spc->regs.r3 = regs->r3;
	spc->regs.r4 = regs->r4;
	spc->regs.r5 = regs->r5;
	spc->regs.r6 = regs->r6;
	spc->regs.r7 = regs->r7;
	spc->regs.pc = regs->lr;
	spc->regs.cpsr = regs->spsr;
	spc->regs.usr_sp = thread_get_usr_sp();
}
#endif

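/*
 * Unwind from StMM back to stmm_enter_user_mode(): save the partition's
 * register state unless it panicked, and encode the panic status in the
 * return registers consumed when leaving user mode.
 */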
static void return_from_sp_helper(bool panic, uint32_t panic_code,
				  struct thread_scall_regs *regs)
{
	struct ts_session *sess = ts_get_current_session();
	struct stmm_ctx *spc = to_stmm_ctx(sess->ctx);

	if (panic)
		spc->ta_ctx.panicked = true;
	else
		save_sp_ctx(spc, regs);

	SVC_REGS_A0(regs) = 0;
	SVC_REGS_A1(regs) = panic;
	SVC_REGS_A2(regs) = panic_code;
}

static void service_compose_direct_resp(struct thread_scall_regs *regs,
					uint32_t ret_val)
{
	uint16_t src_id = 0;
	uint16_t dst_id = 0;

	/* extract from request */
	src_id = (SVC_REGS_A1(regs) >> 16) & UINT16_MAX;
	dst_id = SVC_REGS_A1(regs) & UINT16_MAX;

	/* compose message */
	SVC_REGS_A0(regs) = __FFA_MSG_SEND_DIRECT_RESP;
	/* swap endpoint ids */
	SVC_REGS_A1(regs) = SHIFT_U32(dst_id, 16) | src_id;
	SVC_REGS_A2(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A3(regs) = ret_val;
	SVC_REGS_A4(regs) = 0;
	SVC_REGS_A5(regs) = 0;
	SVC_REGS_A6(regs) = 0;
	SVC_REGS_A7(regs) = 0;
}

/*
 * Combined read from the secure partition: this will open, read and
 * close the file object.
 */
static TEE_Result sec_storage_obj_read(unsigned long storage_id, char *obj_id,
				       unsigned long obj_id_len, void *data,
				       unsigned long len, unsigned long offset,
				       unsigned long flags)
{
	const struct tee_file_operations *fops = NULL;
	TEE_Result res = TEE_ERROR_BAD_STATE;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	struct tee_pobj *po = NULL;
	size_t file_size = 0;
	size_t read_len = 0;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	sess = ts_get_current_session();

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   TEE_POBJ_USAGE_OPEN, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	res = po->fops->open(po, &file_size, &fh);
	if (res != TEE_SUCCESS)
		goto out;

	read_len = len;
	res = po->fops->read(fh, offset, NULL, data, &read_len);
	if (res == TEE_ERROR_CORRUPT_OBJECT) {
		EMSG("Object corrupt");
		po->fops->remove(po);
	} else if (res == TEE_SUCCESS && len != read_len) {
		res = TEE_ERROR_CORRUPT_OBJECT;
	}

	po->fops->close(&fh);

out:
	tee_pobj_release(po);

	return res;
}

/*
 * Combined write from the secure partition: this will create/open, write
 * and close the file object.
 */
static TEE_Result sec_storage_obj_write(unsigned long storage_id, char *obj_id,
					unsigned long obj_id_len, void *data,
					unsigned long len, unsigned long offset,
					unsigned long flags)
{
	const struct tee_file_operations *fops = NULL;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct tee_pobj *po = NULL;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	sess = ts_get_current_session();

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   TEE_POBJ_USAGE_OPEN, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	res = po->fops->open(po, NULL, &fh);
	if (res == TEE_ERROR_ITEM_NOT_FOUND)
		res = po->fops->create(po, false, NULL, 0, NULL, 0,
				       NULL, NULL, 0, &fh);
	if (res == TEE_SUCCESS) {
		res = po->fops->write(fh, offset, NULL, data, len);
		po->fops->close(&fh);
	}

	tee_pobj_release(po);

	return res;
}

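/*
 * Map TEE_Result codes from the storage helpers to the FF-A status codes
 * returned to StMM in the direct response.
 */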
static uint32_t tee2ffa_ret_val(TEE_Result res)
{
	switch (res) {
	case TEE_SUCCESS:
		return FFA_OK;
	case TEE_ERROR_NOT_IMPLEMENTED:
	case TEE_ERROR_NOT_SUPPORTED:
		return FFA_NOT_SUPPORTED;
	case TEE_ERROR_OUT_OF_MEMORY:
		return FFA_NO_MEMORY;
	case TEE_ERROR_ACCESS_DENIED:
		return FFA_DENIED;
	case TEE_ERROR_NO_DATA:
		return FFA_NO_DATA;
	case TEE_ERROR_BAD_PARAMETERS:
	default:
		return FFA_INVALID_PARAMETERS;
	}
}

static void spm_eret_error(int32_t error_code, struct thread_scall_regs *regs)
{
	SVC_REGS_A0(regs) = FFA_ERROR;
	SVC_REGS_A1(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A2(regs) = error_code;
	SVC_REGS_A3(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A4(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A5(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A6(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A7(regs) = FFA_PARAM_MBZ;
}

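/*
 * Storage service endpoint: StMM keeps the EFI variable store as a
 * single RPMB-backed object ("EFI_VARS") and reads or writes it through
 * the direct requests handled below.
 */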
#define FILENAME "EFI_VARS"
static void stmm_handle_storage_service(struct thread_scall_regs *regs)
{
	uint32_t flags = TEE_DATA_FLAG_ACCESS_READ |
			 TEE_DATA_FLAG_ACCESS_WRITE |
			 TEE_DATA_FLAG_SHARE_READ |
			 TEE_DATA_FLAG_SHARE_WRITE;
	uint32_t action = SVC_REGS_A3(regs);
	void *va = (void *)SVC_REGS_A4(regs);
	unsigned long len = SVC_REGS_A5(regs);
	unsigned long offset = SVC_REGS_A6(regs);
	char obj_id[] = FILENAME;
	size_t obj_id_len = strlen(obj_id);
	TEE_Result res = TEE_SUCCESS;
	uint32_t stmm_rc = STMM_RET_INVALID_PARAM;

	switch (action) {
	case __FFA_SVC_RPMB_READ:
		DMSG("RPMB read");
		res = sec_storage_obj_read(TEE_STORAGE_PRIVATE_RPMB, obj_id,
					   obj_id_len, va, len, offset, flags);
		stmm_rc = tee2ffa_ret_val(res);
		break;
	case __FFA_SVC_RPMB_WRITE:
		DMSG("RPMB write");
		res = sec_storage_obj_write(TEE_STORAGE_PRIVATE_RPMB, obj_id,
					    obj_id_len, va, len, offset, flags);
		stmm_rc = tee2ffa_ret_val(res);
		break;
	default:
		EMSG("Undefined service id %#"PRIx32, action);
		break;
	}

	service_compose_direct_resp(regs, stmm_rc);
}

static void spm_handle_direct_req(struct thread_scall_regs *regs)
{
	uint16_t dst_id = SVC_REGS_A1(regs) & UINT16_MAX;

	if (dst_id == ffa_storage_id) {
		stmm_handle_storage_service(regs);
	} else {
		EMSG("Undefined endpoint id %#"PRIx16, dst_id);
		spm_eret_error(STMM_RET_INVALID_PARAM, regs);
	}
}

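/*
 * FFA_MEM_PERM_GET: look up the protection of the page at the given VA
 * in the partition's address space and translate it to FF-A RO/RW/NX
 * permission bits.
 */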
static void spm_handle_get_mem_attr(struct thread_scall_regs *regs)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct ts_session *sess = NULL;
	struct stmm_ctx *spc = NULL;
	uint16_t attrs = 0;
	uint16_t perm = 0;
	vaddr_t va = 0;
	uint32_t ffa_ret = FFA_INVALID_PARAMETERS;

	sess = ts_get_current_session();
	spc = to_stmm_ctx(sess->ctx);

	va = SVC_REGS_A1(regs);
	if (!va)
		goto err;

	res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);
	if (res)
		goto err;

	if ((attrs & TEE_MATTR_URW) == TEE_MATTR_URW)
		perm |= FFA_MEM_PERM_RW;
	else if ((attrs & TEE_MATTR_UR) == TEE_MATTR_UR)
		perm |= FFA_MEM_PERM_RO;

	if (!(attrs & TEE_MATTR_UX))
		perm |= FFA_MEM_PERM_NX;

	SVC_REGS_A0(regs) = FFA_SUCCESS_32;
	SVC_REGS_A1(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A2(regs) = perm;
	SVC_REGS_A3(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A4(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A5(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A6(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A7(regs) = FFA_PARAM_MBZ;

	return;

err:
	spm_eret_error(ffa_ret, regs);
}

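/*
 * FFA_MEM_PERM_SET: validate the requested range and permission bits and
 * update the partition's mapping with vm_set_prot().
 */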
static void spm_handle_set_mem_attr(struct thread_scall_regs *regs)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct ts_session *sess = NULL;
	struct stmm_ctx *spc = NULL;
	uintptr_t va = SVC_REGS_A1(regs);
	uint32_t nr_pages = SVC_REGS_A2(regs);
	uint32_t perm = SVC_REGS_A3(regs);
	size_t sz = 0;
	uint32_t prot = 0;
	uint32_t ffa_ret = FFA_INVALID_PARAMETERS;

	if (!va || !nr_pages ||
	    MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz) ||
	    (perm & FFA_MEM_PERM_RESERVED))
		goto err;

	sess = ts_get_current_session();
	spc = to_stmm_ctx(sess->ctx);

	if ((perm & FFA_MEM_PERM_DATA_PERM) == FFA_MEM_PERM_RO)
		prot |= TEE_MATTR_UR;
	else if ((perm & FFA_MEM_PERM_DATA_PERM) == FFA_MEM_PERM_RW)
		prot |= TEE_MATTR_URW;

	if ((perm & FFA_MEM_PERM_INSTRUCTION_PERM) != FFA_MEM_PERM_NX)
		prot |= TEE_MATTR_UX;

	res = vm_set_prot(&spc->uctx, va, sz, prot);
	if (res) {
		ffa_ret = FFA_DENIED;
		goto err;
	}

	SVC_REGS_A0(regs) = FFA_SUCCESS_32;
	SVC_REGS_A1(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A2(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A3(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A4(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A5(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A6(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A7(regs) = FFA_PARAM_MBZ;

	return;

err:
	spm_eret_error(ffa_ret, regs);
}

/* Return true if returning to SP, false if returning to caller */
static bool spm_handle_scall(struct thread_scall_regs *regs)
{
#ifdef ARM64
	uint64_t *a0 = &regs->x0;
#endif
#ifdef ARM32
	uint32_t *a0 = &regs->r0;
#endif

	switch (*a0) {
	case FFA_VERSION:
		DMSG("Received FFA version");
		*a0 = FFA_VERSION_1_2;
		return true;
	case FFA_ID_GET:
		DMSG("Received FFA ID GET");
		SVC_REGS_A0(regs) = FFA_SUCCESS_32;
		SVC_REGS_A2(regs) = stmm_id;
		return true;
	case FFA_MSG_WAIT:
		DMSG("Received FFA_MSG_WAIT");
		return_from_sp_helper(false, 0, regs);
		return false;
	case __FFA_MSG_SEND_DIRECT_RESP:
		DMSG("Received FFA direct response");
		return_from_sp_helper(false, 0, regs);
		return false;
	case __FFA_MSG_SEND_DIRECT_REQ:
		DMSG("Received FFA direct request");
		spm_handle_direct_req(regs);
		return true;
	case __FFA_MEM_PERM_GET:
		DMSG("Received FFA mem perm get");
		spm_handle_get_mem_attr(regs);
		return true;
	case __FFA_MEM_PERM_SET:
		DMSG("Received FFA mem perm set");
		spm_handle_set_mem_attr(regs);
		return true;
	case FFA_ERROR:
		EMSG("Received FFA error");
		return_from_sp_helper(true /*panic*/, 0xabcd, regs);
		return false;
	default:
		DMSG("Undefined syscall %#"PRIx32, (uint32_t)*a0);
		spm_eret_error(FFA_NOT_SUPPORTED, regs);
		return true;
	}
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops stmm_sp_ops __weak __relrodata_unpaged("stmm_sp_ops") = {
	.enter_open_session = stmm_enter_open_session,
	.enter_invoke_cmd = stmm_enter_invoke_cmd,
	.enter_close_session = stmm_enter_close_session,
	.dump_state = stmm_dump_state,
	.destroy = stmm_ctx_destroy,
	.get_instance_id = stmm_get_instance_id,
	.handle_scall = spm_handle_scall,
};