1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2021-2026, Arm Limited
4 */
5 #include <assert.h>
6 #include <io.h>
7 #include <kernel/panic.h>
8 #include <kernel/secure_partition.h>
9 #include <kernel/spinlock.h>
10 #include <kernel/spmc_sp_handler.h>
11 #include <kernel/tee_misc.h>
12 #include <kernel/thread_private.h>
13 #include <kernel/thread_spmc.h>
14 #include <mm/mobj.h>
15 #include <mm/sp_mem.h>
16 #include <mm/vm.h>
17 #include <optee_ffa.h>
18 #include <string.h>
19
/* Protects sp_mem_receiver::ref_count in retrieve/relinquish/reclaim paths */
static unsigned int mem_ref_lock = SPINLOCK_UNLOCK;
21
/*
 * Allocate a thread and run an SP on it, passing on the FF-A arguments
 * starting at args->arg11.
 */
int spmc_sp_start_thread(struct thread_smc_1_2_regs *args)
{
	thread_sp_alloc_and_run(&args->arg11);
	/*
	 * thread_sp_alloc_and_run() only returns if all threads are busy.
	 * The caller must try again.
	 */
	return FFA_BUSY;
}
31
/* Fill @args with an FFA_ERROR response carrying @error in w2 */
static void ffa_set_error(struct thread_smc_1_2_regs *args, uint32_t error)
{
	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, error, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
37
/* Fill @args with an FFA_SUCCESS_32 response, all other registers zero */
static void ffa_success(struct thread_smc_1_2_regs *args)
{
	spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
}
42
ffa_get_dst(struct thread_smc_1_2_regs * args,struct sp_session * caller,struct sp_session ** dst)43 static TEE_Result ffa_get_dst(struct thread_smc_1_2_regs *args,
44 struct sp_session *caller,
45 struct sp_session **dst)
46 {
47 struct sp_session *s = NULL;
48
49 s = sp_get_session(FFA_DST(args->a1));
50
51 /* Message came from the NW */
52 if (!caller) {
53 if (!s) {
54 EMSG("Neither destination nor source is a SP");
55 return FFA_INVALID_PARAMETERS;
56 }
57 } else {
58 /* Check if the source matches the endpoint we came from */
59 if (FFA_SRC(args->a1) != caller->endpoint_id) {
60 EMSG("Source address doesn't match the endpoint id");
61 return FFA_INVALID_PARAMETERS;
62 }
63 }
64
65 *dst = s;
66
67 return FFA_OK;
68 }
69
find_sp_mem_receiver(struct sp_session * s,struct sp_mem * smem)70 static struct sp_mem_receiver *find_sp_mem_receiver(struct sp_session *s,
71 struct sp_mem *smem)
72 {
73 struct sp_mem_receiver *receiver = NULL;
74
75 /*
76 * FF-A Spec 8.10.2:
77 * Each Handle identifies a single unique composite memory region
78 * description that is, there is a 1:1 mapping between the two.
79 *
80 * Each memory share has an unique handle. We can only have each SP
81 * once as a receiver in the memory share. For each receiver of a
82 * memory share, we have one sp_mem_access_descr object.
83 * This means that there can only be one SP linked to a specific
84 * struct sp_mem_access_descr.
85 */
86 SLIST_FOREACH(receiver, &smem->receivers, link) {
87 if (receiver->perm.endpoint_id == s->endpoint_id)
88 break;
89 }
90 return receiver;
91 }
92
add_mem_region_to_sp(struct ffa_mem_access_perm * access_perm,struct sp_mem * smem)93 static int add_mem_region_to_sp(struct ffa_mem_access_perm *access_perm,
94 struct sp_mem *smem)
95 {
96 struct sp_session *s = NULL;
97 struct sp_mem_receiver *receiver = NULL;
98 uint8_t perm = READ_ONCE(access_perm->perm);
99 uint16_t endpoint_id = READ_ONCE(access_perm->endpoint_id);
100
101 s = sp_get_session(endpoint_id);
102
103 /* Only add memory shares of loaded SPs */
104 if (!s)
105 return FFA_DENIED;
106
107 /* Only allow each endpoint once */
108 if (find_sp_mem_receiver(s, smem))
109 return FFA_DENIED;
110
111 if (perm & ~FFA_MEM_ACC_MASK)
112 return FFA_DENIED;
113
114 receiver = calloc(1, sizeof(struct sp_mem_receiver));
115 if (!receiver)
116 return FFA_NO_MEMORY;
117
118 receiver->smem = smem;
119
120 receiver->perm.endpoint_id = endpoint_id;
121 receiver->perm.perm = perm;
122 receiver->perm.flags = READ_ONCE(access_perm->flags);
123
124 SLIST_INSERT_HEAD(&smem->receivers, receiver, link);
125
126 return FFA_OK;
127 }
128
/*
 * Handle an FFA_MEM_SHARE request.
 *
 * @args:     SMC arguments, a1 = total descriptor length, a2 = fragment
 *            length, a3/a4 = optional buffer address/page count (must be
 *            zero, custom buffers are unsupported)
 * @rxtx:     RX/TX buffer pair, the transaction descriptor is parsed from
 *            rxtx->rx
 * @owner_sp: owning SP session, or NULL when the share comes from the
 *            normal world
 *
 * On success writes FFA_SUCCESS_32 and the 64-bit global handle (split
 * over a2/a3) into @args, otherwise an FFA_ERROR.
 */
static void spmc_sp_handle_mem_share(struct thread_smc_1_2_regs *args,
				     struct ffa_rxtx *rxtx,
				     struct sp_session *owner_sp)
{
	struct ffa_mem_transaction_x mem_trans = { };
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t global_handle = 0;
	int res = FFA_OK;

	/* Serialize access to the RX buffer while it's being parsed */
	cpu_spin_lock(&rxtx->spinlock);

	/* Descriptor fragments or custom buffers aren't supported yet. */
	if (frag_len != tot_len || args->a3 || args->a4)
		res = FFA_NOT_SUPPORTED;
	else if (frag_len > rxtx->size)
		res = FFA_INVALID_PARAMETERS;
	else
		res = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx,
						rxtx->size,
						tot_len, frag_len, &mem_trans);
	if (!res)
		res = spmc_sp_add_share(&mem_trans, rxtx, tot_len, frag_len,
					&global_handle, owner_sp);
	if (!res) {
		args->a3 = high32_from_64(global_handle);
		args->a2 = low32_from_64(global_handle);
		args->a1 = FFA_PARAM_MBZ;
		args->a0 = FFA_SUCCESS_32;
	} else {
		ffa_set_error(args, res);
	}

	cpu_spin_unlock(&rxtx->spinlock);
}
164
spmc_sp_add_sp_region(struct sp_mem * smem,struct ffa_address_range * mem_reg,struct sp_session * owner_sp,uint8_t highest_permission)165 static int spmc_sp_add_sp_region(struct sp_mem *smem,
166 struct ffa_address_range *mem_reg,
167 struct sp_session *owner_sp,
168 uint8_t highest_permission)
169 {
170 struct sp_ctx *sp_ctx = NULL;
171 uint64_t va = READ_ONCE(mem_reg->address);
172 int res = FFA_OK;
173 uint64_t region_len = READ_ONCE(mem_reg->page_count) * SMALL_PAGE_SIZE;
174 struct mobj *mobj = NULL;
175
176 sp_ctx = to_sp_ctx(owner_sp->ts_sess.ctx);
177
178 /*
179 * The memory region we try to share might not be linked to just one
180 * mobj. Create a new region for each mobj.
181 */
182 while (region_len) {
183 size_t len = region_len;
184 struct sp_mem_map_region *region = NULL;
185 uint16_t prot = 0;
186 size_t offs = 0;
187
188 /*
189 * There is already a mobj for each address that is in the SPs
190 * address range.
191 */
192 mobj = vm_get_mobj(&sp_ctx->uctx, va, &len, &prot, &offs);
193 if (!mobj)
194 return FFA_DENIED;
195
196 /*
197 * If we share memory from a SP, check if we are not sharing
198 * with a higher permission than the memory was originally
199 * mapped.
200 */
201 if ((highest_permission & FFA_MEM_ACC_RW) &&
202 !(prot & TEE_MATTR_UW)) {
203 res = FFA_DENIED;
204 goto err;
205 }
206
207 if ((highest_permission & FFA_MEM_ACC_EXE) &&
208 !(prot & TEE_MATTR_UX)) {
209 res = FFA_DENIED;
210 goto err;
211 }
212
213 region = calloc(1, sizeof(*region));
214 region->mobj = mobj;
215 region->page_offset = offs;
216 region->page_count = len / SMALL_PAGE_SIZE;
217
218 if (!sp_has_exclusive_access(region, &sp_ctx->uctx)) {
219 free(region);
220 res = FFA_DENIED;
221 goto err;
222 }
223
224 va += len;
225 region_len -= len;
226 SLIST_INSERT_HEAD(&smem->regions, region, link);
227 }
228
229 return FFA_OK;
230 err:
231 mobj_put(mobj);
232
233 return res;
234 }
235
/*
 * Add a memory region shared from the normal world to @smem.
 *
 * Builds one mobj covering all address ranges of @mem_reg and records it
 * as a single sp_mem_map_region. Returns FFA_OK or an FFA_* error code.
 */
static int spmc_sp_add_nw_region(struct sp_mem *smem,
				 struct ffa_mem_region *mem_reg)
{
	uint64_t page_count = READ_ONCE(mem_reg->total_page_count);
	struct sp_mem_map_region *region = NULL;
	struct mobj *m = sp_mem_new_mobj(page_count, TEE_MATTR_MEM_TYPE_CACHED,
					 false);
	unsigned int i = 0;
	unsigned int idx = 0;
	int res = FFA_OK;
	uint64_t address_count = READ_ONCE(mem_reg->address_range_count);

	if (!m)
		return FFA_NO_MEMORY;

	/* Append the pages of every address range to the mobj */
	for (i = 0; i < address_count; i++) {
		struct ffa_address_range *addr_range = NULL;

		addr_range = &mem_reg->address_range_array[i];
		if (sp_mem_add_pages(m, &idx,
				     READ_ONCE(addr_range->address),
				     READ_ONCE(addr_range->page_count))) {
			res = FFA_DENIED;
			goto clean_up;
		}
	}

	region = calloc(1, sizeof(*region));
	if (!region) {
		res = FFA_NO_MEMORY;
		goto clean_up;
	}

	region->mobj = m;
	region->page_count = page_count;

	/* Refuse the share unless we have exclusive access to the region */
	if (!sp_has_exclusive_access(region, NULL)) {
		free(region);
		res = FFA_DENIED;
		goto clean_up;
	}

	SLIST_INSERT_HEAD(&smem->regions, region, link);
	return FFA_OK;
clean_up:
	mobj_put(m);
	return res;
}
284
/*
 * Register a new memory share described by @mem_trans.
 *
 * @mem_trans:     parsed memory transaction descriptor
 * @rxtx:          RX/TX buffer pair, the full descriptor is read from
 *                 rxtx->rx
 * @blen:          total length of the descriptor
 * @flen:          length of this fragment, must equal @blen (no
 *                 fragmentation support)
 * @global_handle: output, handle identifying the new share
 * @owner_sp:      owning SP session, or NULL when the owner is a normal
 *                 world endpoint
 *
 * Validates the descriptor (offsets, sizes, page counts, all with
 * overflow checks), creates the sp_mem object with its regions and
 * receivers and publishes it with sp_mem_add().
 * Returns FFA_OK or an FFA_* error code.
 */
int spmc_sp_add_share(struct ffa_mem_transaction_x *mem_trans,
		      struct ffa_rxtx *rxtx, size_t blen, size_t flen,
		      uint64_t *global_handle, struct sp_session *owner_sp)
{
	int res = FFA_INVALID_PARAMETERS;
	unsigned int num_mem_accs = 0;
	unsigned int i = 0;
	struct ffa_mem_access_common *mem_acc = NULL;
	size_t needed_size = 0;
	size_t addr_range_offs = 0;
	struct ffa_mem_region *mem_reg = NULL;
	uint8_t highest_permission = 0;
	struct sp_mem *smem = NULL;
	uint16_t sender_id = mem_trans->sender_id;
	size_t addr_range_cnt = 0;
	struct ffa_address_range *addr_range = NULL;
	size_t total_page_count = 0;
	size_t page_count_sum = 0;
	vaddr_t mem_acc_base = 0;
	size_t mem_acc_size = 0;

	if (blen != flen) {
		DMSG("Fragmented memory share is not supported for SPs");
		return FFA_NOT_SUPPORTED;
	}

	smem = sp_mem_new();
	if (!smem)
		return FFA_NO_MEMORY;

	/* The sender must be the owner of the share */
	if ((owner_sp && owner_sp->endpoint_id != sender_id) ||
	    (!owner_sp && sp_get_session(sender_id))) {
		res = FFA_DENIED;
		goto cleanup;
	}

	mem_acc_size = mem_trans->mem_access_size;
	num_mem_accs = mem_trans->mem_access_count;
	mem_acc_base = (vaddr_t)rxtx->rx + mem_trans->mem_access_offs;

	if (!num_mem_accs) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Store the ffa_mem_transaction */
	smem->sender_id = sender_id;
	smem->mem_reg_attr = mem_trans->mem_reg_attr;
	smem->flags = mem_trans->flags;
	smem->tag = mem_trans->tag;

	/* Check that the access descriptor array fits inside the buffer */
	if (MUL_OVERFLOW(num_mem_accs, mem_acc_size, &needed_size) ||
	    ADD_OVERFLOW(needed_size, mem_trans->mem_access_offs,
			 &needed_size) || needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Combine the permissions of all receivers */
	for (i = 0; i < num_mem_accs; i++) {
		mem_acc = (void *)(mem_acc_base + i * mem_acc_size);
		highest_permission |= READ_ONCE(mem_acc->access_perm.perm);
	}

	/*
	 * Check if the memory region array fits into the buffer.
	 * NOTE(review): region_offs is read from the last access
	 * descriptor; this assumes all receivers use the same region
	 * offset — confirm against the FF-A spec.
	 */
	addr_range_offs = READ_ONCE(mem_acc->region_offs);

	if (ADD_OVERFLOW(addr_range_offs, sizeof(*mem_reg), &needed_size) ||
	    needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	mem_reg = (void *)((char *)rxtx->rx + addr_range_offs);
	addr_range_cnt = READ_ONCE(mem_reg->address_range_count);
	total_page_count = READ_ONCE(mem_reg->total_page_count);

	/* Memory transaction without address ranges or pages is invalid */
	if (!addr_range_cnt || !total_page_count) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Check if the region descriptors fit into the buffer */
	if (MUL_OVERFLOW(addr_range_cnt, sizeof(*addr_range), &needed_size) ||
	    ADD_OVERFLOW(needed_size, addr_range_offs, &needed_size) ||
	    needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	page_count_sum = 0;
	for (i = 0; i < addr_range_cnt; i++) {
		addr_range = &mem_reg->address_range_array[i];

		/* Memory region without pages is invalid */
		if (!addr_range->page_count) {
			res = FFA_INVALID_PARAMETERS;
			goto cleanup;
		}

		/* Sum the page count of each region */
		if (ADD_OVERFLOW(page_count_sum, addr_range->page_count,
				 &page_count_sum)) {
			res = FFA_INVALID_PARAMETERS;
			goto cleanup;
		}
	}

	/* Validate total page count */
	if (total_page_count != page_count_sum) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Iterate over all the addresses */
	if (owner_sp) {
		for (i = 0; i < addr_range_cnt; i++) {
			addr_range = &mem_reg->address_range_array[i];
			res = spmc_sp_add_sp_region(smem, addr_range,
						    owner_sp,
						    highest_permission);
			if (res)
				goto cleanup;
		}
	} else {
		res = spmc_sp_add_nw_region(smem, mem_reg);
		if (res)
			goto cleanup;
	}

	/* Add the memory address to the SP */
	for (i = 0; i < num_mem_accs; i++) {
		mem_acc = (void *)(mem_acc_base + i * mem_acc_size);
		res = add_mem_region_to_sp(&mem_acc->access_perm, smem);
		if (res)
			goto cleanup;
	}
	*global_handle = smem->global_handle;
	sp_mem_add(smem);

	return FFA_OK;

cleanup:
	sp_mem_remove(smem);
	return res;
}
431
spmc_sp_set_to_preempted(struct ts_session * ts_sess)432 void spmc_sp_set_to_preempted(struct ts_session *ts_sess)
433 {
434 if (ts_sess && is_sp_ctx(ts_sess->ctx)) {
435 struct sp_session *sp_sess = to_sp_session(ts_sess);
436
437 cpu_spin_lock(&sp_sess->spinlock);
438 assert(sp_sess->state == sp_busy);
439 sp_sess->state = sp_preempted;
440 cpu_spin_unlock(&sp_sess->spinlock);
441 }
442 }
443
spmc_sp_resume_from_preempted(uint16_t endpoint_id,uint16_t thread_id)444 int spmc_sp_resume_from_preempted(uint16_t endpoint_id, uint16_t thread_id)
445 {
446 struct sp_session *sp_sess = sp_get_session(endpoint_id);
447
448 if (!sp_sess)
449 return FFA_INVALID_PARAMETERS;
450
451 if (sp_sess->state != sp_preempted || sp_sess->thread_id != thread_id)
452 return FFA_DENIED;
453
454 cpu_spin_lock(&sp_sess->spinlock);
455 sp_sess->state = sp_busy;
456 cpu_spin_unlock(&sp_sess->spinlock);
457
458 thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
459 panic();
460 }
461
check_rxtx(struct ffa_rxtx * rxtx)462 static bool check_rxtx(struct ffa_rxtx *rxtx)
463 {
464 return rxtx && rxtx->rx && rxtx->tx && rxtx->size > 0;
465 }
466
/*
 * Validate an FFA_MEM_RETRIEVE_REQ from an SP against the original share.
 *
 * @receiver:  the caller's receiver entry in the share
 * @ffa_vers:  FF-A version negotiated for the RX/TX buffers
 * @mem_trans: parsed transaction descriptor of the retrieve request
 * @rx:        RX buffer holding the request
 * @smem:      shared memory object identified by the request's handle
 * @tx_len:    size of the TX buffer available for the response
 *
 * Returns TEE_SUCCESS when the request is consistent with the share and
 * the response fits into the TX buffer.
 * NOTE(review): failure paths mix FFA_* codes into a TEE_Result; the only
 * caller just tests for != TEE_SUCCESS — confirm before relying on the
 * specific error value.
 */
static TEE_Result
check_retrieve_request(struct sp_mem_receiver *receiver, uint32_t ffa_vers,
		       struct ffa_mem_transaction_x *mem_trans,
		       void *rx, struct sp_mem *smem, int64_t tx_len)
{
	struct ffa_mem_access_common *retr_access = NULL;
	uint8_t share_perm = receiver->perm.perm;
	uint32_t retr_perm = 0;
	uint32_t retr_flags = mem_trans->flags;
	uint64_t retr_tag = mem_trans->tag;
	struct sp_mem_map_region *reg = NULL;

	/*
	 * The request came from the endpoint. It should only have one
	 * ffa_mem_access element
	 */
	if (mem_trans->mem_access_count != 1)
		return TEE_ERROR_BAD_PARAMETERS;

	retr_access = (void *)((vaddr_t)rx + mem_trans->mem_access_offs);
	retr_perm = READ_ONCE(retr_access->access_perm.perm);

	/* Check if tag is correct */
	if (receiver->smem->tag != retr_tag) {
		EMSG("Incorrect tag %#"PRIx64" %#"PRIx64, receiver->smem->tag,
		     retr_tag);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/* Check permissions and flags */
	if ((retr_perm & FFA_MEM_ACC_RW) &&
	    !(share_perm & FFA_MEM_ACC_RW)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if ((retr_perm & FFA_MEM_ACC_EXE) &&
	    !(share_perm & FFA_MEM_ACC_EXE)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (retr_flags & FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) {
		DMSG("CLEAR_RELINQUISH is not allowed for FFA_SHARE");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/*
	 * Check if there is enough space in the tx buffer to send the respons.
	 */
	if (ffa_vers <= FFA_VERSION_1_0)
		tx_len -= sizeof(struct ffa_mem_transaction_1_0);
	else
		tx_len -= sizeof(struct ffa_mem_transaction_1_1);
	tx_len -= mem_trans->mem_access_size + sizeof(struct ffa_mem_region);

	if (tx_len < 0)
		return FFA_NO_MEMORY;

	/* One ffa_address_range is emitted per region in the response */
	SLIST_FOREACH(reg, &smem->regions, link) {
		tx_len -= sizeof(struct ffa_address_range);
		if (tx_len < 0)
			return FFA_NO_MEMORY;
	}

	return TEE_SUCCESS;
}
534
/*
 * Write an FFA_MEM_RETRIEVE_RESP descriptor into @dst_buffer (the SP's
 * RX buffer): a memory transaction descriptor (layout depends on
 * @ffa_vers), exactly one memory access descriptor for @receiver, and
 * one memory region descriptor listing every region of @smem translated
 * to the VA space of @s.
 */
static void create_retrieve_response(uint32_t ffa_vers, void *dst_buffer,
				     struct sp_mem_receiver *receiver,
				     struct sp_mem *smem, struct sp_session *s)
{
	size_t off = 0;
	struct ffa_mem_region *dst_region = NULL;
	struct ffa_address_range *addr_dst = NULL;
	struct sp_mem_map_region *reg = NULL;
	struct ffa_mem_access_common *mem_acc = NULL;
	size_t mem_acc_size = 0;

	/* The access descriptor grew in FF-A v1.2 */
	if (ffa_vers <= FFA_VERSION_1_1)
		mem_acc_size = sizeof(struct ffa_mem_access_1_0);
	else
		mem_acc_size = sizeof(struct ffa_mem_access_1_2);

	/*
	 * we respond with a ffa_mem_retrieve_resp which defines the
	 * following data in the rx buffer of the sp.
	 * struct mem_transaction_descr
	 * struct mem_access_descr (always 1 element)
	 * struct mem_region_descr
	 */
	if (ffa_vers <= FFA_VERSION_1_0) {
		struct ffa_mem_transaction_1_0 *d_ds = dst_buffer;

		memset(d_ds, 0, sizeof(*d_ds));

		off = sizeof(*d_ds);
		mem_acc = (void *)d_ds->mem_access_array;

		/* copy the mem_transaction_descr */
		d_ds->sender_id = receiver->smem->sender_id;
		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
		d_ds->tag = receiver->smem->tag;
		d_ds->mem_access_count = 1;
	} else {
		struct ffa_mem_transaction_1_1 *d_ds = dst_buffer;

		memset(d_ds, 0, sizeof(*d_ds));

		off = sizeof(*d_ds);
		mem_acc = (void *)(d_ds + 1);

		d_ds->sender_id = receiver->smem->sender_id;
		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
		d_ds->tag = receiver->smem->tag;
		d_ds->mem_access_size = mem_acc_size;
		d_ds->mem_access_count = 1;
		d_ds->mem_access_offs = off;
	}

	off += mem_acc_size;
	dst_region = (struct ffa_mem_region *)((vaddr_t)dst_buffer + off);

	/* Copy the mem_accsess_descr */
	mem_acc->region_offs = off;
	memcpy(&mem_acc->access_perm, &receiver->perm,
	       sizeof(struct ffa_mem_access_perm));

	/* Copy the mem_region_descr */
	memset(dst_region, 0, sizeof(*dst_region));
	dst_region->address_range_count = 0;
	dst_region->total_page_count = 0;

	addr_dst = dst_region->address_range_array;

	/* Emit one address range per region, counting pages as we go */
	SLIST_FOREACH(reg, &smem->regions, link) {
		uint32_t offset = reg->page_offset;
		struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);

		addr_dst->address = (uint64_t)sp_mem_get_va(&ctx->uctx,
							    offset,
							    reg->mobj);
		addr_dst->page_count = reg->page_count;
		dst_region->address_range_count++;

		dst_region->total_page_count += addr_dst->page_count;
	}
}
617
ffa_mem_retrieve(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp,struct ffa_rxtx * rxtx)618 static void ffa_mem_retrieve(struct thread_smc_1_2_regs *args,
619 struct sp_session *caller_sp,
620 struct ffa_rxtx *rxtx)
621 {
622 struct ffa_mem_transaction_x mem_trans = { };
623 uint32_t tot_len = args->a1;
624 uint32_t frag_len = args->a2;
625 int ret = FFA_OK;
626 size_t tx_len = 0;
627 struct ffa_mem_access_common *mem_acc = NULL;
628 struct ffa_mem_region *mem_region = NULL;
629 uint64_t va = 0;
630 struct sp_mem *smem = NULL;
631 struct sp_mem_receiver *receiver = NULL;
632 uint32_t exceptions = 0;
633 uint32_t address_offset = 0;
634 size_t needed_size = 0;
635
636 if (!check_rxtx(rxtx) || !rxtx->tx_is_mine) {
637 ret = FFA_DENIED;
638 goto err;
639 }
640 /* Descriptor fragments aren't supported yet. */
641 if (frag_len != tot_len) {
642 ret = FFA_NOT_SUPPORTED;
643 goto err;
644 }
645 if (frag_len > rxtx->size) {
646 ret = FFA_INVALID_PARAMETERS;
647 goto err;
648 }
649
650 tx_len = rxtx->size;
651
652 ret = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, rxtx->size,
653 tot_len, frag_len, &mem_trans);
654 if (ret)
655 goto err;
656
657 smem = sp_mem_get(mem_trans.global_handle);
658 if (!smem) {
659 DMSG("Incorrect handle");
660 ret = FFA_DENIED;
661 goto err;
662 }
663
664 receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
665
666 mem_acc = (void *)((vaddr_t)rxtx->rx + mem_trans.mem_access_offs);
667 address_offset = READ_ONCE(mem_acc->region_offs);
668
669 if (ADD_OVERFLOW(address_offset, sizeof(struct ffa_mem_region),
670 &needed_size) || needed_size > tx_len) {
671 ret = FFA_INVALID_PARAMETERS;
672 goto err;
673 }
674
675 if (check_retrieve_request(receiver, rxtx->ffa_vers, &mem_trans,
676 rxtx->rx, smem, tx_len) != TEE_SUCCESS) {
677 ret = FFA_INVALID_PARAMETERS;
678 goto err;
679 }
680
681 exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
682
683 if (receiver->ref_count == UINT8_MAX) {
684 ret = FFA_DENIED;
685 cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
686 goto err;
687 }
688
689 receiver->ref_count++;
690
691 /* We only need to map the region the first time we request it. */
692 if (receiver->ref_count == 1) {
693 TEE_Result ret_map = TEE_SUCCESS;
694
695 cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
696
697 /*
698 * Try to map the memory linked to the handle in
699 * sp_mem_access_descr.
700 */
701 mem_region = (struct ffa_mem_region *)((vaddr_t)rxtx->rx +
702 address_offset);
703
704 va = READ_ONCE(mem_region->address_range_array[0].address);
705 ret_map = sp_map_shared(caller_sp, receiver, smem, &va);
706
707 if (ret_map) {
708 EMSG("Could not map memory region: %#"PRIx32, ret_map);
709 exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
710 receiver->ref_count--;
711 cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
712 ret = FFA_DENIED;
713 goto err;
714 }
715 } else {
716 cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
717 }
718
719 create_retrieve_response(rxtx->ffa_vers, rxtx->tx, receiver, smem,
720 caller_sp);
721
722 args->a0 = FFA_MEM_RETRIEVE_RESP;
723 args->a1 = tx_len;
724 args->a2 = tx_len;
725
726 rxtx->tx_is_mine = false;
727
728 return;
729 err:
730 ffa_set_error(args, ret);
731 }
732
ffa_mem_relinquish(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp,struct ffa_rxtx * rxtx)733 static void ffa_mem_relinquish(struct thread_smc_1_2_regs *args,
734 struct sp_session *caller_sp,
735 struct ffa_rxtx *rxtx)
736 {
737 struct sp_mem *smem = NULL;
738 struct ffa_mem_relinquish *mem = rxtx->rx;
739 struct sp_mem_receiver *receiver = NULL;
740 int err = FFA_NOT_SUPPORTED;
741 uint32_t exceptions = 0;
742
743 if (!check_rxtx(rxtx)) {
744 ffa_set_error(args, FFA_DENIED);
745 return;
746 }
747
748 exceptions = cpu_spin_lock_xsave(&rxtx->spinlock);
749 smem = sp_mem_get(READ_ONCE(mem->handle));
750
751 if (!smem) {
752 DMSG("Incorrect handle");
753 err = FFA_DENIED;
754 goto err_unlock_rxtwx;
755 }
756
757 if (READ_ONCE(mem->endpoint_count) != 1) {
758 DMSG("Incorrect endpoint count");
759 err = FFA_INVALID_PARAMETERS;
760 goto err_unlock_rxtwx;
761 }
762
763 if (READ_ONCE(mem->endpoint_id_array[0]) != caller_sp->endpoint_id) {
764 DMSG("Incorrect endpoint id");
765 err = FFA_DENIED;
766 goto err_unlock_rxtwx;
767 }
768
769 cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
770
771 receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
772
773 exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
774 if (!receiver->ref_count) {
775 DMSG("To many relinquish requests");
776 err = FFA_DENIED;
777 goto err_unlock_memref;
778 }
779
780 receiver->ref_count--;
781 if (!receiver->ref_count) {
782 cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
783 if (sp_unmap_ffa_regions(caller_sp, smem) != TEE_SUCCESS) {
784 DMSG("Failed to unmap region");
785 ffa_set_error(args, FFA_DENIED);
786 return;
787 }
788 } else {
789 cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
790 }
791
792 ffa_success(args);
793 return;
794
795 err_unlock_rxtwx:
796 cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
797 ffa_set_error(args, err);
798 return;
799 err_unlock_memref:
800 cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
801 ffa_set_error(args, err);
802 }
803
zero_mem_region(struct sp_mem * smem,struct sp_session * s)804 static void zero_mem_region(struct sp_mem *smem, struct sp_session *s)
805 {
806 void *addr = NULL;
807 struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
808 struct sp_mem_map_region *reg = NULL;
809
810 ts_push_current_session(&s->ts_sess);
811 SLIST_FOREACH(reg, &smem->regions, link) {
812 size_t sz = reg->page_count * SMALL_PAGE_SIZE;
813
814 addr = sp_mem_get_va(&ctx->uctx, reg->page_offset, reg->mobj);
815
816 assert(addr);
817 memset(addr, 0, sz);
818 }
819 ts_pop_current_session();
820 }
821
822 /*
823 * ffa_mem_reclaim returns false if it couldn't process the reclaim message.
824 * This happens when the memory regions was shared with the OP-TEE endpoint.
825 * After this thread_spmc calls handle_mem_reclaim() to make sure that the
826 * region is reclaimed from the OP-TEE endpoint.
827 */
bool ffa_mem_reclaim(struct thread_smc_1_2_regs *args,
		     struct sp_session *caller_sp)
{
	uint64_t handle = reg_pair_to_64(args->a2, args->a1);
	uint32_t flags = args->a3;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;

	/* Not an SP-managed handle: let the caller forward the reclaim */
	smem = sp_mem_get(handle);
	if (!smem)
		return false;

	/*
	 * If the caller is an SP, make sure that it is the owner of the share.
	 * If the call comes from NWd this is ensured by the hypervisor.
	 */
	if (caller_sp && caller_sp->endpoint_id != smem->sender_id) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return true;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	/* Make sure that all shares where relinquished */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->ref_count != 0) {
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	if (flags & FFA_MEMORY_REGION_FLAG_CLEAR) {
		if (caller_sp) {
			zero_mem_region(smem, caller_sp);
		} else {
			/*
			 * Currently we don't support zeroing Normal World
			 * memory. To do this we would have to map the memory
			 * again, zero it and unmap it.
			 */
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	/* Drop the share and report success */
	sp_mem_remove(smem);
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

	ffa_success(args);
	return true;
}
882
/*
 * Handle an FFA_MSG_SEND_DIRECT_REQ aimed at an SP or a logical SP.
 *
 * @args:      SMC arguments of the request
 * @caller_sp: calling SP session, or NULL when the request came from the
 *             normal world
 *
 * Validates endpoints, direct messaging properties and (for framework
 * messages) VM created/destroyed subscriptions, then enters the
 * destination SP. Returns the session to run next: the destination on
 * success, otherwise @caller_sp with an error in @args.
 */
static struct sp_session *
ffa_handle_sp_direct_req(struct thread_smc_1_2_regs *args,
			 struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	struct spmc_lsp_desc *lsp = NULL;
	TEE_Result res = FFA_OK;

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send message to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}
	if (!dst) {
		/* Not an SP: the target may be a logical SP */
		lsp = spmc_find_lsp_by_sp_id(FFA_DST(args->a1));
		if (!lsp) {
			EMSG("Request to normal world not supported");
			ffa_set_error(args, FFA_NOT_SUPPORTED);
			return caller_sp;
		}
	}

	if (dst == caller_sp) {
		EMSG("Cannot send message to own ID");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	if (caller_sp &&
	    !(caller_sp->props & FFA_PART_PROP_DIRECT_REQ_SEND)) {
		EMSG("SP 0x%"PRIx16" doesn't support sending direct requests",
		     caller_sp->endpoint_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (dst && !(dst->props & FFA_PART_PROP_DIRECT_REQ_RECV)) {
		EMSG("SP 0x%"PRIx16" doesn't support receipt of direct requests",
		     dst->endpoint_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (lsp && !(lsp->properties & FFA_PART_PROP_DIRECT_REQ_RECV)) {
		EMSG("LSP 0x%"PRIx16" doesn't support receipt of direct requests",
		     lsp->sp_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	/* Logical SPs are called in place, no thread switch needed */
	if (lsp) {
		lsp->direct_req(args, caller_sp);
		return caller_sp;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		switch (args->a2 & FFA_MSG_TYPE_MASK) {
		case FFA_MSG_SEND_VM_CREATED:
			/* The sender must be the NWd hypervisor (ID 0) */
			if (FFA_SRC(args->a1) != 0 || caller_sp) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The SP must be subscribed for this message */
			if (!(dst->props & FFA_PART_PROP_NOTIF_CREATED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		case FFA_MSG_SEND_VM_DESTROYED:
			/* The sender must be the NWd hypervisor (ID 0) */
			if (FFA_SRC(args->a1) != 0 || caller_sp) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The SP must be subscribed for this message */
			if (!(dst->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		default:
			ffa_set_error(args, FFA_NOT_SUPPORTED);
			return caller_sp;
		}
	} else if (args->a2 != FFA_PARAM_MBZ) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	cpu_spin_lock(&dst->spinlock);
	if (dst->state != sp_idle) {
		DMSG("SP is busy");
		ffa_set_error(args, FFA_BUSY);
		cpu_spin_unlock(&dst->spinlock);
		return caller_sp;
	}

	dst->state = sp_busy;
	cpu_spin_unlock(&dst->spinlock);

	/*
	 * Store the calling endpoint id. This will make it possible to check
	 * if the response is sent back to the correct endpoint.
	 */
	dst->caller_id = FFA_SRC(args->a1);

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP Panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return error to calling SP */
		return caller_sp;
	}

	return dst;
}
1004
1005 static struct sp_session *
ffa_handle_sp_direct_resp(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp)1006 ffa_handle_sp_direct_resp(struct thread_smc_1_2_regs *args,
1007 struct sp_session *caller_sp)
1008 {
1009 struct sp_session *dst = NULL;
1010 enum sp_status st = sp_idle;
1011 TEE_Result res = FFA_OK;
1012
1013 if (!caller_sp) {
1014 EMSG("Response from normal world not supported");
1015 ffa_set_error(args, FFA_NOT_SUPPORTED);
1016 return NULL;
1017 }
1018
1019 res = ffa_get_dst(args, caller_sp, &dst);
1020 if (res) {
1021 /* Tried to send response to an incorrect endpoint */
1022 ffa_set_error(args, res);
1023 return caller_sp;
1024 }
1025
1026 if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
1027 switch (args->a2 & FFA_MSG_TYPE_MASK) {
1028 case FFA_MSG_RESP_VM_CREATED:
1029 /* The destination must be the NWd hypervisor (ID 0) */
1030 if (FFA_DST(args->a1) != 0 || dst) {
1031 ffa_set_error(args, FFA_INVALID_PARAMETERS);
1032 return caller_sp;
1033 }
1034
1035 /* The SP must be subscribed for this message */
1036 if (!(dst->props & FFA_PART_PROP_NOTIF_CREATED)) {
1037 ffa_set_error(args, FFA_INVALID_PARAMETERS);
1038 return caller_sp;
1039 }
1040 break;
1041 case FFA_MSG_RESP_VM_DESTROYED:
1042 /* The destination must be the NWd hypervisor (ID 0) */
1043 if (FFA_DST(args->a1) != 0 || dst) {
1044 ffa_set_error(args, FFA_INVALID_PARAMETERS);
1045 return caller_sp;
1046 }
1047
1048 /* The SP must be subscribed for this message */
1049 if (!(dst->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
1050 ffa_set_error(args, FFA_INVALID_PARAMETERS);
1051 return caller_sp;
1052 }
1053 break;
1054 default:
1055 ffa_set_error(args, FFA_NOT_SUPPORTED);
1056 return caller_sp;
1057 }
1058 } else if (args->a2 != FFA_PARAM_MBZ) {
1059 ffa_set_error(args, FFA_INVALID_PARAMETERS);
1060 return caller_sp;
1061 }
1062
1063 if (dst) {
1064 cpu_spin_lock(&dst->spinlock);
1065 st = dst->state;
1066 cpu_spin_unlock(&dst->spinlock);
1067
1068 if (st != sp_busy) {
1069 EMSG("SP is not waiting for a request");
1070 ffa_set_error(args, FFA_INVALID_PARAMETERS);
1071 return caller_sp;
1072 }
1073 }
1074
1075 if (caller_sp->caller_id != FFA_DST(args->a1)) {
1076 EMSG("FFA_MSG_SEND_DIRECT_RESP to incorrect SP");
1077 ffa_set_error(args, FFA_INVALID_PARAMETERS);
1078 return caller_sp;
1079 }
1080
1081 caller_sp->caller_id = 0;
1082
1083 cpu_spin_lock(&caller_sp->spinlock);
1084 caller_sp->state = sp_idle;
1085 cpu_spin_unlock(&caller_sp->spinlock);
1086
1087 if (!dst) {
1088 /* Send message back to the NW */
1089 return NULL;
1090 }
1091
1092 /* Forward the message to the destination SP */
1093 res = sp_enter(args, dst);
1094 if (res) {
1095 /* The SP Panicked */
1096 ffa_set_error(args, FFA_ABORTED);
1097 /* Return error to calling SP */
1098 return caller_sp;
1099 }
1100 return dst;
1101 }
1102
1103 static struct sp_session *
ffa_handle_sp_error(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp)1104 ffa_handle_sp_error(struct thread_smc_1_2_regs *args,
1105 struct sp_session *caller_sp)
1106 {
1107 /* If caller_sp == NULL send message to Normal World */
1108 if (caller_sp && sp_enter(args, caller_sp)) {
1109 /*
1110 * We can not return the error. Unwind the call chain with one
1111 * link. Set the state of the SP to dead.
1112 */
1113 cpu_spin_lock(&caller_sp->spinlock);
1114 caller_sp->state = sp_dead;
1115 cpu_spin_unlock(&caller_sp->spinlock);
1116 /* Create error. */
1117 ffa_set_error(args, FFA_ABORTED);
1118 return sp_get_session(caller_sp->caller_id);
1119 }
1120
1121 return caller_sp;
1122 }
1123
handle_features(struct thread_smc_1_2_regs * args)1124 static void handle_features(struct thread_smc_1_2_regs *args)
1125 {
1126 uint32_t ret_fid = 0;
1127 uint32_t ret_w2 = FFA_PARAM_MBZ;
1128
1129 switch (args->a1) {
1130 #ifdef ARM64
1131 case FFA_RXTX_MAP_64:
1132 #endif
1133 case FFA_RXTX_MAP_32:
1134 ret_fid = FFA_SUCCESS_32;
1135 ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
1136 break;
1137 case FFA_ERROR:
1138 case FFA_VERSION:
1139 case FFA_SUCCESS_32:
1140 #ifdef ARM64
1141 case FFA_SUCCESS_64:
1142 #endif
1143 default:
1144 ret_fid = FFA_ERROR;
1145 ret_w2 = FFA_NOT_SUPPORTED;
1146 break;
1147 }
1148
1149 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
1150 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1151 }
1152
handle_mem_perm_get(struct thread_smc_1_2_regs * args,struct sp_session * sp_s)1153 static void handle_mem_perm_get(struct thread_smc_1_2_regs *args,
1154 struct sp_session *sp_s)
1155 {
1156 struct sp_ctx *sp_ctx = NULL;
1157 TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1158 uint16_t attrs = 0;
1159 uint32_t ret_fid = FFA_ERROR;
1160 uint32_t ret_val = FFA_INVALID_PARAMETERS;
1161
1162 /*
1163 * The FFA_MEM_PERM_GET interface is only allowed during initialization
1164 */
1165 if (sp_s->is_initialized) {
1166 ret_val = FFA_DENIED;
1167 goto out;
1168 }
1169
1170 sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
1171 if (!sp_ctx)
1172 goto out;
1173
1174 /* Query memory attributes */
1175 ts_push_current_session(&sp_s->ts_sess);
1176 res = vm_get_prot(&sp_ctx->uctx, args->a1, SMALL_PAGE_SIZE, &attrs);
1177 ts_pop_current_session();
1178 if (res)
1179 goto out;
1180
1181 /* Build response value */
1182 ret_fid = FFA_SUCCESS_32;
1183 ret_val = 0;
1184 if ((attrs & TEE_MATTR_URW) == TEE_MATTR_URW)
1185 ret_val |= FFA_MEM_PERM_RW;
1186 else if (attrs & TEE_MATTR_UR)
1187 ret_val |= FFA_MEM_PERM_RO;
1188
1189 if ((attrs & TEE_MATTR_UX) == 0)
1190 ret_val |= FFA_MEM_PERM_NX;
1191
1192 out:
1193 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
1194 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1195 }
1196
handle_mem_perm_set(struct thread_smc_1_2_regs * args,struct sp_session * sp_s)1197 static void handle_mem_perm_set(struct thread_smc_1_2_regs *args,
1198 struct sp_session *sp_s)
1199 {
1200 struct sp_ctx *sp_ctx = NULL;
1201 TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1202 size_t region_size = 0;
1203 uint32_t data_perm = 0;
1204 uint32_t instruction_perm = 0;
1205 uint16_t attrs = 0;
1206 uint32_t ret_fid = FFA_ERROR;
1207 uint32_t ret_val = FFA_INVALID_PARAMETERS;
1208
1209 /*
1210 * The FFA_MEM_PERM_GET interface is only allowed during initialization
1211 */
1212 if (sp_s->is_initialized) {
1213 ret_val = FFA_DENIED;
1214 goto out;
1215 }
1216
1217 sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
1218 if (!sp_ctx)
1219 goto out;
1220
1221 if (MUL_OVERFLOW(args->a2, SMALL_PAGE_SIZE, ®ion_size))
1222 goto out;
1223
1224 if (args->a3 & FFA_MEM_PERM_RESERVED) {
1225 /* Non-zero reserved bits */
1226 goto out;
1227 }
1228
1229 data_perm = args->a3 & FFA_MEM_PERM_DATA_PERM;
1230 instruction_perm = args->a3 & FFA_MEM_PERM_INSTRUCTION_PERM;
1231
1232 /* RWX access right configuration is not permitted */
1233 if (data_perm == FFA_MEM_PERM_RW && instruction_perm == FFA_MEM_PERM_X)
1234 goto out;
1235
1236 switch (data_perm) {
1237 case FFA_MEM_PERM_RO:
1238 attrs = TEE_MATTR_UR;
1239 break;
1240 case FFA_MEM_PERM_RW:
1241 attrs = TEE_MATTR_URW;
1242 break;
1243 default:
1244 /* Invalid permission value */
1245 goto out;
1246 }
1247
1248 if (instruction_perm == FFA_MEM_PERM_X)
1249 attrs |= TEE_MATTR_UX;
1250
1251 /* Set access rights */
1252 ts_push_current_session(&sp_s->ts_sess);
1253 res = vm_set_prot(&sp_ctx->uctx, args->a1, region_size, attrs);
1254 ts_pop_current_session();
1255 if (res != TEE_SUCCESS)
1256 goto out;
1257
1258 ret_fid = FFA_SUCCESS_32;
1259 ret_val = FFA_PARAM_MBZ;
1260
1261 out:
1262 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
1263 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1264 }
1265
spmc_handle_version(struct thread_smc_1_2_regs * args,struct ffa_rxtx * rxtx)1266 static void spmc_handle_version(struct thread_smc_1_2_regs *args,
1267 struct ffa_rxtx *rxtx)
1268 {
1269 spmc_set_args(args, spmc_exchange_version(args->a1, rxtx),
1270 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1271 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1272 }
1273
handle_console_log(uint32_t ffa_vers,struct thread_smc_1_2_regs * args)1274 static void handle_console_log(uint32_t ffa_vers,
1275 struct thread_smc_1_2_regs *args)
1276 {
1277 uint32_t ret_fid = FFA_ERROR;
1278 uint32_t ret_val = FFA_INVALID_PARAMETERS;
1279 size_t char_count = args->a1 & FFA_CONSOLE_LOG_CHAR_COUNT_MASK;
1280 char buffer[FFA_CONSOLE_LOG_64_MAX_MSG_LEN + 1] = { 0 };
1281 size_t max_length = 0;
1282 size_t reg_size = 0;
1283 size_t n = 0;
1284
1285 if (args->a0 == FFA_CONSOLE_LOG_64) {
1286 if (ffa_vers >= FFA_VERSION_1_2)
1287 max_length = FFA_CONSOLE_LOG_64_MAX_MSG_LEN;
1288 else
1289 max_length = FFA_CONSOLE_LOG_64_V1_1_MAX_MSG_LEN;
1290 reg_size = sizeof(uint64_t);
1291 } else {
1292 max_length = FFA_CONSOLE_LOG_32_MAX_MSG_LEN;
1293 reg_size = sizeof(uint32_t);
1294 }
1295
1296 if (char_count < 1 || char_count > max_length)
1297 goto out;
1298
1299 for (n = 0; n < char_count; n += reg_size) {
1300 /* + 2 since we're starting from W2/X2 */
1301 memcpy(buffer + n, &args->a[2 + n / reg_size],
1302 MIN(char_count - n, reg_size));
1303 }
1304
1305 buffer[char_count] = '\0';
1306
1307 trace_ext_puts(buffer);
1308
1309 ret_fid = FFA_SUCCESS_32;
1310 ret_val = FFA_PARAM_MBZ;
1311
1312 out:
1313 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
1314 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1315 }
1316
1317 /*
1318 * FF-A messages handler for SP. Every messages for or from a SP is handled
1319 * here. This is the entry of the sp_spmc kernel thread. The caller_sp is set
1320 * to NULL when it is the Normal World.
1321 */
spmc_sp_msg_handler(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp)1322 void spmc_sp_msg_handler(struct thread_smc_1_2_regs *args,
1323 struct sp_session *caller_sp)
1324 {
1325 thread_check_canaries();
1326 do {
1327 switch (args->a0) {
1328 #ifdef ARM64
1329 case FFA_MSG_SEND_DIRECT_REQ_64:
1330 #endif
1331 case FFA_MSG_SEND_DIRECT_REQ_32:
1332 caller_sp = ffa_handle_sp_direct_req(args, caller_sp);
1333 break;
1334 #ifdef ARM64
1335 case FFA_MSG_SEND_DIRECT_RESP_64:
1336 #endif
1337 case FFA_MSG_SEND_DIRECT_RESP_32:
1338 caller_sp = ffa_handle_sp_direct_resp(args, caller_sp);
1339 break;
1340 case FFA_ERROR:
1341 caller_sp = ffa_handle_sp_error(args, caller_sp);
1342 break;
1343 case FFA_MSG_WAIT:
1344 /* FFA_WAIT gives control back to NW */
1345 cpu_spin_lock(&caller_sp->spinlock);
1346 caller_sp->state = sp_idle;
1347 cpu_spin_unlock(&caller_sp->spinlock);
1348 caller_sp = NULL;
1349 break;
1350 #ifdef ARM64
1351 case FFA_RXTX_MAP_64:
1352 #endif
1353 case FFA_RXTX_MAP_32:
1354 ts_push_current_session(&caller_sp->ts_sess);
1355 spmc_handle_rxtx_map(args, &caller_sp->rxtx);
1356 ts_pop_current_session();
1357 sp_enter(args, caller_sp);
1358 break;
1359 case FFA_RXTX_UNMAP:
1360 ts_push_current_session(&caller_sp->ts_sess);
1361 spmc_handle_rxtx_unmap(args, &caller_sp->rxtx);
1362 ts_pop_current_session();
1363 sp_enter(args, caller_sp);
1364 break;
1365 case FFA_RX_RELEASE:
1366 ts_push_current_session(&caller_sp->ts_sess);
1367 spmc_handle_rx_release(args, &caller_sp->rxtx);
1368 ts_pop_current_session();
1369 sp_enter(args, caller_sp);
1370 break;
1371 case FFA_ID_GET:
1372 args->a0 = FFA_SUCCESS_32;
1373 args->a2 = caller_sp->endpoint_id;
1374 sp_enter(args, caller_sp);
1375 break;
1376 case FFA_VERSION:
1377 spmc_handle_version(args, &caller_sp->rxtx);
1378 sp_enter(args, caller_sp);
1379 break;
1380 case FFA_FEATURES:
1381 handle_features(args);
1382 sp_enter(args, caller_sp);
1383 break;
1384 case FFA_SPM_ID_GET:
1385 spmc_handle_spm_id_get(args);
1386 sp_enter(args, caller_sp);
1387 break;
1388 case FFA_PARTITION_INFO_GET:
1389 ts_push_current_session(&caller_sp->ts_sess);
1390 spmc_handle_partition_info_get(args, &caller_sp->rxtx);
1391 ts_pop_current_session();
1392 sp_enter(args, caller_sp);
1393 break;
1394 #ifdef ARM64
1395 case FFA_MEM_SHARE_64:
1396 #endif
1397 case FFA_MEM_SHARE_32:
1398 ts_push_current_session(&caller_sp->ts_sess);
1399 spmc_sp_handle_mem_share(args, &caller_sp->rxtx,
1400 caller_sp);
1401 ts_pop_current_session();
1402 sp_enter(args, caller_sp);
1403 break;
1404 #ifdef ARM64
1405 case FFA_MEM_RETRIEVE_REQ_64:
1406 #endif
1407 case FFA_MEM_RETRIEVE_REQ_32:
1408 ts_push_current_session(&caller_sp->ts_sess);
1409 ffa_mem_retrieve(args, caller_sp, &caller_sp->rxtx);
1410 ts_pop_current_session();
1411 sp_enter(args, caller_sp);
1412 break;
1413 case FFA_MEM_RELINQUISH:
1414 ts_push_current_session(&caller_sp->ts_sess);
1415 ffa_mem_relinquish(args, caller_sp, &caller_sp->rxtx);
1416 ts_pop_current_session();
1417 sp_enter(args, caller_sp);
1418 break;
1419 case FFA_MEM_RECLAIM:
1420 ffa_mem_reclaim(args, caller_sp);
1421 sp_enter(args, caller_sp);
1422 break;
1423 #ifdef ARM64
1424 case FFA_MEM_PERM_GET_64:
1425 #endif
1426 case FFA_MEM_PERM_GET_32:
1427 handle_mem_perm_get(args, caller_sp);
1428 sp_enter(args, caller_sp);
1429 break;
1430
1431 #ifdef ARM64
1432 case FFA_MEM_PERM_SET_64:
1433 #endif
1434 case FFA_MEM_PERM_SET_32:
1435 handle_mem_perm_set(args, caller_sp);
1436 sp_enter(args, caller_sp);
1437 break;
1438
1439 #ifdef ARM64
1440 case FFA_CONSOLE_LOG_64:
1441 #endif
1442 case FFA_CONSOLE_LOG_32:
1443 handle_console_log(caller_sp->rxtx.ffa_vers, args);
1444 sp_enter(args, caller_sp);
1445 break;
1446
1447 default:
1448 EMSG("Unhandled FFA function ID %#"PRIx32,
1449 (uint32_t)args->a0);
1450 ffa_set_error(args, FFA_INVALID_PARAMETERS);
1451 sp_enter(args, caller_sp);
1452 }
1453 } while (caller_sp);
1454 }
1455