// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021-2024, Arm Limited
 */
#include <assert.h>
#include <io.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>
#include <mm/vm.h>
#include <optee_ffa.h>
#include <string.h>

static unsigned int mem_ref_lock = SPINLOCK_UNLOCK;

int spmc_sp_start_thread(struct thread_smc_1_2_regs *args)
{
	thread_sp_alloc_and_run(&args->arg11);
	/*
	 * thread_sp_alloc_and_run() only returns if all threads are busy.
	 * The caller must try again.
	 */
	return FFA_BUSY;
}

static void ffa_set_error(struct thread_smc_1_2_regs *args, uint32_t error)
{
	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, error, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void ffa_success(struct thread_smc_1_2_regs *args)
{
	spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
}

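/*
 * Note on the direct message register layout used below: per the FF-A
 * specification, w1 of FFA_MSG_SEND_DIRECT_REQ/RESP packs the sender
 * endpoint ID in bits [31:16] and the receiver endpoint ID in bits [15:0];
 * FFA_SRC() and FFA_DST() extract those fields from args->a1.
 */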
static TEE_Result ffa_get_dst(struct thread_smc_1_2_regs *args,
			      struct sp_session *caller,
			      struct sp_session **dst)
{
	struct sp_session *s = NULL;

	s = sp_get_session(FFA_DST(args->a1));

	/* Message came from the NW */
	if (!caller) {
		if (!s) {
			EMSG("Neither destination nor source is an SP");
			return FFA_INVALID_PARAMETERS;
		}
	} else {
		/* Check if the source matches the endpoint we came from */
		if (FFA_SRC(args->a1) != caller->endpoint_id) {
			EMSG("Source address doesn't match the endpoint id");
			return FFA_INVALID_PARAMETERS;
		}
	}

	*dst = s;

	return FFA_OK;
}

static struct sp_mem_receiver *find_sp_mem_receiver(struct sp_session *s,
						    struct sp_mem *smem)
{
	struct sp_mem_receiver *receiver = NULL;

	/*
	 * FF-A Spec 8.10.2:
	 * Each Handle identifies a single unique composite memory region
	 * description, that is, there is a 1:1 mapping between the two.
	 *
	 * Each memory share has a unique handle. We can only have each SP
	 * once as a receiver in the memory share. For each receiver of a
	 * memory share, we have one sp_mem_access_descr object.
	 * This means that there can only be one SP linked to a specific
	 * struct sp_mem_access_descr.
	 */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->perm.endpoint_id == s->endpoint_id)
			break;
	}
	return receiver;
}

static int add_mem_region_to_sp(struct ffa_mem_access *mem_acc,
				struct sp_mem *smem)
{
	struct ffa_mem_access_perm *access_perm = &mem_acc->access_perm;
	struct sp_session *s = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint8_t perm = READ_ONCE(access_perm->perm);
	uint16_t endpoint_id = READ_ONCE(access_perm->endpoint_id);

	s = sp_get_session(endpoint_id);

	/* Only add memory shares of loaded SPs */
	if (!s)
		return FFA_DENIED;

	/* Only allow each endpoint once */
	if (find_sp_mem_receiver(s, smem))
		return FFA_DENIED;

	if (perm & ~FFA_MEM_ACC_MASK)
		return FFA_DENIED;

	receiver = calloc(1, sizeof(struct sp_mem_receiver));
	if (!receiver)
		return FFA_NO_MEMORY;

	receiver->smem = smem;

	receiver->perm.endpoint_id = endpoint_id;
	receiver->perm.perm = perm;
	receiver->perm.flags = READ_ONCE(access_perm->flags);

	SLIST_INSERT_HEAD(&smem->receivers, receiver, link);

	return FFA_OK;
}

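/*
 * FFA_MEM_SHARE register usage handled below (FF-A spec): a1 carries the
 * total length of the memory transaction descriptor, a2 the length of the
 * fragment in the TX buffer, and a3/a4 the address and page count of an
 * optional caller-supplied buffer. Fragmentation and custom buffers are
 * rejected here, so a1 must equal a2 and a3/a4 must be zero.
 */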
static void spmc_sp_handle_mem_share(struct thread_smc_1_2_regs *args,
				     struct ffa_rxtx *rxtx,
				     struct sp_session *owner_sp)
{
	struct ffa_mem_transaction_x mem_trans = { };
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t global_handle = 0;
	int res = FFA_OK;

	cpu_spin_lock(&rxtx->spinlock);

	/* Descriptor fragments or custom buffers aren't supported yet. */
	if (frag_len != tot_len || args->a3 || args->a4)
		res = FFA_NOT_SUPPORTED;
	else if (frag_len > rxtx->size)
		res = FFA_INVALID_PARAMETERS;
	else
		res = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx,
						frag_len, &mem_trans);
	if (!res)
		res = spmc_sp_add_share(&mem_trans, rxtx, tot_len, frag_len,
					&global_handle, owner_sp);
	if (!res) {
		args->a3 = high32_from_64(global_handle);
		args->a2 = low32_from_64(global_handle);
		args->a1 = FFA_PARAM_MBZ;
		args->a0 = FFA_SUCCESS_32;
	} else {
		ffa_set_error(args, res);
	}

	cpu_spin_unlock(&rxtx->spinlock);
}

static int spmc_sp_add_sp_region(struct sp_mem *smem,
				 struct ffa_address_range *mem_reg,
				 struct sp_session *owner_sp,
				 uint8_t highest_permission)
{
	struct sp_ctx *sp_ctx = NULL;
	uint64_t va = READ_ONCE(mem_reg->address);
	int res = FFA_OK;
	uint64_t region_len = READ_ONCE(mem_reg->page_count) * SMALL_PAGE_SIZE;
	struct mobj *mobj = NULL;

	sp_ctx = to_sp_ctx(owner_sp->ts_sess.ctx);

	/*
	 * The memory region we try to share might not be linked to just one
	 * mobj. Create a new region for each mobj.
	 */
	while (region_len) {
		size_t len = region_len;
		struct sp_mem_map_region *region = NULL;
		uint16_t prot = 0;
		size_t offs = 0;

		/*
		 * There is already a mobj for each address that is in the
		 * SP's address range.
		 */
		mobj = vm_get_mobj(&sp_ctx->uctx, va, &len, &prot, &offs);
		if (!mobj)
			return FFA_DENIED;

		/*
		 * If we share memory from an SP, check that we are not
		 * sharing it with higher permissions than it was originally
		 * mapped with.
		 */
		if ((highest_permission & FFA_MEM_ACC_RW) &&
		    !(prot & TEE_MATTR_UW)) {
			res = FFA_DENIED;
			goto err;
		}

		if ((highest_permission & FFA_MEM_ACC_EXE) &&
		    !(prot & TEE_MATTR_UX)) {
			res = FFA_DENIED;
			goto err;
		}

		region = calloc(1, sizeof(*region));
		if (!region) {
			res = FFA_NO_MEMORY;
			goto err;
		}
		region->mobj = mobj;
		region->page_offset = offs;
		region->page_count = len / SMALL_PAGE_SIZE;

		if (!sp_has_exclusive_access(region, &sp_ctx->uctx)) {
			free(region);
			res = FFA_DENIED;
			goto err;
		}

		va += len;
		region_len -= len;
		SLIST_INSERT_HEAD(&smem->regions, region, link);
	}

	return FFA_OK;
err:
	mobj_put(mobj);

	return res;
}

static int spmc_sp_add_nw_region(struct sp_mem *smem,
				 struct ffa_mem_region *mem_reg)
{
	uint64_t page_count = READ_ONCE(mem_reg->total_page_count);
	struct sp_mem_map_region *region = NULL;
	struct mobj *m = sp_mem_new_mobj(page_count, TEE_MATTR_MEM_TYPE_CACHED,
					 false);
	unsigned int i = 0;
	unsigned int idx = 0;
	int res = FFA_OK;
	uint64_t address_count = READ_ONCE(mem_reg->address_range_count);

	if (!m)
		return FFA_NO_MEMORY;

	for (i = 0; i < address_count; i++) {
		struct ffa_address_range *addr_range = NULL;

		addr_range = &mem_reg->address_range_array[i];
		if (sp_mem_add_pages(m, &idx,
				     READ_ONCE(addr_range->address),
				     READ_ONCE(addr_range->page_count))) {
			res = FFA_DENIED;
			goto clean_up;
		}
	}

	region = calloc(1, sizeof(*region));
	if (!region) {
		res = FFA_NO_MEMORY;
		goto clean_up;
	}

	region->mobj = m;
	region->page_count = page_count;

	if (!sp_has_exclusive_access(region, NULL)) {
		free(region);
		res = FFA_DENIED;
		goto clean_up;
	}

	SLIST_INSERT_HEAD(&smem->regions, region, link);
	return FFA_OK;
clean_up:
	mobj_put(m);
	return res;
}

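/*
 * Expected layout of the transaction descriptor in the RX buffer, as
 * validated below (all offsets are relative to the start of the buffer):
 *
 *   struct ffa_mem_transaction_*                    version dependent header
 *   struct ffa_mem_access[mem_access_count]         at mem_access_offs
 *   struct ffa_mem_region                           at mem_acc[0].region_offs
 *   struct ffa_address_range[address_range_count]   trailing the region
 */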
int spmc_sp_add_share(struct ffa_mem_transaction_x *mem_trans,
		      struct ffa_rxtx *rxtx, size_t blen, size_t flen,
		      uint64_t *global_handle, struct sp_session *owner_sp)
{
	int res = FFA_INVALID_PARAMETERS;
	unsigned int num_mem_accs = 0;
	unsigned int i = 0;
	struct ffa_mem_access *mem_acc = NULL;
	size_t needed_size = 0;
	size_t addr_range_offs = 0;
	struct ffa_mem_region *mem_reg = NULL;
	uint8_t highest_permission = 0;
	struct sp_mem *smem = NULL;
	uint16_t sender_id = mem_trans->sender_id;
	size_t addr_range_cnt = 0;
	struct ffa_address_range *addr_range = NULL;
	size_t total_page_count = 0;
	size_t page_count_sum = 0;

	if (blen != flen) {
		DMSG("Fragmented memory share is not supported for SPs");
		return FFA_NOT_SUPPORTED;
	}

	smem = sp_mem_new();
	if (!smem)
		return FFA_NO_MEMORY;

	if ((owner_sp && owner_sp->endpoint_id != sender_id) ||
	    (!owner_sp && sp_get_session(sender_id))) {
		res = FFA_DENIED;
		goto cleanup;
	}

	num_mem_accs = mem_trans->mem_access_count;
	mem_acc = (void *)((vaddr_t)rxtx->rx + mem_trans->mem_access_offs);

	if (!num_mem_accs) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Store the ffa_mem_transaction */
	smem->sender_id = sender_id;
	smem->mem_reg_attr = mem_trans->mem_reg_attr;
	smem->flags = mem_trans->flags;
	smem->tag = mem_trans->tag;

	if (MUL_OVERFLOW(num_mem_accs, sizeof(*mem_acc), &needed_size) ||
	    ADD_OVERFLOW(needed_size, mem_trans->mem_access_offs,
			 &needed_size) || needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	for (i = 0; i < num_mem_accs; i++)
		highest_permission |= READ_ONCE(mem_acc[i].access_perm.perm);

	/* Check if the memory region descriptor fits into the buffer */
	addr_range_offs = READ_ONCE(mem_acc[0].region_offs);

	if (ADD_OVERFLOW(addr_range_offs, sizeof(*mem_reg), &needed_size) ||
	    needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	mem_reg = (void *)((char *)rxtx->rx + addr_range_offs);
	addr_range_cnt = READ_ONCE(mem_reg->address_range_count);
	total_page_count = READ_ONCE(mem_reg->total_page_count);

	/* Memory transaction without address ranges or pages is invalid */
	if (!addr_range_cnt || !total_page_count) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Check if the address range descriptors fit into the buffer */
	if (MUL_OVERFLOW(addr_range_cnt, sizeof(*addr_range), &needed_size) ||
	    ADD_OVERFLOW(needed_size, addr_range_offs, &needed_size) ||
	    needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	page_count_sum = 0;
	for (i = 0; i < addr_range_cnt; i++) {
		addr_range = &mem_reg->address_range_array[i];

		/* Memory region without pages is invalid */
		if (!addr_range->page_count) {
			res = FFA_INVALID_PARAMETERS;
			goto cleanup;
		}

		/* Sum the page count of each region */
		if (ADD_OVERFLOW(page_count_sum, addr_range->page_count,
				 &page_count_sum)) {
			res = FFA_INVALID_PARAMETERS;
			goto cleanup;
		}
	}

	/* Validate total page count */
	if (total_page_count != page_count_sum) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Iterate over all the addresses */
	if (owner_sp) {
		for (i = 0; i < addr_range_cnt; i++) {
			addr_range = &mem_reg->address_range_array[i];
			res = spmc_sp_add_sp_region(smem, addr_range,
						    owner_sp,
						    highest_permission);
			if (res)
				goto cleanup;
		}
	} else {
		res = spmc_sp_add_nw_region(smem, mem_reg);
		if (res)
			goto cleanup;
	}

	/* Register each receiver of the memory share */
	for (i = 0; i < num_mem_accs; i++) {
		res = add_mem_region_to_sp(&mem_acc[i], smem);
		if (res)
			goto cleanup;
	}
	*global_handle = smem->global_handle;
	sp_mem_add(smem);

	return FFA_OK;

cleanup:
	sp_mem_remove(smem);
	return res;
}

void spmc_sp_set_to_preempted(struct ts_session *ts_sess)
{
	if (ts_sess && is_sp_ctx(ts_sess->ctx)) {
		struct sp_session *sp_sess = to_sp_session(ts_sess);

		cpu_spin_lock(&sp_sess->spinlock);
		assert(sp_sess->state == sp_busy);
		sp_sess->state = sp_preempted;
		cpu_spin_unlock(&sp_sess->spinlock);
	}
}

int spmc_sp_resume_from_preempted(uint16_t endpoint_id, uint16_t thread_id)
{
	struct sp_session *sp_sess = sp_get_session(endpoint_id);

	if (!sp_sess)
		return FFA_INVALID_PARAMETERS;

	if (sp_sess->state != sp_preempted || sp_sess->thread_id != thread_id)
		return FFA_DENIED;

	cpu_spin_lock(&sp_sess->spinlock);
	sp_sess->state = sp_busy;
	cpu_spin_unlock(&sp_sess->spinlock);

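	/*
	 * thread_resume_from_rpc() is expected to hand control to the
	 * resumed thread and not return on success, so falling through to
	 * panic() indicates the thread could not be resumed.
	 */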
	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
	panic();
}

static bool check_rxtx(struct ffa_rxtx *rxtx)
{
	return rxtx && rxtx->rx && rxtx->tx && rxtx->size > 0;
}

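/*
 * Validate an FFA_MEM_RETRIEVE_REQ from an SP against the stored share:
 * exactly one receiver, a matching tag, no permission escalation and
 * enough room in the TX buffer for the response built later by
 * create_retrieve_response().
 */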
static TEE_Result
check_retrieve_request(struct sp_mem_receiver *receiver, uint32_t ffa_vers,
		       struct ffa_mem_transaction_x *mem_trans,
		       void *rx, struct sp_mem *smem, int64_t tx_len)
{
	struct ffa_mem_access *retr_access = NULL;
	uint8_t share_perm = receiver->perm.perm;
	uint32_t retr_perm = 0;
	uint32_t retr_flags = mem_trans->flags;
	uint64_t retr_tag = mem_trans->tag;
	struct sp_mem_map_region *reg = NULL;

	/*
	 * The request came from the endpoint. It should only have one
	 * ffa_mem_access element.
	 */
	if (mem_trans->mem_access_count != 1)
		return TEE_ERROR_BAD_PARAMETERS;

	retr_access = (void *)((vaddr_t)rx + mem_trans->mem_access_offs);
	retr_perm = READ_ONCE(retr_access->access_perm.perm);

	/* Check if the tag is correct */
	if (receiver->smem->tag != retr_tag) {
		EMSG("Incorrect tag %#"PRIx64" %#"PRIx64, receiver->smem->tag,
		     retr_tag);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/* Check permissions and flags */
	if ((retr_perm & FFA_MEM_ACC_RW) &&
	    !(share_perm & FFA_MEM_ACC_RW)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if ((retr_perm & FFA_MEM_ACC_EXE) &&
	    !(share_perm & FFA_MEM_ACC_EXE)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (retr_flags & FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) {
		DMSG("CLEAR_RELINQUISH is not allowed for FFA_SHARE");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/*
	 * Check if there is enough space in the TX buffer to send the
	 * response.
	 */
	if (ffa_vers <= FFA_VERSION_1_0)
		tx_len -= sizeof(struct ffa_mem_transaction_1_0);
	else
		tx_len -= sizeof(struct ffa_mem_transaction_1_1);
	tx_len -= sizeof(struct ffa_mem_access) +
		  sizeof(struct ffa_mem_region);

	if (tx_len < 0)
		return FFA_NO_MEMORY;

	SLIST_FOREACH(reg, &smem->regions, link) {
		tx_len -= sizeof(struct ffa_address_range);
		if (tx_len < 0)
			return FFA_NO_MEMORY;
	}

	return TEE_SUCCESS;
}

static void create_retrieve_response(uint32_t ffa_vers, void *dst_buffer,
				     struct sp_mem_receiver *receiver,
				     struct sp_mem *smem, struct sp_session *s)
{
	size_t off = 0;
	struct ffa_mem_region *dst_region = NULL;
	struct ffa_address_range *addr_dst = NULL;
	struct sp_mem_map_region *reg = NULL;
	struct ffa_mem_access *mem_acc = NULL;

	/*
	 * We respond with an FFA_MEM_RETRIEVE_RESP which places the
	 * following data in the RX buffer of the SP:
	 * struct mem_transaction_descr
	 * struct mem_access_descr (always 1 element)
	 * struct mem_region_descr
	 */
	if (ffa_vers <= FFA_VERSION_1_0) {
		struct ffa_mem_transaction_1_0 *d_ds = dst_buffer;

		memset(d_ds, 0, sizeof(*d_ds));

		off = sizeof(*d_ds);
		mem_acc = d_ds->mem_access_array;

		/* Copy the mem_transaction_descr */
		d_ds->sender_id = receiver->smem->sender_id;
		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
		d_ds->tag = receiver->smem->tag;
		d_ds->mem_access_count = 1;
	} else {
		struct ffa_mem_transaction_1_1 *d_ds = dst_buffer;

		memset(d_ds, 0, sizeof(*d_ds));

		off = sizeof(*d_ds);
		mem_acc = (void *)(d_ds + 1);

		d_ds->sender_id = receiver->smem->sender_id;
		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
		d_ds->tag = receiver->smem->tag;
		d_ds->mem_access_size = sizeof(*mem_acc);
		d_ds->mem_access_count = 1;
		d_ds->mem_access_offs = off;
	}

	off += sizeof(struct ffa_mem_access);
	dst_region = (struct ffa_mem_region *)(mem_acc + 1);

	/* Copy the mem_access_descr */
	mem_acc[0].region_offs = off;
	memcpy(&mem_acc[0].access_perm, &receiver->perm,
	       sizeof(struct ffa_mem_access_perm));

	/* Copy the mem_region_descr */
	memset(dst_region, 0, sizeof(*dst_region));
	dst_region->address_range_count = 0;
	dst_region->total_page_count = 0;

	addr_dst = dst_region->address_range_array;

	SLIST_FOREACH(reg, &smem->regions, link) {
		uint32_t offset = reg->page_offset;
		struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);

		addr_dst->address = (uint64_t)sp_mem_get_va(&ctx->uctx,
							    offset,
							    reg->mobj);
		addr_dst->page_count = reg->page_count;
		dst_region->address_range_count++;

		dst_region->total_page_count += addr_dst->page_count;
		/* Advance to the next address range entry */
		addr_dst++;
	}
}

static void ffa_mem_retrieve(struct thread_smc_1_2_regs *args,
			     struct sp_session *caller_sp,
			     struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	int ret = FFA_OK;
	size_t tx_len = 0;
	struct ffa_mem_access *mem_acc = NULL;
	struct ffa_mem_region *mem_region = NULL;
	uint64_t va = 0;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;
	uint32_t address_offset = 0;
	size_t needed_size = 0;

	if (!check_rxtx(rxtx) || !rxtx->tx_is_mine) {
		ret = FFA_DENIED;
		goto err;
	}
	/* Descriptor fragments aren't supported yet. */
	if (frag_len != tot_len) {
		ret = FFA_NOT_SUPPORTED;
		goto err;
	}
	if (frag_len > rxtx->size) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	tx_len = rxtx->size;

	ret = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, frag_len,
					&mem_trans);
	if (ret)
		goto err;

	smem = sp_mem_get(mem_trans.global_handle);
	if (!smem) {
		DMSG("Incorrect handle");
		ret = FFA_DENIED;
		goto err;
	}

	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
	if (!receiver) {
		DMSG("Caller is not a receiver of this share");
		ret = FFA_DENIED;
		goto err;
	}

	mem_acc = (void *)((vaddr_t)rxtx->rx + mem_trans.mem_access_offs);

	address_offset = READ_ONCE(mem_acc[0].region_offs);

	if (ADD_OVERFLOW(address_offset, sizeof(struct ffa_mem_region),
			 &needed_size) || needed_size > tx_len) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	if (check_retrieve_request(receiver, rxtx->ffa_vers, &mem_trans,
				   rxtx->rx, smem, tx_len) != TEE_SUCCESS) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	if (receiver->ref_count == UINT8_MAX) {
		ret = FFA_DENIED;
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
		goto err;
	}

	receiver->ref_count++;

	/* We only need to map the region the first time it is retrieved. */
	if (receiver->ref_count == 1) {
		TEE_Result ret_map = TEE_SUCCESS;

		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

		/*
		 * Try to map the memory linked to the handle in
		 * sp_mem_access_descr.
		 */
		mem_region = (struct ffa_mem_region *)((vaddr_t)rxtx->rx +
						       address_offset);

		va = READ_ONCE(mem_region->address_range_array[0].address);
		ret_map = sp_map_shared(caller_sp, receiver, smem, &va);

		if (ret_map) {
			EMSG("Could not map memory region: %#"PRIx32, ret_map);
			exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
			receiver->ref_count--;
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			ret = FFA_DENIED;
			goto err;
		}
	} else {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	}

	create_retrieve_response(rxtx->ffa_vers, rxtx->tx, receiver, smem,
				 caller_sp);

	args->a0 = FFA_MEM_RETRIEVE_RESP;
	args->a1 = tx_len;
	args->a2 = tx_len;

	rxtx->tx_is_mine = false;

	return;
err:
	ffa_set_error(args, ret);
}

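/*
 * Each successful FFA_MEM_RETRIEVE_REQ above increments the receiver's
 * ref_count. FFA_MEM_RELINQUISH below decrements it again and the share is
 * only unmapped from the SP once the count drops back to zero.
 */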
static void ffa_mem_relinquish(struct thread_smc_1_2_regs *args,
			       struct sp_session *caller_sp,
			       struct ffa_rxtx *rxtx)
{
	struct sp_mem *smem = NULL;
	struct ffa_mem_relinquish *mem = rxtx->rx;
	struct sp_mem_receiver *receiver = NULL;
	int err = FFA_NOT_SUPPORTED;
	uint32_t exceptions = 0;

	if (!check_rxtx(rxtx)) {
		ffa_set_error(args, FFA_DENIED);
		return;
	}

	exceptions = cpu_spin_lock_xsave(&rxtx->spinlock);
	smem = sp_mem_get(READ_ONCE(mem->handle));

	if (!smem) {
		DMSG("Incorrect handle");
		err = FFA_DENIED;
		goto err_unlock_rxtwx;
	}

	if (READ_ONCE(mem->endpoint_count) != 1) {
		DMSG("Incorrect endpoint count");
		err = FFA_INVALID_PARAMETERS;
		goto err_unlock_rxtwx;
	}

	if (READ_ONCE(mem->endpoint_id_array[0]) != caller_sp->endpoint_id) {
		DMSG("Incorrect endpoint id");
		err = FFA_DENIED;
		goto err_unlock_rxtwx;
	}

	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);

	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
	if (!receiver) {
		DMSG("Caller is not a receiver of this share");
		ffa_set_error(args, FFA_DENIED);
		return;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
	if (!receiver->ref_count) {
		DMSG("Too many relinquish requests");
		err = FFA_DENIED;
		goto err_unlock_memref;
	}

	receiver->ref_count--;
	if (!receiver->ref_count) {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
		if (sp_unmap_ffa_regions(caller_sp, smem) != TEE_SUCCESS) {
			DMSG("Failed to unmap region");
			ffa_set_error(args, FFA_DENIED);
			return;
		}
	} else {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	}

	ffa_success(args);
	return;

err_unlock_rxtwx:
	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
	ffa_set_error(args, err);
	return;
err_unlock_memref:
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	ffa_set_error(args, err);
}

static void zero_mem_region(struct sp_mem *smem, struct sp_session *s)
{
	void *addr = NULL;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	ts_push_current_session(&s->ts_sess);
	SLIST_FOREACH(reg, &smem->regions, link) {
		size_t sz = reg->page_count * SMALL_PAGE_SIZE;

		addr = sp_mem_get_va(&ctx->uctx, reg->page_offset, reg->mobj);

		assert(addr);
		memset(addr, 0, sz);
	}
	ts_pop_current_session();
}

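/*
 * FFA_MEM_RECLAIM register usage handled below (FF-A spec): a1 and a2 carry
 * the low and high 32 bits of the memory handle and a3 carries the flags,
 * e.g. FFA_MEMORY_REGION_FLAG_CLEAR to request zeroing of the region.
 */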
/*
 * ffa_mem_reclaim returns false if it couldn't process the reclaim message.
 * This happens when the memory region was shared with the OP-TEE endpoint.
 * After this, thread_spmc calls handle_mem_reclaim() to make sure that the
 * region is reclaimed from the OP-TEE endpoint.
 */
bool ffa_mem_reclaim(struct thread_smc_1_2_regs *args,
		     struct sp_session *caller_sp)
{
	uint64_t handle = reg_pair_to_64(args->a2, args->a1);
	uint32_t flags = args->a3;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;

	smem = sp_mem_get(handle);
	if (!smem)
		return false;

	/*
	 * If the caller is an SP, make sure that it is the owner of the share.
	 * If the call comes from NWd this is ensured by the hypervisor.
	 */
	if (caller_sp && caller_sp->endpoint_id != smem->sender_id) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return true;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	/* Make sure that all shares were relinquished */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->ref_count != 0) {
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	if (flags & FFA_MEMORY_REGION_FLAG_CLEAR) {
		if (caller_sp) {
			zero_mem_region(smem, caller_sp);
		} else {
			/*
			 * Currently we don't support zeroing Normal World
			 * memory. To do this we would have to map the memory
			 * again, zero it and unmap it.
			 */
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	sp_mem_remove(smem);
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

	ffa_success(args);
	return true;
}

static struct sp_session *
ffa_handle_sp_direct_req(struct thread_smc_1_2_regs *args,
			 struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	TEE_Result res = FFA_OK;

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send message to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}
	if (!dst) {
		EMSG("Request to normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (dst == caller_sp) {
		EMSG("Cannot send message to own ID");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	if (caller_sp &&
	    !(caller_sp->props & FFA_PART_PROP_DIRECT_REQ_SEND)) {
		EMSG("SP 0x%"PRIx16" doesn't support sending direct requests",
		     caller_sp->endpoint_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (!(dst->props & FFA_PART_PROP_DIRECT_REQ_RECV)) {
		EMSG("SP 0x%"PRIx16" doesn't support receipt of direct requests",
		     dst->endpoint_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		switch (args->a2 & FFA_MSG_TYPE_MASK) {
		case FFA_MSG_SEND_VM_CREATED:
			/* The sender must be the NWd hypervisor (ID 0) */
			if (FFA_SRC(args->a1) != 0 || caller_sp) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The SP must be subscribed for this message */
			if (!(dst->props & FFA_PART_PROP_NOTIF_CREATED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		case FFA_MSG_SEND_VM_DESTROYED:
			/* The sender must be the NWd hypervisor (ID 0) */
			if (FFA_SRC(args->a1) != 0 || caller_sp) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The SP must be subscribed for this message */
			if (!(dst->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		default:
			ffa_set_error(args, FFA_NOT_SUPPORTED);
			return caller_sp;
		}
	} else if (args->a2 != FFA_PARAM_MBZ) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	cpu_spin_lock(&dst->spinlock);
	if (dst->state != sp_idle) {
		DMSG("SP is busy");
		ffa_set_error(args, FFA_BUSY);
		cpu_spin_unlock(&dst->spinlock);
		return caller_sp;
	}

	dst->state = sp_busy;
	cpu_spin_unlock(&dst->spinlock);

	/*
	 * Store the calling endpoint id. This will make it possible to check
	 * if the response is sent back to the correct endpoint.
	 */
	dst->caller_id = FFA_SRC(args->a1);

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return error to calling SP */
		return caller_sp;
	}

	return dst;
}

static struct sp_session *
ffa_handle_sp_direct_resp(struct thread_smc_1_2_regs *args,
			  struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	enum sp_status st = sp_idle;
	TEE_Result res = FFA_OK;

	if (!caller_sp) {
		EMSG("Response from normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return NULL;
	}

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send response to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		switch (args->a2 & FFA_MSG_TYPE_MASK) {
		case FFA_MSG_RESP_VM_CREATED:
			/* The destination must be the NWd hypervisor (ID 0) */
			if (FFA_DST(args->a1) != 0 || dst) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The responding SP must be subscribed for this message */
			if (!(caller_sp->props & FFA_PART_PROP_NOTIF_CREATED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		case FFA_MSG_RESP_VM_DESTROYED:
			/* The destination must be the NWd hypervisor (ID 0) */
			if (FFA_DST(args->a1) != 0 || dst) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The responding SP must be subscribed for this message */
			if (!(caller_sp->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		default:
			ffa_set_error(args, FFA_NOT_SUPPORTED);
			return caller_sp;
		}
	} else if (args->a2 != FFA_PARAM_MBZ) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	if (dst) {
		cpu_spin_lock(&dst->spinlock);
		st = dst->state;
		cpu_spin_unlock(&dst->spinlock);

		if (st != sp_busy) {
			EMSG("SP is not waiting for a request");
			ffa_set_error(args, FFA_INVALID_PARAMETERS);
			return caller_sp;
		}
	}

	if (caller_sp->caller_id != FFA_DST(args->a1)) {
		EMSG("FFA_MSG_SEND_DIRECT_RESP to incorrect SP");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	caller_sp->caller_id = 0;

	cpu_spin_lock(&caller_sp->spinlock);
	caller_sp->state = sp_idle;
	cpu_spin_unlock(&caller_sp->spinlock);

	if (!dst) {
		/* Send the message back to the NW */
		return NULL;
	}

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return error to calling SP */
		return caller_sp;
	}
	return dst;
}

static struct sp_session *
ffa_handle_sp_error(struct thread_smc_1_2_regs *args,
		    struct sp_session *caller_sp)
{
	/* If caller_sp == NULL send the message to the Normal World */
	if (caller_sp && sp_enter(args, caller_sp)) {
		/*
		 * We cannot return the error. Unwind the call chain by one
		 * link and set the state of the SP to dead.
		 */
		cpu_spin_lock(&caller_sp->spinlock);
		caller_sp->state = sp_dead;
		cpu_spin_unlock(&caller_sp->spinlock);
		/* Create error. */
		ffa_set_error(args, FFA_ABORTED);
		return sp_get_session(caller_sp->caller_id);
	}

	return caller_sp;
}

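/*
 * FFA_FEATURES reports, per FF-A function ID passed in a1, whether the
 * interface is implemented. For FFA_RXTX_MAP the returned w2 additionally
 * encodes the minimum buffer size and alignment boundary; 0 is assumed to
 * mean 4KiB here, matching the comment below.
 */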
static void handle_features(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4KiB minimum buffer size and alignment boundary */
		break;
	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void handle_mem_perm_get(struct thread_smc_1_2_regs *args,
				struct sp_session *sp_s)
{
	struct sp_ctx *sp_ctx = NULL;
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	uint16_t attrs = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;

	/*
	 * The FFA_MEM_PERM_GET interface is only allowed during initialization
	 */
	if (sp_s->is_initialized) {
		ret_val = FFA_DENIED;
		goto out;
	}

	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
	if (!sp_ctx)
		goto out;

	/* Query memory attributes */
	ts_push_current_session(&sp_s->ts_sess);
	res = vm_get_prot(&sp_ctx->uctx, args->a1, SMALL_PAGE_SIZE, &attrs);
	ts_pop_current_session();
	if (res)
		goto out;

	/* Build response value */
	ret_fid = FFA_SUCCESS_32;
	ret_val = 0;
	if ((attrs & TEE_MATTR_URW) == TEE_MATTR_URW)
		ret_val |= FFA_MEM_PERM_RW;
	else if (attrs & TEE_MATTR_UR)
		ret_val |= FFA_MEM_PERM_RO;

	if ((attrs & TEE_MATTR_UX) == 0)
		ret_val |= FFA_MEM_PERM_NX;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

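/*
 * FFA_MEM_PERM_SET register usage handled below: a1 holds the base VA of
 * the region, a2 the number of 4K pages and a3 the requested data and
 * instruction access permissions.
 */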
static void handle_mem_perm_set(struct thread_smc_1_2_regs *args,
				struct sp_session *sp_s)
{
	struct sp_ctx *sp_ctx = NULL;
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	size_t region_size = 0;
	uint32_t data_perm = 0;
	uint32_t instruction_perm = 0;
	uint16_t attrs = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;

	/*
	 * The FFA_MEM_PERM_SET interface is only allowed during initialization
	 */
	if (sp_s->is_initialized) {
		ret_val = FFA_DENIED;
		goto out;
	}

	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
	if (!sp_ctx)
		goto out;

	if (MUL_OVERFLOW(args->a2, SMALL_PAGE_SIZE, &region_size))
		goto out;

	if (args->a3 & FFA_MEM_PERM_RESERVED) {
		/* Non-zero reserved bits */
		goto out;
	}

	data_perm = args->a3 & FFA_MEM_PERM_DATA_PERM;
	instruction_perm = args->a3 & FFA_MEM_PERM_INSTRUCTION_PERM;

	/* RWX access right configuration is not permitted */
	if (data_perm == FFA_MEM_PERM_RW && instruction_perm == FFA_MEM_PERM_X)
		goto out;

	switch (data_perm) {
	case FFA_MEM_PERM_RO:
		attrs = TEE_MATTR_UR;
		break;
	case FFA_MEM_PERM_RW:
		attrs = TEE_MATTR_URW;
		break;
	default:
		/* Invalid permission value */
		goto out;
	}

	if (instruction_perm == FFA_MEM_PERM_X)
		attrs |= TEE_MATTR_UX;

	/* Set access rights */
	ts_push_current_session(&sp_s->ts_sess);
	res = vm_set_prot(&sp_ctx->uctx, args->a1, region_size, attrs);
	ts_pop_current_session();
	if (res != TEE_SUCCESS)
		goto out;

	ret_fid = FFA_SUCCESS_32;
	ret_val = FFA_PARAM_MBZ;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void spmc_handle_version(struct thread_smc_1_2_regs *args,
				struct ffa_rxtx *rxtx)
{
	spmc_set_args(args, spmc_exchange_version(args->a1, rxtx),
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

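/*
 * FFA_CONSOLE_LOG register usage handled below: a1 carries the character
 * count and the characters themselves are packed into the argument
 * registers starting at w2 (FFA_CONSOLE_LOG_32) or x2 (FFA_CONSOLE_LOG_64),
 * which is why the copy loop starts at args->a[2].
 */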
static void handle_console_log(uint32_t ffa_vers,
			       struct thread_smc_1_2_regs *args)
{
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	size_t char_count = args->a1 & FFA_CONSOLE_LOG_CHAR_COUNT_MASK;
	char buffer[FFA_CONSOLE_LOG_64_MAX_MSG_LEN + 1] = { 0 };
	size_t max_length = 0;
	size_t reg_size = 0;
	size_t n = 0;

	if (args->a0 == FFA_CONSOLE_LOG_64) {
		if (ffa_vers >= FFA_VERSION_1_2)
			max_length = FFA_CONSOLE_LOG_64_MAX_MSG_LEN;
		else
			max_length = FFA_CONSOLE_LOG_64_V1_1_MAX_MSG_LEN;
		reg_size = sizeof(uint64_t);
	} else {
		max_length = FFA_CONSOLE_LOG_32_MAX_MSG_LEN;
		reg_size = sizeof(uint32_t);
	}

	if (char_count < 1 || char_count > max_length)
		goto out;

	for (n = 0; n < char_count; n += reg_size) {
		/* + 2 since we're starting from W2/X2 */
		memcpy(buffer + n, &args->a[2 + n / reg_size],
		       MIN(char_count - n, reg_size));
	}

	buffer[char_count] = '\0';

	trace_ext_puts(buffer);

	ret_fid = FFA_SUCCESS_32;
	ret_val = FFA_PARAM_MBZ;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*
 * FF-A message handler for SPs. Every message to or from an SP is handled
 * here. This is the entry point of the sp_spmc kernel thread. caller_sp is
 * NULL when the message comes from the Normal World. The loop below keeps
 * dispatching for as long as control remains with an SP and returns once a
 * handler hands control back to the Normal World (caller_sp == NULL).
 */
void spmc_sp_msg_handler(struct thread_smc_1_2_regs *args,
			 struct sp_session *caller_sp)
{
	thread_check_canaries();
	do {
		switch (args->a0) {
#ifdef ARM64
		case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
		case FFA_MSG_SEND_DIRECT_REQ_32:
			caller_sp = ffa_handle_sp_direct_req(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MSG_SEND_DIRECT_RESP_64:
#endif
		case FFA_MSG_SEND_DIRECT_RESP_32:
			caller_sp = ffa_handle_sp_direct_resp(args, caller_sp);
			break;
		case FFA_ERROR:
			caller_sp = ffa_handle_sp_error(args, caller_sp);
			break;
		case FFA_MSG_WAIT:
			/* FFA_MSG_WAIT gives control back to the NW */
			cpu_spin_lock(&caller_sp->spinlock);
			caller_sp->state = sp_idle;
			cpu_spin_unlock(&caller_sp->spinlock);
			caller_sp = NULL;
			break;
#ifdef ARM64
		case FFA_RXTX_MAP_64:
#endif
		case FFA_RXTX_MAP_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_map(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RXTX_UNMAP:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_unmap(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RX_RELEASE:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rx_release(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_ID_GET:
			args->a0 = FFA_SUCCESS_32;
			args->a2 = caller_sp->endpoint_id;
			sp_enter(args, caller_sp);
			break;
		case FFA_VERSION:
			spmc_handle_version(args, &caller_sp->rxtx);
			sp_enter(args, caller_sp);
			break;
		case FFA_FEATURES:
			handle_features(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_SPM_ID_GET:
			spmc_handle_spm_id_get(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_PARTITION_INFO_GET:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_partition_info_get(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_SHARE_64:
#endif
		case FFA_MEM_SHARE_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_sp_handle_mem_share(args, &caller_sp->rxtx,
						 caller_sp);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_RETRIEVE_REQ_64:
#endif
		case FFA_MEM_RETRIEVE_REQ_32:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_retrieve(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RELINQUISH:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_relinquish(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RECLAIM:
			ffa_mem_reclaim(args, caller_sp);
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_PERM_GET_64:
#endif
		case FFA_MEM_PERM_GET_32:
			handle_mem_perm_get(args, caller_sp);
			sp_enter(args, caller_sp);
			break;

#ifdef ARM64
		case FFA_MEM_PERM_SET_64:
#endif
		case FFA_MEM_PERM_SET_32:
			handle_mem_perm_set(args, caller_sp);
			sp_enter(args, caller_sp);
			break;

#ifdef ARM64
		case FFA_CONSOLE_LOG_64:
#endif
		case FFA_CONSOLE_LOG_32:
			handle_console_log(caller_sp->rxtx.ffa_vers, args);
			sp_enter(args, caller_sp);
			break;

		default:
			EMSG("Unhandled FFA function ID %#"PRIx32,
			     (uint32_t)args->a0);
			ffa_set_error(args, FFA_INVALID_PARAMETERS);
			sp_enter(args, caller_sp);
		}
	} while (caller_sp);
}