1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2020-2025, Linaro Limited.
4 * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
5 */
6
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/dt.h>
12 #include <kernel/interrupt.h>
13 #include <kernel/notif.h>
14 #include <kernel/panic.h>
15 #include <kernel/secure_partition.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/spmc_sp_handler.h>
18 #include <kernel/tee_misc.h>
19 #include <kernel/thread.h>
20 #include <kernel/thread_private.h>
21 #include <kernel/thread_spmc.h>
22 #include <kernel/virtualization.h>
23 #include <libfdt.h>
24 #include <mm/core_mmu.h>
25 #include <mm/mobj.h>
26 #include <optee_ffa.h>
27 #include <optee_msg.h>
28 #include <optee_rpc_cmd.h>
29 #include <sm/optee_smc.h>
30 #include <string.h>
31 #include <sys/queue.h>
32 #include <tee/entry_std.h>
33 #include <tee/uuid.h>
34 #include <tee_api_types.h>
35 #include <types_ext.h>
36 #include <util.h>
37
38 #if defined(CFG_CORE_SEL1_SPMC)
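/*
 * State for an ongoing FFA_MEM_SHARE/FFA_MEM_LEND operation. A transaction
 * that doesn't fit in a single call is continued with FFA_MEM_FRAG_TX using
 * struct mem_frag_state, which remembers how far into the descriptor we've
 * come (frag_offset) and, when a temporary buffer is used, the mapping (mm).
 */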
39 struct mem_op_state {
40 bool mem_share;
41 struct mobj_ffa *mf;
42 unsigned int page_count;
43 unsigned int region_count;
44 unsigned int current_page_idx;
45 };
46
47 struct mem_frag_state {
48 struct mem_op_state op;
49 tee_mm_entry_t *mm;
50 unsigned int frag_offset;
51 SLIST_ENTRY(mem_frag_state) link;
52 };
53 #endif
54
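/*
 * Per guest (VM) FF-A notification state. "bound" and "pending" are bitmaps
 * of the 64 global notification IDs, do_bottom_half_value is the
 * notification ID used to signal "do bottom half" to that guest, or -1 when
 * not yet configured.
 */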
55 struct notif_vm_bitmap {
56 bool initialized;
57 int do_bottom_half_value;
58 uint64_t pending;
59 uint64_t bound;
60 };
61
62 STAILQ_HEAD(spmc_lsp_desc_head, spmc_lsp_desc);
63
64 static struct spmc_lsp_desc_head lsp_head __nex_data =
65 STAILQ_HEAD_INITIALIZER(lsp_head);
66
67 static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
68 static bool spmc_notif_is_ready __nex_bss;
69 static int notif_intid __nex_data __maybe_unused = -1;
70
71 /* ID used to look up the guest-specific struct notif_vm_bitmap */
72 static unsigned int notif_vm_bitmap_id __nex_bss;
73 /* Notification state when ns-virtualization isn't enabled */
74 static struct notif_vm_bitmap default_notif_vm_bitmap;
75
76 /* Initialized in spmc_init() below */
77 static struct spmc_lsp_desc optee_core_lsp;
78 #ifdef CFG_CORE_SEL1_SPMC
79 /*
80 * Representation of the internal SPMC when OP-TEE is the S-EL1 SPMC.
81 * Initialized in spmc_init() below.
82 */
83 static struct spmc_lsp_desc optee_spmc_lsp;
84 /* FF-A ID of the SPMD. This is only valid when OP-TEE is the S-EL1 SPMC. */
85 static uint16_t spmd_id __nex_bss;
86
87 /*
88 * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
89 *
90 * struct ffa_rxtx::spinlock protects the variables below from concurrent
91 * access; this includes use of the content of struct ffa_rxtx::rx and
92 * @frag_state_head.
93 *
94 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
95 * ffa_rxtx::tx and false when it is owned by normal world.
96 *
97 * Note that we can't prevent normal world from updating the content of
98 * these buffers, so we must always be careful when reading, even while
99 * we hold the lock.
100 */
101
102 static struct ffa_rxtx my_rxtx __nex_bss;
103
104 static bool is_nw_buf(struct ffa_rxtx *rxtx)
105 {
106 return rxtx == &my_rxtx;
107 }
108
109 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
110 SLIST_HEAD_INITIALIZER(&frag_state_head);
111
112 #else
113 /* FF-A ID of the external SPMC */
114 static uint16_t spmc_id __nex_bss;
115 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
116 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
117 static struct ffa_rxtx my_rxtx __nex_data = {
118 .rx = __rx_buf,
119 .tx = __tx_buf,
120 .size = sizeof(__rx_buf),
121 };
122 #endif
123
124 bool spmc_is_reserved_id(uint16_t id)
125 {
126 #ifdef CFG_CORE_SEL1_SPMC
127 return id == spmd_id;
128 #else
129 return id == spmc_id;
130 #endif
131 }
132
133 struct spmc_lsp_desc *spmc_find_lsp_by_sp_id(uint16_t sp_id)
134 {
135 struct spmc_lsp_desc *desc = NULL;
136
137 STAILQ_FOREACH(desc, &lsp_head, link)
138 if (desc->sp_id == sp_id)
139 return desc;
140
141 return NULL;
142 }
143
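/*
 * In FF-A direct messages w1 carries the sender endpoint ID in bits [31:16]
 * and the receiver endpoint ID in bits [15:0]. A response reuses the same
 * encoding with the two halves swapped, for example a request src_dst of
 * 0x80010003 (sender 0x8001, receiver 0x0003) is answered with 0x00038001.
 */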
144 static uint32_t swap_src_dst(uint32_t src_dst)
145 {
146 return (src_dst >> 16) | (src_dst << 16);
147 }
148
149 static uint16_t get_sender_id(uint32_t src_dst)
150 {
151 return src_dst >> 16;
152 }
153
154 void spmc_set_args(struct thread_smc_1_2_regs *args, uint32_t fid,
155 uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
156 uint32_t w5)
157 {
158 *args = (struct thread_smc_1_2_regs){
159 .a0 = fid,
160 .a1 = src_dst,
161 .a2 = w2,
162 .a3 = w3,
163 .a4 = w4,
164 .a5 = w5,
165 };
166 }
167
168 static void set_simple_ret_val(struct thread_smc_1_2_regs *args, int ffa_ret)
169 {
170 if (ffa_ret)
171 spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
172 else
173 spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
174 }
175
176 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
177 {
178 uint32_t major_vers = FFA_GET_MAJOR_VERSION(vers);
179 uint32_t minor_vers = FFA_GET_MINOR_VERSION(vers);
180 uint32_t my_vers = FFA_VERSION_1_2;
181 uint32_t my_major_vers = 0;
182 uint32_t my_minor_vers = 0;
183
184 my_major_vers = FFA_GET_MAJOR_VERSION(my_vers);
185 my_minor_vers = FFA_GET_MINOR_VERSION(my_vers);
186
187 /*
188 * No locking: if the caller makes concurrent calls to this it's
189 * only making a mess for itself. We must be able to renegotiate
190 * the FF-A version in order to support differing versions between
191 * the loader and the driver.
192 *
193 * Callers should use the version requested if we return a matching
194 * major version and a matching or larger minor version. The caller
195 * should downgrade to our minor version if our minor version is
196 * smaller. Regardless, always return our version as recommended by
197 * the specification.
198 */
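/*
 * Example, with my_vers being v1.2: a caller requesting v1.3 gets
 * rxtx->ffa_vers set to v1.2 and is told v1.2, while a caller
 * requesting v1.1 gets rxtx->ffa_vers set to v1.1 but is still told
 * v1.2 and is expected to downgrade on its own.
 */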
199 if (major_vers == my_major_vers) {
200 if (minor_vers > my_minor_vers)
201 rxtx->ffa_vers = my_vers;
202 else
203 rxtx->ffa_vers = vers;
204 }
205
206 return my_vers;
207 }
208
209 static bool is_ffa_success(uint32_t fid)
210 {
211 #ifdef ARM64
212 if (fid == FFA_SUCCESS_64)
213 return true;
214 #endif
215 return fid == FFA_SUCCESS_32;
216 }
217
218 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
219 {
220 if (is_ffa_success(args->a0))
221 return FFA_OK;
222 if (args->a0 == FFA_ERROR && args->a2)
223 return args->a2;
224 return FFA_NOT_SUPPORTED;
225 }
226
227 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
228 unsigned long a3, unsigned long a4)
229 {
230 struct thread_smc_args args = {
231 .a0 = fid,
232 .a1 = a1,
233 .a2 = a2,
234 .a3 = a3,
235 .a4 = a4,
236 };
237
238 thread_smccc(&args);
239
240 return get_ffa_ret_code(&args);
241 }
242
243 static int __maybe_unused ffa_features(uint32_t id)
244 {
245 return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
246 }
247
248 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
249 uint32_t flags, uint64_t bitmap)
250 {
251 return ffa_simple_call(FFA_NOTIFICATION_SET,
252 SHIFT_U32(src, 16) | dst, flags,
253 low32_from_64(bitmap), high32_from_64(bitmap));
254 }
255
256 #if defined(CFG_CORE_SEL1_SPMC)
257 static void handle_features(struct thread_smc_1_2_regs *args)
258 {
259 uint32_t ret_fid = FFA_ERROR;
260 uint32_t ret_w2 = FFA_NOT_SUPPORTED;
261
262 switch (args->a1) {
263 case FFA_FEATURE_SCHEDULE_RECV_INTR:
264 if (spmc_notif_is_ready) {
265 ret_fid = FFA_SUCCESS_32;
266 ret_w2 = notif_intid;
267 }
268 break;
269
270 #ifdef ARM64
271 case FFA_RXTX_MAP_64:
272 #endif
273 case FFA_RXTX_MAP_32:
274 ret_fid = FFA_SUCCESS_32;
275 ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
276 break;
277 #ifdef ARM64
278 case FFA_MEM_SHARE_64:
279 #endif
280 case FFA_MEM_SHARE_32:
281 ret_fid = FFA_SUCCESS_32;
282 /*
283 * Partition manager supports transmission of a memory
284 * transaction descriptor in a buffer dynamically allocated
285 * by the endpoint.
286 */
287 ret_w2 = BIT(0);
288 break;
289
290 case FFA_ERROR:
291 case FFA_VERSION:
292 case FFA_SUCCESS_32:
293 #ifdef ARM64
294 case FFA_SUCCESS_64:
295 #endif
296 case FFA_FEATURES:
297 case FFA_SPM_ID_GET:
298 case FFA_MEM_FRAG_TX:
299 case FFA_MEM_RECLAIM:
300 case FFA_MSG_SEND_DIRECT_REQ_64:
301 case FFA_MSG_SEND_DIRECT_REQ_32:
302 case FFA_INTERRUPT:
303 case FFA_PARTITION_INFO_GET:
304 case FFA_RXTX_UNMAP:
305 case FFA_RX_RELEASE:
306 case FFA_FEATURE_MANAGED_EXIT_INTR:
307 case FFA_NOTIFICATION_BITMAP_CREATE:
308 case FFA_NOTIFICATION_BITMAP_DESTROY:
309 case FFA_NOTIFICATION_BIND:
310 case FFA_NOTIFICATION_UNBIND:
311 case FFA_NOTIFICATION_SET:
312 case FFA_NOTIFICATION_GET:
313 case FFA_NOTIFICATION_INFO_GET_32:
314 #ifdef ARM64
315 case FFA_NOTIFICATION_INFO_GET_64:
316 #endif
317 ret_fid = FFA_SUCCESS_32;
318 ret_w2 = FFA_PARAM_MBZ;
319 break;
320 default:
321 break;
322 }
323
324 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
325 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
326 }
327
328 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
329 {
330 tee_mm_entry_t *mm = NULL;
331
332 if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
333 return FFA_INVALID_PARAMETERS;
334
335 mm = tee_mm_alloc(&core_virt_shm_pool, sz);
336 if (!mm)
337 return FFA_NO_MEMORY;
338
339 if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
340 sz / SMALL_PAGE_SIZE,
341 MEM_AREA_NSEC_SHM)) {
342 tee_mm_free(mm);
343 return FFA_INVALID_PARAMETERS;
344 }
345
346 *va_ret = (void *)tee_mm_get_smem(mm);
347 return 0;
348 }
349
350 void spmc_handle_spm_id_get(struct thread_smc_1_2_regs *args)
351 {
352 spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, optee_spmc_lsp.sp_id,
353 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
354 }
355
356 static void unmap_buf(void *va, size_t sz)
357 {
358 tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);
359
360 assert(mm);
361 core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
362 tee_mm_free(mm);
363 }
364
365 void spmc_handle_rxtx_map(struct thread_smc_1_2_regs *args,
366 struct ffa_rxtx *rxtx)
367 {
368 int rc = 0;
369 unsigned int sz = 0;
370 paddr_t rx_pa = 0;
371 paddr_t tx_pa = 0;
372 void *rx = NULL;
373 void *tx = NULL;
374
375 cpu_spin_lock(&rxtx->spinlock);
376
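/* The page count in w3 must fit in bits [5:0], that is, at most 63 4kB pages */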
377 if (args->a3 & GENMASK_64(63, 6)) {
378 rc = FFA_INVALID_PARAMETERS;
379 goto out;
380 }
381
382 sz = args->a3 * SMALL_PAGE_SIZE;
383 if (!sz) {
384 rc = FFA_INVALID_PARAMETERS;
385 goto out;
386 }
387 /* TX/RX are swapped compared to the caller */
388 tx_pa = args->a2;
389 rx_pa = args->a1;
390
391 if (rxtx->size) {
392 rc = FFA_DENIED;
393 goto out;
394 }
395
396 /*
397 * If the buffer comes from an SP the address is virtual and already
398 * mapped.
399 */
400 if (is_nw_buf(rxtx)) {
401 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
402 enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
403 bool tx_alloced = false;
404
405 /*
406 * With virtualization we establish this mapping in
407 * the nexus mapping, which is then replicated to
408 * each partition.
409 *
410 * This means that this mapping must be done before
411 * any partition is created and then must not be
412 * changed.
413 */
414
415 /*
416 * core_mmu_add_mapping() may reuse previous
417 * mappings. First check if there are any mappings to
418 * reuse so we know how to clean up in case of
419 * failure.
420 */
421 tx = phys_to_virt(tx_pa, mt, sz);
422 rx = phys_to_virt(rx_pa, mt, sz);
423 if (!tx) {
424 tx = core_mmu_add_mapping(mt, tx_pa, sz);
425 if (!tx) {
426 rc = FFA_NO_MEMORY;
427 goto out;
428 }
429 tx_alloced = true;
430 }
431 if (!rx)
432 rx = core_mmu_add_mapping(mt, rx_pa, sz);
433
434 if (!rx) {
435 if (tx_alloced && tx)
436 core_mmu_remove_mapping(mt, tx, sz);
437 rc = FFA_NO_MEMORY;
438 goto out;
439 }
440 } else {
441 rc = map_buf(tx_pa, sz, &tx);
442 if (rc)
443 goto out;
444 rc = map_buf(rx_pa, sz, &rx);
445 if (rc) {
446 unmap_buf(tx, sz);
447 goto out;
448 }
449 }
450 rxtx->tx = tx;
451 rxtx->rx = rx;
452 } else {
453 if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
454 rc = FFA_INVALID_PARAMETERS;
455 goto out;
456 }
457
458 if (!virt_to_phys((void *)tx_pa) ||
459 !virt_to_phys((void *)rx_pa)) {
460 rc = FFA_INVALID_PARAMETERS;
461 goto out;
462 }
463
464 rxtx->tx = (void *)tx_pa;
465 rxtx->rx = (void *)rx_pa;
466 }
467
468 rxtx->size = sz;
469 rxtx->tx_is_mine = true;
470 DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
471 DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
472 out:
473 cpu_spin_unlock(&rxtx->spinlock);
474 set_simple_ret_val(args, rc);
475 }
476
477 void spmc_handle_rxtx_unmap(struct thread_smc_1_2_regs *args,
478 struct ffa_rxtx *rxtx)
479 {
480 int rc = FFA_INVALID_PARAMETERS;
481
482 cpu_spin_lock(&rxtx->spinlock);
483
484 if (!rxtx->size)
485 goto out;
486
487 /*
488 * We don't unmap the SP memory as the SP might still use it.
489 * We avoid making changes to nexus mappings at this stage since
490 * there currently isn't a way to replicate those changes to all
491 * partitions.
492 */
493 if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
494 unmap_buf(rxtx->rx, rxtx->size);
495 unmap_buf(rxtx->tx, rxtx->size);
496 }
497 rxtx->size = 0;
498 rxtx->rx = NULL;
499 rxtx->tx = NULL;
500 rc = 0;
501 out:
502 cpu_spin_unlock(&rxtx->spinlock);
503 set_simple_ret_val(args, rc);
504 }
505
506 void spmc_handle_rx_release(struct thread_smc_1_2_regs *args,
507 struct ffa_rxtx *rxtx)
508 {
509 int rc = 0;
510
511 cpu_spin_lock(&rxtx->spinlock);
512 /* The sender's RX is our TX */
513 if (!rxtx->size || rxtx->tx_is_mine) {
514 rc = FFA_DENIED;
515 } else {
516 rc = 0;
517 rxtx->tx_is_mine = true;
518 }
519 cpu_spin_unlock(&rxtx->spinlock);
520
521 set_simple_ret_val(args, rc);
522 }
523
524 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
525 {
526 return !w0 && !w1 && !w2 && !w3;
527 }
528
529 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
530 size_t idx, uint16_t endpoint_id,
531 uint16_t execution_context,
532 uint32_t part_props,
533 const uint32_t uuid_words[4])
534 {
535 struct ffa_partition_info_x *fpi = NULL;
536 size_t fpi_size = sizeof(*fpi);
537
538 if (ffa_vers >= FFA_VERSION_1_1)
539 fpi_size += FFA_UUID_SIZE;
540
541 if ((idx + 1) * fpi_size > blen)
542 return TEE_ERROR_OUT_OF_MEMORY;
543
544 fpi = (void *)((vaddr_t)buf + idx * fpi_size);
545 fpi->id = endpoint_id;
546 /* Number of execution contexts implemented by this partition */
547 fpi->execution_context = execution_context;
548
549 fpi->partition_properties = part_props;
550
551 /* In FF-A 1.0 only bits [2:0] are defined, mask out the others */
552 if (ffa_vers < FFA_VERSION_1_1)
553 fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV |
554 FFA_PART_PROP_DIRECT_REQ_SEND |
555 FFA_PART_PROP_INDIRECT_MSGS;
556
557 if (ffa_vers >= FFA_VERSION_1_1) {
558 if (uuid_words)
559 memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
560 else
561 memset(fpi->uuid, 0, FFA_UUID_SIZE);
562 }
563
564 return TEE_SUCCESS;
565 }
566
567 static TEE_Result lsp_partition_info_get(uint32_t ffa_vers, void *buf,
568 size_t buf_size, size_t *elem_count,
569 const uint32_t uuid_words[4],
570 bool count_only)
571 {
572 struct spmc_lsp_desc *desc = NULL;
573 TEE_Result res = TEE_SUCCESS;
574 size_t c = *elem_count;
575
576 STAILQ_FOREACH(desc, &lsp_head, link) {
577 /*
578 * LSPs without an assigned UUID (such as the OP-TEE SPMC LSP)
579 * are not proper LSPs and shouldn't be reported here.
580 */
581 if (is_nil_uuid(desc->uuid_words[0], desc->uuid_words[1],
582 desc->uuid_words[2], desc->uuid_words[3]))
583 continue;
584
585 if (uuid_words && memcmp(uuid_words, desc->uuid_words,
586 sizeof(desc->uuid_words)))
587 continue;
588
589 if (!count_only && !res)
590 res = spmc_fill_partition_entry(ffa_vers, buf, buf_size,
591 c, desc->sp_id,
592 CFG_TEE_CORE_NB_CORE,
593 desc->properties,
594 desc->uuid_words);
595 c++;
596 }
597
598 *elem_count = c;
599
600 return res;
601 }
602
603 void spmc_handle_partition_info_get(struct thread_smc_1_2_regs *args,
604 struct ffa_rxtx *rxtx)
605 {
606 TEE_Result res = TEE_SUCCESS;
607 uint32_t ret_fid = FFA_ERROR;
608 uint32_t fpi_size = 0;
609 uint32_t rc = 0;
610 bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
611 uint32_t uuid_words[4] = { args->a1, args->a2, args->a3, args->a4, };
612 uint32_t *uuid = uuid_words;
613 size_t count = 0;
614
615 if (!count_only) {
616 cpu_spin_lock(&rxtx->spinlock);
617
618 if (!rxtx->size || !rxtx->tx_is_mine) {
619 rc = FFA_BUSY;
620 goto out;
621 }
622 }
623
624 if (is_nil_uuid(uuid[0], uuid[1], uuid[2], uuid[3]))
625 uuid = NULL;
626
627 if (lsp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
628 &count, uuid, count_only)) {
629 ret_fid = FFA_ERROR;
630 rc = FFA_INVALID_PARAMETERS;
631 goto out;
632 }
633 if (IS_ENABLED(CFG_SECURE_PARTITION)) {
634 res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
635 rxtx->size, uuid, &count,
636 count_only);
637 if (res != TEE_SUCCESS) {
638 ret_fid = FFA_ERROR;
639 rc = FFA_INVALID_PARAMETERS;
640 goto out;
641 }
642 }
643
644 rc = count;
645 ret_fid = FFA_SUCCESS_32;
646 out:
647 if (ret_fid == FFA_SUCCESS_32 && !count_only &&
648 rxtx->ffa_vers >= FFA_VERSION_1_1)
649 fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
650
651 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
652 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
653 if (!count_only) {
654 rxtx->tx_is_mine = false;
655 cpu_spin_unlock(&rxtx->spinlock);
656 }
657 }
658
659 static void spmc_handle_run(struct thread_smc_1_2_regs *args)
660 {
661 uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
662 uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
663 uint32_t rc = FFA_INVALID_PARAMETERS;
664
665 /*
666 * OP-TEE core threads are only preempted using a controlled exit, so
667 * FFA_RUN mustn't be used to resume such threads.
668 *
669 * The OP-TEE SPMC is not preempted at all; it's an error to try to
670 * resume that ID.
671 */
672 if (spmc_find_lsp_by_sp_id(endpoint))
673 goto out;
674
675 /*
676 * The endpoint should be an S-EL0 SP; try to resume the SP from the
677 * preempted state into the busy state.
678 */
679 rc = spmc_sp_resume_from_preempted(endpoint, thread_id);
680 out:
681 set_simple_ret_val(args, rc);
682 }
683 #endif /*CFG_CORE_SEL1_SPMC*/
684
685 static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
686 uint16_t vm_id)
687 {
688 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
689 if (!prtn)
690 return NULL;
691 assert(vm_id == virt_get_guest_id(prtn));
692 return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
693 }
694 if (vm_id)
695 return NULL;
696 return &default_notif_vm_bitmap;
697 }
698
699 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
700 uint16_t vm_id)
701 {
702 struct guest_partition *prtn = NULL;
703 struct notif_vm_bitmap *nvb = NULL;
704 uint32_t old_itr_status = 0;
705 uint32_t res = 0;
706
707 if (!spmc_notif_is_ready) {
708 /*
709 * This should never happen if the normal world respects the
710 * exchanged capabilities.
711 */
712 EMSG("Asynchronous notifications are not ready");
713 return TEE_ERROR_NOT_IMPLEMENTED;
714 }
715
716 if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
717 EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
718 return TEE_ERROR_BAD_PARAMETERS;
719 }
720
721 prtn = virt_get_guest(vm_id);
722 nvb = get_notif_vm_bitmap(prtn, vm_id);
723 if (!nvb) {
724 res = TEE_ERROR_BAD_PARAMETERS;
725 goto out;
726 }
727
728 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
729 nvb->do_bottom_half_value = bottom_half_value;
730 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
731
732 notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
733 res = TEE_SUCCESS;
734 out:
735 virt_put_guest(prtn);
736 return res;
737 }
738
739 static uint32_t get_direct_resp_fid(uint32_t fid)
740 {
741 assert(fid == FFA_MSG_SEND_DIRECT_REQ_64 ||
742 fid == FFA_MSG_SEND_DIRECT_REQ_32);
743
744 if (OPTEE_SMC_IS_64(fid))
745 return FFA_MSG_SEND_DIRECT_RESP_64;
746 return FFA_MSG_SEND_DIRECT_RESP_32;
747 }
748
749 static void handle_yielding_call(struct thread_smc_1_2_regs *args)
750 {
751 uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
752 TEE_Result res = TEE_SUCCESS;
753
754 thread_check_canaries();
755
756 #ifdef ARM64
757 /* Saving this for an eventual RPC */
758 thread_get_core_local()->direct_resp_fid = direct_resp_fid;
759 #endif
760
761 if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
762 /* Note connection to struct thread_rpc_arg::ret */
763 thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
764 0);
765 res = TEE_ERROR_BAD_PARAMETERS;
766 } else {
767 thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
768 args->a6, args->a7);
769 res = TEE_ERROR_BUSY;
770 }
771 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
772 0, res, 0, 0);
773 }
774
775 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
776 {
777 uint64_t cookie = reg_pair_to_64(a5, a4);
778 uint32_t res = 0;
779
780 res = mobj_ffa_unregister_by_cookie(cookie);
781 switch (res) {
782 case TEE_SUCCESS:
783 case TEE_ERROR_ITEM_NOT_FOUND:
784 return 0;
785 case TEE_ERROR_BUSY:
786 EMSG("res %#"PRIx32, res);
787 return FFA_BUSY;
788 default:
789 EMSG("res %#"PRIx32, res);
790 return FFA_INVALID_PARAMETERS;
791 }
792 }
793
794 static void handle_blocking_call(struct thread_smc_1_2_regs *args)
795 {
796 uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
797 uint32_t sec_caps = 0;
798
799 switch (args->a3) {
800 case OPTEE_FFA_GET_API_VERSION:
801 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
802 OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
803 0);
804 break;
805 case OPTEE_FFA_GET_OS_VERSION:
806 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
807 CFG_OPTEE_REVISION_MAJOR,
808 CFG_OPTEE_REVISION_MINOR,
809 TEE_IMPL_GIT_SHA1 >> 32);
810 break;
811 case OPTEE_FFA_EXCHANGE_CAPABILITIES:
812 sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
813 if (spmc_notif_is_ready)
814 sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
815 if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
816 sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE;
817 if (IS_ENABLED(CFG_CORE_DYN_PROTMEM))
818 sec_caps |= OPTEE_FFA_SEC_CAP_PROTMEM;
819 spmc_set_args(args, direct_resp_fid,
820 swap_src_dst(args->a1), 0, 0,
821 THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
822 break;
823 case OPTEE_FFA_UNREGISTER_SHM:
824 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
825 handle_unregister_shm(args->a4, args->a5), 0, 0);
826 break;
827 case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
828 spmc_set_args(args, direct_resp_fid,
829 swap_src_dst(args->a1), 0,
830 spmc_enable_async_notif(args->a4,
831 FFA_SRC(args->a1)),
832 0, 0);
833 break;
834 #ifdef CFG_CORE_DYN_PROTMEM
835 case OPTEE_FFA_RELEASE_PROTMEM:
836 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
837 handle_unregister_shm(args->a4, args->a5), 0, 0);
838 break;
839 #endif
840 default:
841 EMSG("Unhandled blocking service ID %#"PRIx32,
842 (uint32_t)args->a3);
843 spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
844 TEE_ERROR_BAD_PARAMETERS, 0, 0);
845 }
846 }
847
848 static void handle_framework_direct_request(struct thread_smc_1_2_regs *args)
849 {
850 uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
851 uint32_t w0 = FFA_ERROR;
852 uint32_t w1 = FFA_PARAM_MBZ;
853 uint32_t w2 = FFA_NOT_SUPPORTED;
854 uint32_t w3 = FFA_PARAM_MBZ;
855
856 switch (args->a2 & FFA_MSG_TYPE_MASK) {
857 case FFA_MSG_SEND_VM_CREATED:
858 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
859 uint16_t guest_id = args->a5;
860 TEE_Result res = virt_guest_created(guest_id);
861
862 w0 = direct_resp_fid;
863 w1 = swap_src_dst(args->a1);
864 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
865 if (res == TEE_SUCCESS)
866 w3 = FFA_OK;
867 else if (res == TEE_ERROR_OUT_OF_MEMORY)
868 w3 = FFA_DENIED;
869 else
870 w3 = FFA_INVALID_PARAMETERS;
871 }
872 break;
873 case FFA_MSG_SEND_VM_DESTROYED:
874 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
875 uint16_t guest_id = args->a5;
876 TEE_Result res = virt_guest_destroyed(guest_id);
877
878 w0 = direct_resp_fid;
879 w1 = swap_src_dst(args->a1);
880 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
881 if (res == TEE_SUCCESS)
882 w3 = FFA_OK;
883 else
884 w3 = FFA_INVALID_PARAMETERS;
885 }
886 break;
887 case FFA_MSG_VERSION_REQ:
888 w0 = direct_resp_fid;
889 w1 = swap_src_dst(args->a1);
890 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
891 w3 = spmc_exchange_version(args->a3, &my_rxtx);
892 break;
893 default:
894 break;
895 }
896 spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
897 }
898
899 static void optee_lsp_handle_direct_request(struct thread_smc_1_2_regs *args)
900 {
901 if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
902 handle_framework_direct_request(args);
903 return;
904 }
905
906 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
907 virt_set_guest(get_sender_id(args->a1))) {
908 spmc_set_args(args, get_direct_resp_fid(args->a0),
909 swap_src_dst(args->a1), 0,
910 TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
911 return;
912 }
913
914 if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
915 handle_yielding_call(args);
916 else
917 handle_blocking_call(args);
918
919 /*
920 * Note that handle_yielding_call() typically only returns if a
921 * thread cannot be allocated or found. virt_unset_guest() is also
922 * called from thread_state_suspend() and thread_state_free().
923 */
924 if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
925 virt_unset_guest();
926 }
927
928 static void __maybe_unused
929 optee_spmc_lsp_handle_direct_request(struct thread_smc_1_2_regs *args)
930 {
931 if (args->a2 & FFA_MSG_FLAG_FRAMEWORK)
932 handle_framework_direct_request(args);
933 else
934 set_simple_ret_val(args, FFA_INVALID_PARAMETERS);
935 }
936
937 static void handle_direct_request(struct thread_smc_1_2_regs *args)
938 {
939 struct spmc_lsp_desc *lsp = spmc_find_lsp_by_sp_id(FFA_DST(args->a1));
940
941 if (lsp) {
942 lsp->direct_req(args);
943 } else {
944 int rc = spmc_sp_start_thread(args);
945
946 /*
947 * spmc_sp_start_thread() returns here if the SPs aren't
948 * supported or if all threads are busy.
949 */
950 set_simple_ret_val(args, rc);
951 }
952 }
953
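/*
 * Parse the FF-A memory transaction descriptor in @buf into the version
 * independent struct ffa_mem_transaction_x. From FF-A 1.1 the size and
 * offset of the endpoint memory access descriptor array are carried in the
 * descriptor itself, in FF-A 1.0 they are implied by the 1.0 layout.
 */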
954 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
955 struct ffa_mem_transaction_x *trans)
956 {
957 uint16_t mem_reg_attr = 0;
958 uint32_t flags = 0;
959 uint32_t count = 0;
960 uint32_t offs = 0;
961 uint32_t size = 0;
962 size_t n = 0;
963
964 if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
965 return FFA_INVALID_PARAMETERS;
966
967 if (ffa_vers >= FFA_VERSION_1_1) {
968 struct ffa_mem_transaction_1_1 *descr = NULL;
969
970 if (blen < sizeof(*descr))
971 return FFA_INVALID_PARAMETERS;
972
973 descr = buf;
974 trans->sender_id = READ_ONCE(descr->sender_id);
975 mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
976 flags = READ_ONCE(descr->flags);
977 trans->global_handle = READ_ONCE(descr->global_handle);
978 trans->tag = READ_ONCE(descr->tag);
979
980 count = READ_ONCE(descr->mem_access_count);
981 size = READ_ONCE(descr->mem_access_size);
982 offs = READ_ONCE(descr->mem_access_offs);
983 } else {
984 struct ffa_mem_transaction_1_0 *descr = NULL;
985
986 if (blen < sizeof(*descr))
987 return FFA_INVALID_PARAMETERS;
988
989 descr = buf;
990 trans->sender_id = READ_ONCE(descr->sender_id);
991 mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
992 flags = READ_ONCE(descr->flags);
993 trans->global_handle = READ_ONCE(descr->global_handle);
994 trans->tag = READ_ONCE(descr->tag);
995
996 count = READ_ONCE(descr->mem_access_count);
997 size = sizeof(struct ffa_mem_access);
998 offs = offsetof(struct ffa_mem_transaction_1_0,
999 mem_access_array);
1000 }
1001
1002 if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
1003 size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
1004 return FFA_INVALID_PARAMETERS;
1005
1006 /* Check that the endpoint memory access descriptor array fits */
1007 if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
1008 n > blen)
1009 return FFA_INVALID_PARAMETERS;
1010
1011 trans->mem_reg_attr = mem_reg_attr;
1012 trans->flags = flags;
1013 trans->mem_access_size = size;
1014 trans->mem_access_count = count;
1015 trans->mem_access_offs = offs;
1016 return 0;
1017 }
1018
1019 #if defined(CFG_CORE_SEL1_SPMC)
1020 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
1021 unsigned int mem_access_count, uint8_t *acc_perms,
1022 unsigned int *region_offs)
1023 {
1024 struct ffa_mem_access_perm *descr = NULL;
1025 struct ffa_mem_access *mem_acc = NULL;
1026 unsigned int n = 0;
1027
1028 for (n = 0; n < mem_access_count; n++) {
1029 mem_acc = (void *)(mem_acc_base + mem_access_size * n);
1030 descr = &mem_acc->access_perm;
1031 if (READ_ONCE(descr->endpoint_id) == optee_core_lsp.sp_id) {
1032 *acc_perms = READ_ONCE(descr->perm);
1033 *region_offs = READ_ONCE(mem_acc[n].region_offs);
1034 return 0;
1035 }
1036 }
1037
1038 return FFA_INVALID_PARAMETERS;
1039 }
1040
1041 static int mem_op_init(bool mem_share, struct ffa_mem_transaction_x *mem_trans,
1042 void *buf, size_t blen, unsigned int *page_count,
1043 unsigned int *region_count, size_t *addr_range_offs)
1044 {
1045 const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
1046 struct ffa_mem_region *region_descr = NULL;
1047 unsigned int region_descr_offs = 0;
1048 uint16_t exp_mem_reg_attr = 0;
1049 uint8_t mem_acc_perm = 0;
1050 size_t n = 0;
1051
1052 if (mem_share)
1053 exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1054 if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
1055 return FFA_INVALID_PARAMETERS;
1056
1057 /* Check that the access permissions match what's expected */
1058 if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
1059 mem_trans->mem_access_size,
1060 mem_trans->mem_access_count,
1061 &mem_acc_perm, &region_descr_offs) ||
1062 mem_acc_perm != exp_mem_acc_perm)
1063 return FFA_INVALID_PARAMETERS;
1064
1065 /* Check that the Composite memory region descriptor fits */
1066 if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
1067 n > blen)
1068 return FFA_INVALID_PARAMETERS;
1069
1070 if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
1071 struct ffa_mem_region))
1072 return FFA_INVALID_PARAMETERS;
1073
1074 region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
1075 region_descr_offs);
1076 *page_count = READ_ONCE(region_descr->total_page_count);
1077 *region_count = READ_ONCE(region_descr->address_range_count);
1078 *addr_range_offs = n;
1079 return 0;
1080 }
1081
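/*
 * Consume up to s->region_count struct ffa_address_range entries from @buf
 * and add the pages to s->mf. Returns the number of bytes consumed if more
 * address ranges are expected in a later fragment, 0 when the whole
 * descriptor has been consumed, or a negative FFA_* error code.
 */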
1082 static int add_mem_op_helper(struct mem_op_state *s, void *buf, size_t flen)
1083 {
1084 unsigned int region_count = flen / sizeof(struct ffa_address_range);
1085 struct ffa_address_range *arange = NULL;
1086 unsigned int n = 0;
1087
1088 if (region_count > s->region_count)
1089 region_count = s->region_count;
1090
1091 if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1092 return FFA_INVALID_PARAMETERS;
1093 arange = buf;
1094
1095 for (n = 0; n < region_count; n++) {
1096 unsigned int page_count = READ_ONCE(arange[n].page_count);
1097 uint64_t addr = READ_ONCE(arange[n].address);
1098
1099 if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1100 addr, page_count))
1101 return FFA_INVALID_PARAMETERS;
1102 }
1103
1104 s->region_count -= region_count;
1105 if (s->region_count)
1106 return region_count * sizeof(*arange);
1107
1108 if (s->current_page_idx != s->page_count)
1109 return FFA_INVALID_PARAMETERS;
1110
1111 return 0;
1112 }
1113
1114 static int add_mem_op_frag(struct mem_frag_state *s, void *buf, size_t flen)
1115 {
1116 int rc = 0;
1117
1118 rc = add_mem_op_helper(&s->op, buf, flen);
1119 if (rc >= 0) {
1120 if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1121 /* We're not at the end of the descriptor yet */
1122 if (s->op.region_count)
1123 return s->frag_offset;
1124
1125 /* We're done */
1126 rc = 0;
1127 } else {
1128 rc = FFA_INVALID_PARAMETERS;
1129 }
1130 }
1131
1132 SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1133 if (rc < 0) {
1134 mobj_ffa_sel1_spmc_delete(s->op.mf);
1135 } else {
1136 if (mobj_ffa_push_to_inactive(s->op.mf)) {
1137 rc = FFA_INVALID_PARAMETERS;
1138 mobj_ffa_sel1_spmc_delete(s->op.mf);
1139 }
1140 }
1141 free(s);
1142
1143 return rc;
1144 }
1145
1146 static bool is_sp_op(struct ffa_mem_transaction_x *mem_trans, void *buf)
1147 {
1148 struct ffa_mem_access_perm *perm = NULL;
1149 struct ffa_mem_access *mem_acc = NULL;
1150
1151 if (!IS_ENABLED(CFG_SECURE_PARTITION))
1152 return false;
1153
1154 if (mem_trans->mem_access_count < 1)
1155 return false;
1156
1157 mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1158 perm = &mem_acc->access_perm;
1159
1160 /*
1161 * perm->endpoint_id is read here only to check if the endpoint is
1162 * OP-TEE. We do read it again later on, but there are some additional
1163 * checks there to make sure that the data is correct.
1164 */
1165 return READ_ONCE(perm->endpoint_id) != optee_core_lsp.sp_id;
1166 }
1167
1168 static int add_mem_op(bool mem_share, struct ffa_mem_transaction_x *mem_trans,
1169 tee_mm_entry_t *mm, void *buf, size_t blen, size_t flen,
1170 uint64_t *global_handle)
1171 {
1172 int rc = 0;
1173 struct mem_op_state op = { .mem_share = mem_share, };
1174 size_t addr_range_offs = 0;
1175 uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1176 enum mobj_use_case use_case = MOBJ_USE_CASE_NS_SHM;
1177 size_t n = 0;
1178
1179 rc = mem_op_init(mem_share, mem_trans, buf, flen, &op.page_count,
1180 &op.region_count, &addr_range_offs);
1181 if (rc)
1182 return rc;
1183
1184 if (!op.page_count || !op.region_count)
1185 return FFA_INVALID_PARAMETERS;
1186
1187 if (MUL_OVERFLOW(op.region_count,
1188 sizeof(struct ffa_address_range), &n) ||
1189 ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1190 return FFA_INVALID_PARAMETERS;
1191
1192 if (mem_trans->global_handle)
1193 cookie = mem_trans->global_handle;
1194 if (!mem_share)
1195 use_case = mem_trans->tag;
1196 op.mf = mobj_ffa_sel1_spmc_new(cookie, op.page_count, use_case);
1197 if (!op.mf)
1198 return FFA_NO_MEMORY;
1199
1200 if (flen != blen) {
1201 struct mem_frag_state *s = calloc(1, sizeof(*s));
1202
1203 if (!s) {
1204 rc = FFA_NO_MEMORY;
1205 goto err;
1206 }
1207 s->op = op;
1208 s->mm = mm;
1209 s->frag_offset = addr_range_offs;
1210
1211 SLIST_INSERT_HEAD(&frag_state_head, s, link);
1212 rc = add_mem_op_frag(s, (char *)buf + addr_range_offs,
1213 flen - addr_range_offs);
1214
1215 if (rc >= 0)
1216 *global_handle = mobj_ffa_get_cookie(op.mf);
1217
1218 return rc;
1219 }
1220
1221 rc = add_mem_op_helper(&op, (char *)buf + addr_range_offs,
1222 flen - addr_range_offs);
1223 if (rc) {
1224 /*
1225 * A positive number of consumed bytes may be returned instead of
1226 * 0 for done, which for a non-fragmented operation is an error.
1227 */
1228 rc = FFA_INVALID_PARAMETERS;
1229 goto err;
1230 }
1231
1232 if (mobj_ffa_push_to_inactive(op.mf)) {
1233 rc = FFA_INVALID_PARAMETERS;
1234 goto err;
1235 }
1236 *global_handle = mobj_ffa_get_cookie(op.mf);
1237
1238 return 0;
1239 err:
1240 mobj_ffa_sel1_spmc_delete(op.mf);
1241 return rc;
1242 }
1243
1244 static int handle_mem_op_tmem(bool share_mem, paddr_t pbuf, size_t blen,
1245 size_t flen, unsigned int page_count,
1246 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1247 {
1248 struct ffa_mem_transaction_x mem_trans = { };
1249 int rc = 0;
1250 size_t len = 0;
1251 void *buf = NULL;
1252 tee_mm_entry_t *mm = NULL;
1253 vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1254
1255 if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1256 return FFA_INVALID_PARAMETERS;
1257 if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1258 return FFA_INVALID_PARAMETERS;
1259
1260 /*
1261 * Check that the length reported in flen is covered by len even
1262 * if the offset is taken into account.
1263 */
1264 if (len < flen || len - offs < flen)
1265 return FFA_INVALID_PARAMETERS;
1266
1267 mm = tee_mm_alloc(&core_virt_shm_pool, len);
1268 if (!mm)
1269 return FFA_NO_MEMORY;
1270
1271 if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1272 page_count, MEM_AREA_NSEC_SHM)) {
1273 rc = FFA_INVALID_PARAMETERS;
1274 goto out;
1275 }
1276 buf = (void *)(tee_mm_get_smem(mm) + offs);
1277
1278 cpu_spin_lock(&rxtx->spinlock);
1279 rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1280 if (rc)
1281 goto unlock;
1282
1283 if (is_sp_op(&mem_trans, buf)) {
1284 if (!share_mem) {
1285 rc = FFA_DENIED;
1286 goto unlock;
1287 }
1288 rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
1289 global_handle, NULL);
1290 goto unlock;
1291 }
1292
1293 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1294 virt_set_guest(mem_trans.sender_id)) {
1295 rc = FFA_DENIED;
1296 goto unlock;
1297 }
1298
1299 rc = add_mem_op(share_mem, &mem_trans, mm, buf, blen, flen,
1300 global_handle);
1301
1302 if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1303 virt_unset_guest();
1304
1305 unlock:
1306 cpu_spin_unlock(&rxtx->spinlock);
1307 if (rc > 0)
1308 return rc;
1309
1310 core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1311 out:
1312 tee_mm_free(mm);
1313 return rc;
1314 }
1315
1316 static int handle_mem_op_rxbuf(bool share_mem, size_t blen, size_t flen,
1317 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1318 {
1319 struct ffa_mem_transaction_x mem_trans = { };
1320 int rc = FFA_DENIED;
1321
1322 cpu_spin_lock(&rxtx->spinlock);
1323
1324 if (!rxtx->rx || flen > rxtx->size)
1325 goto out;
1326
1327 rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1328 &mem_trans);
1329 if (rc)
1330 goto out;
1331 if (is_sp_op(&mem_trans, rxtx->rx)) {
1332 if (!share_mem) {
1333 rc = FFA_DENIED;
1334 goto out;
1335 }
1336 rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
1337 global_handle, NULL);
1338 goto out;
1339 }
1340
1341 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1342 virt_set_guest(mem_trans.sender_id))
1343 goto out;
1344
1345 rc = add_mem_op(share_mem, &mem_trans, NULL, rxtx->rx, blen, flen,
1346 global_handle);
1347
1348 if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1349 virt_unset_guest();
1350
1351 out:
1352 cpu_spin_unlock(&rxtx->spinlock);
1353
1354 return rc;
1355 }
1356
1357 static void handle_mem_op(struct thread_smc_1_2_regs *args,
1358 struct ffa_rxtx *rxtx)
1359 {
1360 uint32_t tot_len = args->a1;
1361 uint32_t frag_len = args->a2;
1362 uint64_t addr = args->a3;
1363 uint32_t page_count = args->a4;
1364 uint32_t ret_w1 = 0;
1365 uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1366 uint32_t ret_w3 = 0;
1367 uint32_t ret_fid = FFA_ERROR;
1368 uint64_t global_handle = 0;
1369 bool share_mem = false;
1370 int rc = 0;
1371
1372 /* Check that the MBZs are indeed 0 */
1373 if (args->a5 || args->a6 || args->a7)
1374 goto out;
1375
1376 /* Check that fragment length doesn't exceed total length */
1377 if (frag_len > tot_len)
1378 goto out;
1379
1380 /* Check for 32-bit calling convention */
1381 if (!OPTEE_SMC_IS_64(args->a0))
1382 addr &= UINT32_MAX;
1383
1384 if (args->a0 == FFA_MEM_SHARE_32 || args->a0 == FFA_MEM_SHARE_64)
1385 share_mem = true;
1386 else
1387 share_mem = false;
1388
1389 if (!addr) {
1390 /*
1391 * The memory transaction descriptor is passed via our rx
1392 * buffer.
1393 */
1394 if (page_count)
1395 goto out;
1396 rc = handle_mem_op_rxbuf(share_mem, tot_len, frag_len,
1397 &global_handle, rxtx);
1398 } else {
1399 rc = handle_mem_op_tmem(share_mem, addr, tot_len, frag_len,
1400 page_count, &global_handle, rxtx);
1401 }
1402 if (rc < 0) {
1403 ret_w2 = rc;
1404 } else if (rc > 0) {
1405 ret_fid = FFA_MEM_FRAG_RX;
1406 ret_w3 = rc;
1407 reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1408 } else {
1409 ret_fid = FFA_SUCCESS_32;
1410 reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1411 }
1412 out:
1413 spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1414 }
1415
1416 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1417 {
1418 struct mem_frag_state *s = NULL;
1419
1420 SLIST_FOREACH(s, &frag_state_head, link)
1421 if (mobj_ffa_get_cookie(s->op.mf) == global_handle)
1422 return s;
1423
1424 return NULL;
1425 }
1426
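/*
 * FFA_MEM_FRAG_TX carries the next fragment of a memory transaction
 * descriptor that didn't fit in the initial FFA_MEM_SHARE/FFA_MEM_LEND
 * call. Depending on how much remains we either ask for another fragment
 * with FFA_MEM_FRAG_RX or complete the operation with FFA_SUCCESS.
 */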
1427 static void handle_mem_frag_tx(struct thread_smc_1_2_regs *args,
1428 struct ffa_rxtx *rxtx)
1429 {
1430 uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1431 size_t flen = args->a3;
1432 uint32_t endpoint_id = args->a4;
1433 struct mem_frag_state *s = NULL;
1434 tee_mm_entry_t *mm = NULL;
1435 unsigned int page_count = 0;
1436 void *buf = NULL;
1437 uint32_t ret_w1 = 0;
1438 uint32_t ret_w2 = 0;
1439 uint32_t ret_w3 = 0;
1440 uint32_t ret_fid = 0;
1441 int rc = 0;
1442
1443 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1444 uint16_t guest_id = endpoint_id >> 16;
1445
1446 if (!guest_id || virt_set_guest(guest_id)) {
1447 rc = FFA_INVALID_PARAMETERS;
1448 goto out_set_rc;
1449 }
1450 }
1451
1452 /*
1453 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1454 * requests.
1455 */
1456
1457 cpu_spin_lock(&rxtx->spinlock);
1458
1459 s = get_frag_state(global_handle);
1460 if (!s) {
1461 rc = FFA_INVALID_PARAMETERS;
1462 goto out;
1463 }
1464
1465 mm = s->mm;
1466 if (mm) {
1467 if (flen > tee_mm_get_bytes(mm)) {
1468 rc = FFA_INVALID_PARAMETERS;
1469 goto out;
1470 }
1471 page_count = s->op.page_count;
1472 buf = (void *)tee_mm_get_smem(mm);
1473 } else {
1474 if (flen > rxtx->size) {
1475 rc = FFA_INVALID_PARAMETERS;
1476 goto out;
1477 }
1478 buf = rxtx->rx;
1479 }
1480
1481 rc = add_mem_op_frag(s, buf, flen);
1482 out:
1483 if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1484 virt_unset_guest();
1485
1486 cpu_spin_unlock(&rxtx->spinlock);
1487
1488 if (rc <= 0 && mm) {
1489 core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1490 tee_mm_free(mm);
1491 }
1492
1493 out_set_rc:
1494 if (rc < 0) {
1495 ret_fid = FFA_ERROR;
1496 ret_w2 = rc;
1497 } else if (rc > 0) {
1498 ret_fid = FFA_MEM_FRAG_RX;
1499 ret_w3 = rc;
1500 reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1501 } else {
1502 ret_fid = FFA_SUCCESS_32;
1503 reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1504 }
1505
1506 spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1507 }
1508
1509 static void handle_mem_reclaim(struct thread_smc_1_2_regs *args)
1510 {
1511 int rc = FFA_INVALID_PARAMETERS;
1512 uint64_t cookie = 0;
1513
1514 if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1515 goto out;
1516
1517 cookie = reg_pair_to_64(args->a2, args->a1);
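/*
 * With NS-virtualization the cookie identifies the owning guest,
 * either directly in the FFA_MEMORY_HANDLE_PRTN field or, when the
 * hypervisor bit is set, via a lookup of the cookie.
 */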
1518 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1519 uint16_t guest_id = 0;
1520
1521 if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1522 guest_id = virt_find_guest_by_cookie(cookie);
1523 } else {
1524 guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1525 FFA_MEMORY_HANDLE_PRTN_MASK;
1526 }
1527 if (!guest_id)
1528 goto out;
1529 if (virt_set_guest(guest_id)) {
1530 if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
1531 cookie))
1532 rc = FFA_OK;
1533 goto out;
1534 }
1535 }
1536
1537 switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1538 case TEE_SUCCESS:
1539 rc = FFA_OK;
1540 break;
1541 case TEE_ERROR_ITEM_NOT_FOUND:
1542 DMSG("cookie %#"PRIx64" not found", cookie);
1543 rc = FFA_INVALID_PARAMETERS;
1544 break;
1545 default:
1546 DMSG("cookie %#"PRIx64" busy", cookie);
1547 rc = FFA_DENIED;
1548 break;
1549 }
1550
1551 if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1552 virt_unset_guest();
1553
1554 out:
1555 set_simple_ret_val(args, rc);
1556 }
1557
1558 static void handle_notification_bitmap_create(struct thread_smc_1_2_regs *args)
1559 {
1560 uint32_t ret_val = FFA_INVALID_PARAMETERS;
1561 uint32_t ret_fid = FFA_ERROR;
1562 uint32_t old_itr_status = 0;
1563
1564 if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1565 !args->a5 && !args->a6 && !args->a7) {
1566 struct guest_partition *prtn = NULL;
1567 struct notif_vm_bitmap *nvb = NULL;
1568 uint16_t vm_id = args->a1;
1569
1570 prtn = virt_get_guest(vm_id);
1571 nvb = get_notif_vm_bitmap(prtn, vm_id);
1572 if (!nvb) {
1573 ret_val = FFA_INVALID_PARAMETERS;
1574 goto out_virt_put;
1575 }
1576
1577 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1578
1579 if (nvb->initialized) {
1580 ret_val = FFA_DENIED;
1581 goto out_unlock;
1582 }
1583
1584 nvb->initialized = true;
1585 nvb->do_bottom_half_value = -1;
1586 ret_val = FFA_OK;
1587 ret_fid = FFA_SUCCESS_32;
1588 out_unlock:
1589 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1590 out_virt_put:
1591 virt_put_guest(prtn);
1592 }
1593
1594 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1595 }
1596
1597 static void handle_notification_bitmap_destroy(struct thread_smc_1_2_regs *args)
1598 {
1599 uint32_t ret_val = FFA_INVALID_PARAMETERS;
1600 uint32_t ret_fid = FFA_ERROR;
1601 uint32_t old_itr_status = 0;
1602
1603 if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1604 !args->a5 && !args->a6 && !args->a7) {
1605 struct guest_partition *prtn = NULL;
1606 struct notif_vm_bitmap *nvb = NULL;
1607 uint16_t vm_id = args->a1;
1608
1609 prtn = virt_get_guest(vm_id);
1610 nvb = get_notif_vm_bitmap(prtn, vm_id);
1611 if (!nvb) {
1612 ret_val = FFA_INVALID_PARAMETERS;
1613 goto out_virt_put;
1614 }
1615
1616 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1617
1618 if (nvb->pending || nvb->bound) {
1619 ret_val = FFA_DENIED;
1620 goto out_unlock;
1621 }
1622
1623 memset(nvb, 0, sizeof(*nvb));
1624 ret_val = FFA_OK;
1625 ret_fid = FFA_SUCCESS_32;
1626 out_unlock:
1627 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1628 out_virt_put:
1629 virt_put_guest(prtn);
1630 }
1631
1632 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1633 }
1634
1635 static void handle_notification_bind(struct thread_smc_1_2_regs *args)
1636 {
1637 uint32_t ret_val = FFA_INVALID_PARAMETERS;
1638 struct guest_partition *prtn = NULL;
1639 struct notif_vm_bitmap *nvb = NULL;
1640 uint32_t ret_fid = FFA_ERROR;
1641 uint32_t old_itr_status = 0;
1642 uint64_t bitmap = 0;
1643 uint16_t vm_id = 0;
1644
1645 if (args->a5 || args->a6 || args->a7)
1646 goto out;
1647 if (args->a2) {
1648 /* We only deal with global notifications */
1649 ret_val = FFA_DENIED;
1650 goto out;
1651 }
1652
1653 /* The destination of the eventual notification */
1654 vm_id = FFA_DST(args->a1);
1655 bitmap = reg_pair_to_64(args->a4, args->a3);
1656
1657 prtn = virt_get_guest(vm_id);
1658 nvb = get_notif_vm_bitmap(prtn, vm_id);
1659 if (!nvb) {
1660 ret_val = FFA_INVALID_PARAMETERS;
1661 goto out_virt_put;
1662 }
1663
1664 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1665
1666 if ((bitmap & nvb->bound)) {
1667 ret_val = FFA_DENIED;
1668 } else {
1669 nvb->bound |= bitmap;
1670 ret_val = FFA_OK;
1671 ret_fid = FFA_SUCCESS_32;
1672 }
1673
1674 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1675 out_virt_put:
1676 virt_put_guest(prtn);
1677 out:
1678 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1679 }
1680
1681 static void handle_notification_unbind(struct thread_smc_1_2_regs *args)
1682 {
1683 uint32_t ret_val = FFA_INVALID_PARAMETERS;
1684 struct guest_partition *prtn = NULL;
1685 struct notif_vm_bitmap *nvb = NULL;
1686 uint32_t ret_fid = FFA_ERROR;
1687 uint32_t old_itr_status = 0;
1688 uint64_t bitmap = 0;
1689 uint16_t vm_id = 0;
1690
1691 if (args->a2 || args->a5 || args->a6 || args->a7)
1692 goto out;
1693
1694 /* The destination of the eventual notification */
1695 vm_id = FFA_DST(args->a1);
1696 bitmap = reg_pair_to_64(args->a4, args->a3);
1697
1698 prtn = virt_get_guest(vm_id);
1699 nvb = get_notif_vm_bitmap(prtn, vm_id);
1700 if (!nvb) {
1701 ret_val = FFA_INVALID_PARAMETERS;
1702 goto out_virt_put;
1703 }
1704
1705 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1706
1707 if (bitmap & nvb->pending) {
1708 ret_val = FFA_DENIED;
1709 } else {
1710 nvb->bound &= ~bitmap;
1711 ret_val = FFA_OK;
1712 ret_fid = FFA_SUCCESS_32;
1713 }
1714
1715 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1716 out_virt_put:
1717 virt_put_guest(prtn);
1718 out:
1719 spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1720 }
1721
1722 static void handle_notification_get(struct thread_smc_1_2_regs *args)
1723 {
1724 uint32_t w2 = FFA_INVALID_PARAMETERS;
1725 struct guest_partition *prtn = NULL;
1726 struct notif_vm_bitmap *nvb = NULL;
1727 uint32_t ret_fid = FFA_ERROR;
1728 uint32_t old_itr_status = 0;
1729 uint16_t vm_id = 0;
1730 uint32_t w3 = 0;
1731
1732 if (args->a5 || args->a6 || args->a7)
1733 goto out;
1734 if (!(args->a2 & 0x1)) {
1735 ret_fid = FFA_SUCCESS_32;
1736 w2 = 0;
1737 goto out;
1738 }
1739 vm_id = FFA_DST(args->a1);
1740
1741 prtn = virt_get_guest(vm_id);
1742 nvb = get_notif_vm_bitmap(prtn, vm_id);
1743 if (!nvb)
1744 goto out_virt_put;
1745
1746 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1747
1748 reg_pair_from_64(nvb->pending, &w3, &w2);
1749 nvb->pending = 0;
1750 ret_fid = FFA_SUCCESS_32;
1751
1752 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1753 out_virt_put:
1754 virt_put_guest(prtn);
1755 out:
1756 spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1757 }
1758
1759 struct notif_info_get_state {
1760 struct thread_smc_1_2_regs *args;
1761 unsigned int ids_per_reg;
1762 unsigned int ids_count;
1763 unsigned int id_pos;
1764 unsigned int count;
1765 unsigned int max_list_count;
1766 unsigned int list_count;
1767 };
1768
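/*
 * A successful FFA_NOTIFICATION_INFO_GET packs a list of 16-bit IDs into
 * w3-w7, two IDs per register for the 32-bit call and four for the 64-bit
 * call, while w2 carries the number of lists and the size of each list.
 * Since only global notifications are supported here each list consists of
 * a single ID, the guest (VM) ID.
 */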
1769 static bool add_id_in_regs(struct notif_info_get_state *state,
1770 uint16_t id)
1771 {
1772 unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
1773 unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;
1774
1775 if (reg_idx > 7)
1776 return false;
1777
1778 state->args->a[reg_idx] &= ~SHIFT_U64(0xffff, reg_shift);
1779 state->args->a[reg_idx] |= (unsigned long)id << reg_shift;
1780
1781 state->id_pos++;
1782 state->count++;
1783 return true;
1784 }
1785
1786 static bool add_id_count(struct notif_info_get_state *state)
1787 {
1788 assert(state->list_count < state->max_list_count &&
1789 state->count >= 1 && state->count <= 4);
1790
1791 state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
1792 state->list_count++;
1793 state->count = 0;
1794
1795 return state->list_count < state->max_list_count;
1796 }
1797
1798 static bool add_nvb_to_state(struct notif_info_get_state *state,
1799 uint16_t guest_id, struct notif_vm_bitmap *nvb)
1800 {
1801 if (!nvb->pending)
1802 return true;
1803 /*
1804 * Add only the guest_id, meaning a global notification for this
1805 * guest.
1806 *
1807 * If there were notifications for one or more specific vCPUs we'd
1808 * add those before calling add_id_count(), but that's not supported.
1809 */
1810 return add_id_in_regs(state, guest_id) && add_id_count(state);
1811 }
1812
1813 static void handle_notification_info_get(struct thread_smc_1_2_regs *args)
1814 {
1815 struct notif_info_get_state state = { .args = args };
1816 uint32_t ffa_res = FFA_INVALID_PARAMETERS;
1817 struct guest_partition *prtn = NULL;
1818 struct notif_vm_bitmap *nvb = NULL;
1819 uint32_t more_pending_flag = 0;
1820 uint32_t itr_state = 0;
1821 uint16_t guest_id = 0;
1822
1823 if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1824 args->a6 || args->a7)
1825 goto err;
1826
1827 if (OPTEE_SMC_IS_64(args->a0)) {
1828 spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
1829 state.ids_per_reg = 4;
1830 state.max_list_count = 31;
1831 } else {
1832 spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
1833 state.ids_per_reg = 2;
1834 state.max_list_count = 15;
1835 }
1836
1837 while (true) {
1838 /*
1839 * With NS-Virtualization we need to go through all
1840 * partitions to collect the notification bitmaps, without it
1841 * we just check the only notification bitmap we have.
1842 */
1843 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1844 prtn = virt_next_guest(prtn);
1845 if (!prtn)
1846 break;
1847 guest_id = virt_get_guest_id(prtn);
1848 }
1849 nvb = get_notif_vm_bitmap(prtn, guest_id);
1850
1851 itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
1852 if (!add_nvb_to_state(&state, guest_id, nvb))
1853 more_pending_flag = BIT(0);
1854 cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);
1855
1856 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
1857 break;
1858 }
1859 virt_put_guest(prtn);
1860
1861 if (!state.id_pos) {
1862 ffa_res = FFA_NO_DATA;
1863 goto err;
1864 }
1865 args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
1866 (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
1867 more_pending_flag;
1868 return;
1869 err:
1870 spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
1871 }
1872
1873 void thread_spmc_set_async_notif_intid(int intid)
1874 {
1875 assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1876 notif_intid = intid;
1877 spmc_notif_is_ready = true;
1878 DMSG("Asynchronous notifications are ready");
1879 }
1880
1881 void notif_send_async(uint32_t value, uint16_t guest_id)
1882 {
1883 struct guest_partition *prtn = NULL;
1884 struct notif_vm_bitmap *nvb = NULL;
1885 uint32_t old_itr_status = 0;
1886
1887 prtn = virt_get_guest(guest_id);
1888 nvb = get_notif_vm_bitmap(prtn, guest_id);
1889
1890 if (nvb) {
1891 old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1892 assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1893 spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
1894 notif_intid >= 0);
1895 nvb->pending |= BIT64(nvb->do_bottom_half_value);
1896 interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1897 ITR_CPU_MASK_TO_THIS_CPU);
1898 cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1899 }
1900
1901 virt_put_guest(prtn);
1902 }
1903 #else
1904 void notif_send_async(uint32_t value, uint16_t guest_id)
1905 {
1906 struct guest_partition *prtn = NULL;
1907 struct notif_vm_bitmap *nvb = NULL;
1908 /* global notification, delay notification interrupt */
1909 uint32_t flags = BIT32(1);
1910 int res = 0;
1911
1912 prtn = virt_get_guest(guest_id);
1913 nvb = get_notif_vm_bitmap(prtn, guest_id);
1914
1915 if (nvb) {
1916 assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1917 spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
1918 res = ffa_set_notification(guest_id, optee_core_lsp.sp_id,
1919 flags,
1920 BIT64(nvb->do_bottom_half_value));
1921 if (res) {
1922 EMSG("notification set failed with error %d", res);
1923 panic();
1924 }
1925 }
1926
1927 virt_put_guest(prtn);
1928 }
1929 #endif
1930
1931 /* Only called from assembly */
1932 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args);
1933 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args)
1934 {
1935 assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1936 switch (args->a0) {
1937 #if defined(CFG_CORE_SEL1_SPMC)
1938 case FFA_FEATURES:
1939 handle_features(args);
1940 break;
1941 case FFA_SPM_ID_GET:
1942 spmc_handle_spm_id_get(args);
1943 break;
1944 #ifdef ARM64
1945 case FFA_RXTX_MAP_64:
1946 #endif
1947 case FFA_RXTX_MAP_32:
1948 spmc_handle_rxtx_map(args, &my_rxtx);
1949 break;
1950 case FFA_RXTX_UNMAP:
1951 spmc_handle_rxtx_unmap(args, &my_rxtx);
1952 break;
1953 case FFA_RX_RELEASE:
1954 spmc_handle_rx_release(args, &my_rxtx);
1955 break;
1956 case FFA_PARTITION_INFO_GET:
1957 spmc_handle_partition_info_get(args, &my_rxtx);
1958 break;
1959 case FFA_RUN:
1960 spmc_handle_run(args);
1961 break;
1962 #endif /*CFG_CORE_SEL1_SPMC*/
1963 case FFA_INTERRUPT:
1964 if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1965 spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1966 0, 0);
1967 else
1968 spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1969 break;
1970 #ifdef ARM64
1971 case FFA_MSG_SEND_DIRECT_REQ_64:
1972 #endif
1973 case FFA_MSG_SEND_DIRECT_REQ_32:
1974 handle_direct_request(args);
1975 break;
1976 #if defined(CFG_CORE_SEL1_SPMC)
1977 #ifdef ARM64
1978 case FFA_MEM_SHARE_64:
1979 #endif
1980 case FFA_MEM_SHARE_32:
1981 #ifdef ARM64
1982 case FFA_MEM_LEND_64:
1983 #endif
1984 case FFA_MEM_LEND_32:
1985 handle_mem_op(args, &my_rxtx);
1986 break;
1987 case FFA_MEM_RECLAIM:
1988 if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1989 !ffa_mem_reclaim(args, NULL))
1990 handle_mem_reclaim(args);
1991 break;
1992 case FFA_MEM_FRAG_TX:
1993 handle_mem_frag_tx(args, &my_rxtx);
1994 break;
1995 case FFA_NOTIFICATION_BITMAP_CREATE:
1996 handle_notification_bitmap_create(args);
1997 break;
1998 case FFA_NOTIFICATION_BITMAP_DESTROY:
1999 handle_notification_bitmap_destroy(args);
2000 break;
2001 case FFA_NOTIFICATION_BIND:
2002 handle_notification_bind(args);
2003 break;
2004 case FFA_NOTIFICATION_UNBIND:
2005 handle_notification_unbind(args);
2006 break;
2007 case FFA_NOTIFICATION_GET:
2008 handle_notification_get(args);
2009 break;
2010 #ifdef ARM64
2011 case FFA_NOTIFICATION_INFO_GET_64:
2012 #endif
2013 case FFA_NOTIFICATION_INFO_GET_32:
2014 handle_notification_info_get(args);
2015 break;
2016 #endif /*CFG_CORE_SEL1_SPMC*/
2017 case FFA_ERROR:
2018 EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
2019 if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
2020 /*
2021 * The SPMC would only return another FFA_ERROR back, so it's
2022 * better to panic() now than to flood the log.
2023 */
2024 panic("FFA_ERROR from SPMC is fatal");
2025 }
2026 spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
2027 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
2028 break;
2029 default:
2030 EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
2031 set_simple_ret_val(args, FFA_NOT_SUPPORTED);
2032 }
2033 }
2034
2035 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
2036 {
2037 size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2038 struct thread_ctx *thr = threads + thread_get_id();
2039 TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
2040 struct optee_msg_arg *arg = NULL;
2041 struct mobj *mobj = NULL;
2042 uint32_t num_params = 0;
2043 size_t sz = 0;
2044
2045 mobj = mobj_ffa_get_by_cookie(cookie, 0);
2046 if (!mobj) {
2047 EMSG("Can't find cookie %#"PRIx64, cookie);
2048 return TEE_ERROR_BAD_PARAMETERS;
2049 }
2050
2051 res = mobj_inc_map(mobj);
2052 if (res)
2053 goto out_put_mobj;
2054
2055 res = TEE_ERROR_BAD_PARAMETERS;
2056 arg = mobj_get_va(mobj, offset, sizeof(*arg));
2057 if (!arg)
2058 goto out_dec_map;
2059
2060 num_params = READ_ONCE(arg->num_params);
2061 if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
2062 goto out_dec_map;
2063
2064 sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
2065
2066 thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
2067 if (!thr->rpc_arg)
2068 goto out_dec_map;
2069
2070 virt_on_stdcall();
2071 res = tee_entry_std(arg, num_params);
2072
2073 thread_rpc_shm_cache_clear(&thr->shm_cache);
2074 thr->rpc_arg = NULL;
2075
2076 out_dec_map:
2077 mobj_dec_map(mobj);
2078 out_put_mobj:
2079 mobj_put(mobj);
2080 return res;
2081 }
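/*
 * Layout sketch of the shared buffer consumed above (derived from the
 * offsets used in yielding_call_with_arg()):
 *
 *	cookie (FF-A memory handle)
 *	  +- offset      -> struct optee_msg_arg with arg->num_params
 *	  |                 parameters from the client
 *	  +- offset + sz -> RPC argument area sized for
 *	                    THREAD_RPC_MAX_NUM_PARAMS parameters
 *
 * where sz = OPTEE_MSG_GET_ARG_SIZE(arg->num_params). The RPC area is
 * cached in the per-thread rpc_arg pointer for the duration of the call.
 */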
2082
2083 /*
2084 * Helper routine for the assembly function thread_std_smc_entry()
2085 *
2086 * Note: this function is weak just to make link_dummies_paged.c happy.
2087 */
2088 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
2089 uint32_t a2, uint32_t a3,
2090 uint32_t a4, uint32_t a5 __unused)
2091 {
2092 /*
2093 * Arguments are supplied from handle_yielding_call() as:
2094 * a0 <- w1
2095 * a1 <- w3
2096 * a2 <- w4
2097 * a3 <- w5
2098 * a4 <- w6
2099 * a5 <- w7
2100 */
2101 thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
2102 if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
2103 return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
2104 return FFA_DENIED;
2105 }
2106
2107 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
2108 {
2109 uint64_t offs = tpm->u.memref.offs;
2110
2111 param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
2112 OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
2113
2114 param->u.fmem.offs_low = offs;
2115 param->u.fmem.offs_high = offs >> 32;
2116 if (param->u.fmem.offs_high != offs >> 32)
2117 return false;
2118
2119 param->u.fmem.size = tpm->u.memref.size;
2120 if (tpm->u.memref.mobj) {
2121 uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
2122
2123 /* If a mobj is passed it had better be one with a valid cookie. */
2124 if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
2125 return false;
2126 param->u.fmem.global_id = cookie;
2127 } else {
2128 param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
2129 }
2130
2131 return true;
2132 }
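/*
 * Example (illustrative values): a THREAD_PARAM_ATTR_MEMREF_INOUT with
 * offs = 0x100002000 is translated to OPTEE_MSG_ATTR_TYPE_FMEM_INOUT with
 * offs_low = 0x2000 and offs_high = 0x1; an offset whose upper bits don't
 * fit in offs_high is rejected. The mobj's FF-A cookie, when present, is
 * passed as the fmem global_id so normal world can identify the shared
 * buffer.
 */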
2133
2134 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
2135 struct thread_param *params,
2136 struct optee_msg_arg **arg_ret)
2137 {
2138 size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2139 struct thread_ctx *thr = threads + thread_get_id();
2140 struct optee_msg_arg *arg = thr->rpc_arg;
2141
2142 if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
2143 return TEE_ERROR_BAD_PARAMETERS;
2144
2145 if (!arg) {
2146 EMSG("rpc_arg not set");
2147 return TEE_ERROR_GENERIC;
2148 }
2149
2150 memset(arg, 0, sz);
2151 arg->cmd = cmd;
2152 arg->num_params = num_params;
2153 arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
2154
2155 for (size_t n = 0; n < num_params; n++) {
2156 switch (params[n].attr) {
2157 case THREAD_PARAM_ATTR_NONE:
2158 arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
2159 break;
2160 case THREAD_PARAM_ATTR_VALUE_IN:
2161 case THREAD_PARAM_ATTR_VALUE_OUT:
2162 case THREAD_PARAM_ATTR_VALUE_INOUT:
2163 arg->params[n].attr = params[n].attr -
2164 THREAD_PARAM_ATTR_VALUE_IN +
2165 OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
2166 arg->params[n].u.value.a = params[n].u.value.a;
2167 arg->params[n].u.value.b = params[n].u.value.b;
2168 arg->params[n].u.value.c = params[n].u.value.c;
2169 break;
2170 case THREAD_PARAM_ATTR_MEMREF_IN:
2171 case THREAD_PARAM_ATTR_MEMREF_OUT:
2172 case THREAD_PARAM_ATTR_MEMREF_INOUT:
2173 if (!set_fmem(arg->params + n, params + n))
2174 return TEE_ERROR_BAD_PARAMETERS;
2175 break;
2176 default:
2177 return TEE_ERROR_BAD_PARAMETERS;
2178 }
2179 }
2180
2181 if (arg_ret)
2182 *arg_ret = arg;
2183
2184 return TEE_SUCCESS;
2185 }
2186
2187 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
2188 struct thread_param *params)
2189 {
2190 for (size_t n = 0; n < num_params; n++) {
2191 switch (params[n].attr) {
2192 case THREAD_PARAM_ATTR_VALUE_OUT:
2193 case THREAD_PARAM_ATTR_VALUE_INOUT:
2194 params[n].u.value.a = arg->params[n].u.value.a;
2195 params[n].u.value.b = arg->params[n].u.value.b;
2196 params[n].u.value.c = arg->params[n].u.value.c;
2197 break;
2198 case THREAD_PARAM_ATTR_MEMREF_OUT:
2199 case THREAD_PARAM_ATTR_MEMREF_INOUT:
2200 params[n].u.memref.size = arg->params[n].u.fmem.size;
2201 break;
2202 default:
2203 break;
2204 }
2205 }
2206
2207 return arg->ret;
2208 }
2209
2210 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
2211 struct thread_param *params)
2212 {
2213 struct thread_rpc_arg rpc_arg = { .call = {
2214 .w1 = thread_get_tsd()->rpc_target_info,
2215 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2216 },
2217 };
2218 struct optee_msg_arg *arg = NULL;
2219 uint32_t ret = 0;
2220
2221 ret = get_rpc_arg(cmd, num_params, params, &arg);
2222 if (ret)
2223 return ret;
2224
2225 thread_rpc(&rpc_arg);
2226
2227 return get_rpc_arg_res(arg, num_params, params);
2228 }
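/*
 * Usage sketch (illustrative; the command and values are hypothetical):
 *
 *	struct thread_param params[1] = {
 *		THREAD_PARAM_VALUE(INOUT, 0, 0, 0),
 *	};
 *	uint32_t res = thread_rpc_cmd(cmd, ARRAY_SIZE(params), params);
 *
 * get_rpc_arg() serializes the parameters into the per-thread RPC buffer,
 * thread_rpc() suspends the thread until normal world has serviced the
 * request and get_rpc_arg_res() copies back any OUT/INOUT results.
 */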
2229
2230 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
2231 {
2232 struct thread_rpc_arg rpc_arg = { .call = {
2233 .w1 = thread_get_tsd()->rpc_target_info,
2234 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2235 },
2236 };
2237 struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
2238 uint32_t res2 = 0;
2239 uint32_t res = 0;
2240
2241 DMSG("freeing cookie %#"PRIx64, cookie);
2242
2243 res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
2244
2245 mobj_put(mobj);
2246 res2 = mobj_ffa_unregister_by_cookie(cookie);
2247 if (res2)
2248 DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
2249 cookie, res2);
2250 if (!res)
2251 thread_rpc(&rpc_arg);
2252 }
2253
2254 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
2255 {
2256 struct thread_rpc_arg rpc_arg = { .call = {
2257 .w1 = thread_get_tsd()->rpc_target_info,
2258 .w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2259 },
2260 };
2261 struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
2262 struct optee_msg_arg *arg = NULL;
2263 unsigned int internal_offset = 0;
2264 struct mobj *mobj = NULL;
2265 uint64_t cookie = 0;
2266
2267 if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
2268 return NULL;
2269
2270 thread_rpc(&rpc_arg);
2271
2272 if (arg->num_params != 1 ||
2273 arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
2274 return NULL;
2275
2276 internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
2277 cookie = READ_ONCE(arg->params->u.fmem.global_id);
2278 mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2279 if (!mobj) {
2280 DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2281 cookie, internal_offset);
2282 return NULL;
2283 }
2284
2285 assert(mobj_is_nonsec(mobj));
2286
2287 if (mobj->size < size) {
2288 DMSG("Mobj %#"PRIx64": wrong size", cookie);
2289 mobj_put(mobj);
2290 return NULL;
2291 }
2292
2293 if (mobj_inc_map(mobj)) {
2294 DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2295 mobj_put(mobj);
2296 return NULL;
2297 }
2298
2299 return mobj;
2300 }
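/*
 * Note on the allocation flow above (sketch): the OPTEE_RPC_CMD_SHM_ALLOC
 * request asks normal world to allocate and share a buffer of at least
 * @size bytes. The reply only carries the FF-A cookie (global_id) and an
 * internal offset; the mapping is resolved locally with
 * mobj_ffa_get_by_cookie(), so the buffer is expected to already be known
 * by its cookie when the RPC returns.
 */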
2301
2302 struct mobj *thread_rpc_alloc_payload(size_t size)
2303 {
2304 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2305 }
2306
2307 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2308 {
2309 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2310 }
2311
2312 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2313 {
2314 if (mobj)
2315 thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2316 mobj_get_cookie(mobj), mobj);
2317 }
2318
2319 void thread_rpc_free_payload(struct mobj *mobj)
2320 {
2321 if (mobj)
2322 thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2323 mobj);
2324 }
2325
2326 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2327 {
2328 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2329 }
2330
2331 void thread_rpc_free_global_payload(struct mobj *mobj)
2332 {
2333 if (mobj)
2334 thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2335 mobj_get_cookie(mobj), mobj);
2336 }
2337
2338 void thread_spmc_register_secondary_ep(vaddr_t ep)
2339 {
2340 unsigned long ret = 0;
2341
2342 /* Let the SPM know the entry point for secondary CPUs */
2343 ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2344
2345 if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2346 EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2347 }
2348
2349 static uint16_t ffa_id_get(void)
2350 {
2351 /*
2352 * Ask the SPM component running at a higher EL to return our FF-A ID.
2353 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
2354 * the partition ID (if not).
2355 */
2356 struct thread_smc_args args = {
2357 .a0 = FFA_ID_GET,
2358 };
2359
2360 thread_smccc(&args);
2361 if (!is_ffa_success(args.a0)) {
2362 if (args.a0 == FFA_ERROR)
2363 EMSG("Get id failed with error %ld", args.a2);
2364 else
2365 EMSG("Get id failed");
2366 panic();
2367 }
2368
2369 return args.a2;
2370 }
2371
2372 static uint16_t ffa_spm_id_get(void)
2373 {
2374 /*
2375 * Ask the SPM component running at a higher EL to return its ID.
2376 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
2377 * If not, the ID of the SPMC will be returned.
2378 */
2379 struct thread_smc_args args = {
2380 .a0 = FFA_SPM_ID_GET,
2381 };
2382
2383 thread_smccc(&args);
2384 if (!is_ffa_success(args.a0)) {
2385 if (args.a0 == FFA_ERROR)
2386 EMSG("Get spm id failed with error %ld", args.a2);
2387 else
2388 EMSG("Get spm id failed");
2389 panic();
2390 }
2391
2392 return args.a2;
2393 }
2394
2395 #ifdef CFG_CORE_DYN_PROTMEM
2396 TEE_Result thread_spmc_get_protmem_config(enum mobj_use_case use_case,
2397 void *buf, size_t *buf_sz,
2398 size_t *min_mem_sz,
2399 size_t *min_mem_align)
2400 {
2401 TEE_Result res = TEE_SUCCESS;
2402 struct ffa_mem_access_perm mem_acc_list[] = {
2403 {
2404 .endpoint_id = optee_core_lsp.sp_id,
2405 .perm = FFA_MEM_ACC_RW,
2406 },
2407 };
2408
2409 res = plat_get_protmem_config(use_case, min_mem_sz, min_mem_align);
2410 if (res)
2411 return res;
2412
2413 if (!buf || *buf_sz < sizeof(mem_acc_list)) {
2414 *buf_sz = sizeof(mem_acc_list);
2415 return TEE_ERROR_SHORT_BUFFER;
2416 }
2417
2418 memcpy(buf, mem_acc_list, sizeof(mem_acc_list));
2419 *buf_sz = sizeof(mem_acc_list);
2420
2421 return TEE_SUCCESS;
2422 }
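/*
 * Usage sketch (illustrative, for a caller that doesn't know the required
 * descriptor size up front):
 *
 *	size_t sz = 0;
 *	size_t min_sz = 0;
 *	size_t min_align = 0;
 *
 *	if (thread_spmc_get_protmem_config(uc, NULL, &sz, &min_sz,
 *					   &min_align) ==
 *	    TEE_ERROR_SHORT_BUFFER) {
 *		... allocate sz bytes and call again to obtain the
 *		... ffa_mem_access_perm list
 *	}
 */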
2423 #endif /*CFG_CORE_DYN_PROTMEM*/
2424
2425 static TEE_Result check_desc(struct spmc_lsp_desc *d)
2426 {
2427 uint32_t accept_props = FFA_PART_PROP_DIRECT_REQ_RECV |
2428 FFA_PART_PROP_DIRECT_REQ_SEND |
2429 FFA_PART_PROP_NOTIF_CREATED |
2430 FFA_PART_PROP_NOTIF_DESTROYED |
2431 FFA_PART_PROP_AARCH64_STATE;
2432 uint32_t id = d->sp_id;
2433
2434 if (id && (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id) ||
2435 id < FFA_SWD_ID_MIN || id > FFA_SWD_ID_MAX)) {
2436 EMSG("Conflicting SP id for SP \"%s\" id %#"PRIx32,
2437 d->name, id);
2438 if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2439 panic();
2440 return TEE_ERROR_BAD_FORMAT;
2441 }
2442
2443 if (d->properties & ~accept_props) {
2444 EMSG("Unexpected properties in %#"PRIx32" for LSP \"%s\" %#"PRIx16,
2445 d->properties, d->name, d->sp_id);
2446 if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2447 panic();
2448 d->properties &= accept_props;
2449 }
2450
2451 if (!d->direct_req) {
2452 EMSG("Missing direct request callback for LSP \"%s\" %#"PRIx16,
2453 d->name, d->sp_id);
2454 if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2455 panic();
2456 return TEE_ERROR_BAD_FORMAT;
2457 }
2458
2459 if (!d->uuid_words[0] && !d->uuid_words[1] &&
2460 !d->uuid_words[2] && !d->uuid_words[3]) {
2461 EMSG("Found NULL UUID for LSP \"%s\" %#"PRIx16,
2462 d->name, d->sp_id);
2463 if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2464 panic();
2465 return TEE_ERROR_BAD_FORMAT;
2466 }
2467
2468 return TEE_SUCCESS;
2469 }
2470
2471 static uint16_t find_unused_sp_id(void)
2472 {
2473 uint32_t id = FFA_SWD_ID_MIN;
2474
2475 while (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id)) {
2476 id++;
2477 assert(id <= FFA_SWD_ID_MAX);
2478 }
2479
2480 return id;
2481 }
2482
2483 TEE_Result spmc_register_lsp(struct spmc_lsp_desc *desc)
2484 {
2485 TEE_Result res = TEE_SUCCESS;
2486
2487 res = check_desc(desc);
2488 if (res)
2489 return res;
2490
2491 if (STAILQ_EMPTY(&lsp_head)) {
2492 DMSG("Cannot add Logical SP \"%s\": LSP framework not initialized yet",
2493 desc->name);
2494 return TEE_ERROR_ITEM_NOT_FOUND;
2495 }
2496
2497 if (!desc->sp_id)
2498 desc->sp_id = find_unused_sp_id();
2499
2500 DMSG("Adding Logical SP \"%s\" with id %#"PRIx16,
2501 desc->name, desc->sp_id);
2502
2503 STAILQ_INSERT_TAIL(&lsp_head, desc, link);
2504
2505 return TEE_SUCCESS;
2506 }
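/*
 * Registration sketch (illustrative; the descriptor and its direct request
 * handler are hypothetical):
 *
 *	static struct spmc_lsp_desc example_lsp __nex_data = {
 *		.name = "example LSP",
 *		.direct_req = example_lsp_direct_req,
 *		.properties = FFA_PART_PROP_DIRECT_REQ_RECV,
 *		.uuid_words = { 0x01234567, 0x89abcdef,
 *				0x01234567, 0x89abcdef, },
 *	};
 *
 *	res = spmc_register_lsp(&example_lsp);
 *
 * Leaving sp_id at 0 skips the conflict check in check_desc() and lets
 * find_unused_sp_id() assign the first free ID in the secure world range.
 */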
2507
2508 static struct spmc_lsp_desc optee_core_lsp __nex_data = {
2509 .name = "OP-TEE",
2510 .direct_req = optee_lsp_handle_direct_request,
2511 .properties = FFA_PART_PROP_DIRECT_REQ_RECV |
2512 FFA_PART_PROP_DIRECT_REQ_SEND |
2513 #ifdef CFG_NS_VIRTUALIZATION
2514 FFA_PART_PROP_NOTIF_CREATED |
2515 FFA_PART_PROP_NOTIF_DESTROYED |
2516 #endif
2517 FFA_PART_PROP_AARCH64_STATE |
2518 FFA_PART_PROP_IS_PE_ID,
2519 /*
2520 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
2521 * SP, or
2522 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
2523 * logical partition, residing in the same exception level as the
2524 * SPMC
2525 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
2526 */
2527 .uuid_words = { 0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5, },
2528 };
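/*
 * Byte-order note (a sketch, not a normative statement about the FF-A UUID
 * format): the uuid_words[] initializer above appears to be the UUID string
 * from the comment taken as its byte sequence 48 61 78 e0 e7 f8 11 e3 ...
 * and read four bytes at a time as little-endian 32-bit words, giving
 * 0xe0786148, 0xe311f8e7, 0x02005ebc and 0x1bc5d5a5.
 */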
2529
2530 #if defined(CFG_CORE_SEL1_SPMC)
2531 static struct spmc_lsp_desc optee_spmc_lsp __nex_data = {
2532 .name = "OP-TEE SPMC",
2533 .direct_req = optee_spmc_lsp_handle_direct_request,
2534 };
2535
2536 static TEE_Result spmc_init(void)
2537 {
2538 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2539 virt_add_guest_spec_data(&notif_vm_bitmap_id,
2540 sizeof(struct notif_vm_bitmap), NULL))
2541 panic("virt_add_guest_spec_data");
2542 spmd_id = ffa_spm_id_get();
2543 DMSG("SPMD ID %#"PRIx16, spmd_id);
2544
2545 optee_spmc_lsp.sp_id = ffa_id_get();
2546 DMSG("SPMC ID %#"PRIx16, optee_spmc_lsp.sp_id);
2547 STAILQ_INSERT_HEAD(&lsp_head, &optee_spmc_lsp, link);
2548
2549 optee_core_lsp.sp_id = find_unused_sp_id();
2550 DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
2551 STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);
2552
2553 /*
2554 * If the SPMD thinks we are version 1.0 it will report version 1.0
2555 * to normal world regardless of what version we query the SPM with.
2556 * However, if the SPMD thinks we are version 1.1 it will forward
2557 * queries from normal world to let us negotiate the version. So by
2558 * setting version 1.0 here we should be compatible.
2559 *
2560 * Note that disagreement on negotiated version means that we'll
2561 * have communication problems with normal world.
2562 */
2563 my_rxtx.ffa_vers = FFA_VERSION_1_0;
2564
2565 return TEE_SUCCESS;
2566 }
2567 #else /* !defined(CFG_CORE_SEL1_SPMC) */
2568 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2569 {
2570 struct thread_smc_args args = {
2571 #ifdef ARM64
2572 .a0 = FFA_RXTX_MAP_64,
2573 #else
2574 .a0 = FFA_RXTX_MAP_32,
2575 #endif
2576 .a1 = virt_to_phys(rxtx->tx),
2577 .a2 = virt_to_phys(rxtx->rx),
2578 .a3 = 1,
2579 };
2580
2581 thread_smccc(&args);
2582 if (!is_ffa_success(args.a0)) {
2583 if (args.a0 == FFA_ERROR)
2584 EMSG("rxtx map failed with error %ld", args.a2);
2585 else
2586 EMSG("rxtx map failed");
2587 panic();
2588 }
2589 }
2590
2591 static uint32_t get_ffa_version(uint32_t my_version)
2592 {
2593 struct thread_smc_args args = {
2594 .a0 = FFA_VERSION,
2595 .a1 = my_version,
2596 };
2597
2598 thread_smccc(&args);
2599 if (args.a0 & BIT(31)) {
2600 EMSG("FF-A version failed with error %ld", args.a0);
2601 panic();
2602 }
2603
2604 return args.a0;
2605 }
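/*
 * The returned value packs the negotiated version as major/minor fields,
 * decoded with FFA_GET_MAJOR_VERSION()/FFA_GET_MINOR_VERSION() by the
 * caller below; a value with bit 31 set signals an error instead. As an
 * illustrative example, a return value of 0x10002 decodes as version 1.2.
 */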
2606
2607 static void *spmc_retrieve_req(struct ffa_mem_transaction_x *trans)
2608 {
2609 uint64_t cookie __maybe_unused = trans->global_handle;
2610 struct ffa_mem_access *acc_descr_array = NULL;
2611 struct ffa_mem_access_perm *perm_descr = NULL;
2612 struct thread_smc_args args = {
2613 .a0 = FFA_MEM_RETRIEVE_REQ_32,
2614 .a3 = 0, /* Address, Using TX -> MBZ */
2615 .a4 = 0, /* Using TX -> MBZ */
2616 };
2617 size_t size = 0;
2618 int rc = 0;
2619
2620 if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2621 struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2622
2623 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2624 memset(trans_descr, 0, size);
2625 trans_descr->sender_id = trans->sender_id;
2626 trans_descr->mem_reg_attr = trans->mem_reg_attr;
2627 trans_descr->global_handle = trans->global_handle;
2628 trans_descr->tag = trans->tag;
2629 trans_descr->flags = trans->flags;
2630 trans_descr->mem_access_count = 1;
2631 acc_descr_array = trans_descr->mem_access_array;
2632 } else {
2633 struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2634
2635 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2636 memset(trans_descr, 0, size);
2637 trans_descr->sender_id = trans->sender_id;
2638 trans_descr->mem_reg_attr = trans->mem_reg_attr;
2639 trans_descr->global_handle = trans->global_handle;
2640 trans_descr->tag = trans->tag;
2641 trans_descr->flags = trans->flags;
2642 trans_descr->mem_access_count = 1;
2643 trans_descr->mem_access_offs = sizeof(*trans_descr);
2644 trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2645 acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2646 sizeof(*trans_descr));
2647 }
2648 acc_descr_array->region_offs = 0;
2649 acc_descr_array->reserved = 0;
2650 perm_descr = &acc_descr_array->access_perm;
2651 perm_descr->endpoint_id = optee_core_lsp.sp_id;
2652 perm_descr->perm = FFA_MEM_ACC_RW;
2653 perm_descr->flags = 0;
2654
2655 args.a1 = size; /* Total Length */
2656 args.a2 = size; /* Frag Length == Total length */
2657 thread_smccc(&args);
2658 if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2659 if (args.a0 == FFA_ERROR)
2660 EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2661 cookie, (int)args.a2);
2662 else
2663 EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2664 cookie, args.a0);
2665 return NULL;
2666 }
2667 rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2668 my_rxtx.size, trans);
2669 if (rc) {
2670 EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2671 cookie, rc);
2672 return NULL;
2673 }
2674
2675 return my_rxtx.rx;
2676 }
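/*
 * TX buffer layout built above for the retrieve request (sketch):
 *
 *	+---------------------------------------------+
 *	| ffa_mem_transaction_1_0 or _1_1 header      | depends on ffa_vers
 *	+---------------------------------------------+
 *	| one struct ffa_mem_access                   | receiver is our SP
 *	+---------------------------------------------+ ID, RW permission
 *
 * Total length and fragment length are equal since the request always fits
 * in a single transmission.
 */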
2677
2678 void thread_spmc_relinquish(uint64_t cookie)
2679 {
2680 struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2681 struct thread_smc_args args = {
2682 .a0 = FFA_MEM_RELINQUISH,
2683 };
2684
2685 memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2686 relinquish_desc->handle = cookie;
2687 relinquish_desc->flags = 0;
2688 relinquish_desc->endpoint_count = 1;
2689 relinquish_desc->endpoint_id_array[0] = optee_core_lsp.sp_id;
2690 thread_smccc(&args);
2691 if (!is_ffa_success(args.a0))
2692 EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2693 }
2694
2695 static int set_pages(struct ffa_address_range *regions,
2696 unsigned int num_regions, unsigned int num_pages,
2697 struct mobj_ffa *mf)
2698 {
2699 unsigned int n = 0;
2700 unsigned int idx = 0;
2701
2702 for (n = 0; n < num_regions; n++) {
2703 unsigned int page_count = READ_ONCE(regions[n].page_count);
2704 uint64_t addr = READ_ONCE(regions[n].address);
2705
2706 if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2707 return FFA_INVALID_PARAMETERS;
2708 }
2709
2710 if (idx != num_pages)
2711 return FFA_INVALID_PARAMETERS;
2712
2713 return 0;
2714 }
2715
2716 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie,
2717 enum mobj_use_case use_case)
2718 {
2719 struct mobj_ffa *ret = NULL;
2720 struct ffa_mem_transaction_x retrieve_desc = { .tag = use_case};
2721 struct ffa_mem_access *descr_array = NULL;
2722 struct ffa_mem_region *descr = NULL;
2723 struct mobj_ffa *mf = NULL;
2724 unsigned int num_pages = 0;
2725 unsigned int offs = 0;
2726 void *buf = NULL;
2727 struct thread_smc_args ffa_rx_release_args = {
2728 .a0 = FFA_RX_RELEASE
2729 };
2730
2731 if (use_case == MOBJ_USE_CASE_NS_SHM)
2732 retrieve_desc.flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
2733 else
2734 retrieve_desc.flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
2735 retrieve_desc.flags |= FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2736 retrieve_desc.global_handle = cookie;
2737 retrieve_desc.sender_id = thread_get_tsd()->rpc_target_info;
2738 retrieve_desc.mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2739
2740 /*
2741 * OP-TEE only supports a single mem_region while the
2742 * specification allows for more than one.
2743 */
2744 buf = spmc_retrieve_req(&retrieve_desc);
2745 if (!buf) {
2746 EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2747 cookie);
2748 return NULL;
2749 }
2750
2751 descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2752 offs = READ_ONCE(descr_array->region_offs);
2753 descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2754
2755 num_pages = READ_ONCE(descr->total_page_count);
2756 mf = mobj_ffa_spmc_new(cookie, num_pages, use_case);
2757 if (!mf)
2758 goto out;
2759
2760 if (set_pages(descr->address_range_array,
2761 READ_ONCE(descr->address_range_count), num_pages, mf)) {
2762 mobj_ffa_spmc_delete(mf);
2763 goto out;
2764 }
2765
2766 ret = mf;
2767
2768 out:
2769 /* Release RX buffer after the mem retrieve request. */
2770 thread_smccc(&ffa_rx_release_args);
2771
2772 return ret;
2773 }
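/*
 * RX buffer layout parsed above after FFA_MEM_RETRIEVE_RESP (sketch): the
 * transaction header's mem_access_offs points at the ffa_mem_access array,
 * whose region_offs in turn points at the struct ffa_mem_region holding
 * total_page_count and the address_range_array that set_pages() walks.
 * Only the first access descriptor and memory region are used.
 */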
2774
2775 static uint32_t get_ffa_version_from_manifest(void *fdt)
2776 {
2777 int ret = 0;
2778 uint32_t vers = 0;
2779
2780 ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
2781 if (ret < 0) {
2782 EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
2783 panic();
2784 }
2785
2786 ret = fdt_read_uint32(fdt, 0, "ffa-version", &vers);
2787 if (ret < 0) {
2788 EMSG("Can't read \"ffa-version\" from FF-A manifest at %p: error %d",
2789 fdt, ret);
2790 panic();
2791 }
2792
2793 return vers;
2794 }
2795
2796 static TEE_Result spmc_init(void)
2797 {
2798 uint32_t my_vers = 0;
2799 uint32_t vers = 0;
2800
2801 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2802 virt_add_guest_spec_data(&notif_vm_bitmap_id,
2803 sizeof(struct notif_vm_bitmap), NULL))
2804 panic("virt_add_guest_spec_data");
2805
2806 my_vers = get_ffa_version_from_manifest(get_manifest_dt());
2807 if (my_vers < FFA_VERSION_1_0 || my_vers > FFA_VERSION_1_2) {
2808 EMSG("Unsupported version %"PRIu32".%"PRIu32" from manifest",
2809 FFA_GET_MAJOR_VERSION(my_vers),
2810 FFA_GET_MINOR_VERSION(my_vers));
2811 panic();
2812 }
2813 vers = get_ffa_version(my_vers);
2814 DMSG("SPMC reported version %"PRIu32".%"PRIu32,
2815 FFA_GET_MAJOR_VERSION(vers), FFA_GET_MINOR_VERSION(vers));
2816 if (FFA_GET_MAJOR_VERSION(vers) != FFA_GET_MAJOR_VERSION(my_vers)) {
2817 EMSG("Incompatible major version %"PRIu32", expected %"PRIu32"",
2818 FFA_GET_MAJOR_VERSION(vers),
2819 FFA_GET_MAJOR_VERSION(my_vers));
2820 panic();
2821 }
2822 if (vers < my_vers)
2823 my_vers = vers;
2824 DMSG("Using version %"PRIu32".%"PRIu32"",
2825 FFA_GET_MAJOR_VERSION(my_vers), FFA_GET_MINOR_VERSION(my_vers));
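	/*
	 * Worked example (illustrative): if the manifest requests version
	 * 1.2 but the SPM at the higher EL reports 1.1, my_vers is lowered
	 * to 1.1 here and used when mapping the RX/TX buffers below.
	 */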
2826 my_rxtx.ffa_vers = my_vers;
2827
2828 spmc_rxtx_map(&my_rxtx);
2829
2830 spmc_id = ffa_spm_id_get();
2831 DMSG("SPMC ID %#"PRIx16, spmc_id);
2832
2833 optee_core_lsp.sp_id = ffa_id_get();
2834 DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
2835 STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);
2836
2837 if (!ffa_features(FFA_NOTIFICATION_SET)) {
2838 spmc_notif_is_ready = true;
2839 DMSG("Asynchronous notifications are ready");
2840 }
2841
2842 return TEE_SUCCESS;
2843 }
2844 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2845
2846 nex_service_init(spmc_init);
2847