// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2025, Linaro Limited.
 * Copyright (c) 2019-2026, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <initcall.h>
#include <io.h>
#include <kernel/dt.h>
#include <kernel/interrupt.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <libfdt.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#if defined(CFG_CORE_SEL1_SPMC)
struct mem_op_state {
	bool mem_share;
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_op_state op;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

struct notif_vm_bitmap {
	bool initialized;
	int do_bottom_half_value;
	uint64_t pending;
	uint64_t bound;
};

STAILQ_HEAD(spmc_lsp_desc_head, spmc_lsp_desc);

static struct spmc_lsp_desc_head lsp_head __nex_data =
	STAILQ_HEAD_INITIALIZER(lsp_head);

static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
static bool spmc_notif_is_ready __nex_bss;
static int notif_intid __nex_data __maybe_unused = -1;

/* Id used to look up the guest specific struct notif_vm_bitmap */
static unsigned int notif_vm_bitmap_id __nex_bss;
/* Notification state when ns-virtualization isn't enabled */
static struct notif_vm_bitmap default_notif_vm_bitmap;

/* Initialized in spmc_init() below */
static struct spmc_lsp_desc optee_core_lsp;
#ifdef CFG_CORE_SEL1_SPMC
/*
 * Representation of the internal SPMC when OP-TEE is the S-EL1 SPMC.
 * Initialized in spmc_init() below.
 */
static struct spmc_lsp_desc optee_spmc_lsp;
/* FF-A ID of the SPMD. This is only valid when OP-TEE is the S-EL1 SPMC. */
static uint16_t spmd_id __nex_bss;

/*
 * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
 *
 * struct ffa_rxtx::spin_lock protects the variables below from concurrent
 * access; this includes the use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers so we must always be careful when reading, even while we
 * hold the lock.
 */

static struct ffa_rxtx my_rxtx __nex_bss;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &my_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(&frag_state_head);

#else
/* FF-A ID of the external SPMC */
static uint16_t spmc_id __nex_bss;
static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
static struct ffa_rxtx my_rxtx __nex_data = {
	.rx = __rx_buf,
	.tx = __tx_buf,
	.size = sizeof(__rx_buf),
};
#endif

bool spmc_is_reserved_id(uint16_t id)
{
#ifdef CFG_CORE_SEL1_SPMC
	return id == spmd_id;
#else
	return id == spmc_id;
#endif
}

struct spmc_lsp_desc *spmc_find_lsp_by_sp_id(uint16_t sp_id)
{
	struct spmc_lsp_desc *desc = NULL;

	STAILQ_FOREACH(desc, &lsp_head, link)
		if (desc->sp_id == sp_id)
			return desc;

	return NULL;
}

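/*
 * For FFA_MSG_SEND_DIRECT_REQ* the sender endpoint ID is carried in the
 * top 16 bits of w1 and the destination endpoint ID in the bottom 16
 * bits. The matching response carries the same two IDs swapped.
 */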
static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

static uint16_t get_sender_id(uint32_t src_dst)
{
	return src_dst >> 16;
}

void spmc_set_args(struct thread_smc_1_2_regs *args, uint32_t fid,
		   uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
		   uint32_t w5)
{
	*args = (struct thread_smc_1_2_regs){
		.a0 = fid,
		.a1 = src_dst,
		.a2 = w2,
		.a3 = w3,
		.a4 = w4,
		.a5 = w5,
	};
}

static void set_simple_ret_val(struct thread_smc_1_2_regs *args, int ffa_ret)
{
	if (ffa_ret)
		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
	else
		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
}

uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
{
	uint32_t major_vers = FFA_GET_MAJOR_VERSION(vers);
	uint32_t minor_vers = FFA_GET_MINOR_VERSION(vers);
	uint32_t my_vers = FFA_VERSION_1_2;
	uint32_t my_major_vers = 0;
	uint32_t my_minor_vers = 0;

	my_major_vers = FFA_GET_MAJOR_VERSION(my_vers);
	my_minor_vers = FFA_GET_MINOR_VERSION(my_vers);

	/*
	 * No locking, if the caller does concurrent calls to this it's
	 * only making a mess for itself. We must be able to renegotiate
	 * the FF-A version in order to support differing versions between
	 * the loader and the driver.
	 *
	 * Callers should use the version requested if we return a matching
	 * major version and a matching or larger minor version. The caller
	 * should downgrade to our minor version if our minor version is
	 * smaller. Regardless, always return our version as recommended by
	 * the specification.
	 */
	if (major_vers == my_major_vers) {
		if (minor_vers > my_minor_vers)
			rxtx->ffa_vers = my_vers;
		else
			rxtx->ffa_vers = vers;
	}

	return my_vers;
}

static bool is_ffa_success(uint32_t fid)
{
#ifdef ARM64
	if (fid == FFA_SUCCESS_64)
		return true;
#endif
	return fid == FFA_SUCCESS_32;
}

static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
{
	if (is_ffa_success(args->a0))
		return FFA_OK;
	if (args->a0 == FFA_ERROR && args->a2)
		return args->a2;
	return FFA_NOT_SUPPORTED;
}

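/*
 * Helper for FF-A ABIs where only the status of the response matters:
 * issue the SMC and fold the returned registers into an FFA_* status
 * code.
 */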
static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
			   unsigned long a3, unsigned long a4)
{
	struct thread_smc_args args = {
		.a0 = fid,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.a4 = a4,
	};

	thread_smccc(&args);

	return get_ffa_ret_code(&args);
}

static int __maybe_unused ffa_features(uint32_t id)
{
	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
}

static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
					       uint32_t flags, uint64_t bitmap)
{
	return ffa_simple_call(FFA_NOTIFICATION_SET,
			       SHIFT_U32(src, 16) | dst, flags,
			       low32_from_64(bitmap), high32_from_64(bitmap));
}

#if defined(CFG_CORE_SEL1_SPMC)
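/*
 * Handle FFA_FEATURES: report whether the ABI or feature ID in w1 is
 * implemented by this SPMC. For some IDs w2 carries extra properties,
 * e.g. the notification interrupt ID for FFA_FEATURE_SCHEDULE_RECV_INTR.
 */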
static void handle_features(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_w2 = FFA_NOT_SUPPORTED;

	switch (args->a1) {
	case FFA_FEATURE_SCHEDULE_RECV_INTR:
		if (spmc_notif_is_ready) {
			ret_fid = FFA_SUCCESS_32;
			ret_w2 = notif_intid;
		}
		break;

#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_FEATURES:
	case FFA_SPM_ID_GET:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_UNMAP:
	case FFA_RX_RELEASE:
	case FFA_FEATURE_MANAGED_EXIT_INTR:
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET_32:
#ifdef ARM64
	case FFA_NOTIFICATION_INFO_GET_64:
#endif
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = FFA_PARAM_MBZ;
		break;
	default:
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

void spmc_handle_spm_id_get(struct thread_smc_1_2_regs *args)
{
	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, optee_spmc_lsp.sp_id,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}

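/*
 * Handle FFA_RXTX_MAP: map the buffer pair the caller donates for
 * exchanging messages that don't fit in registers. a1/a2 hold the TX/RX
 * base addresses and a3 the size in 4k pages. The caller's TX buffer is
 * our RX buffer and vice versa.
 */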
void spmc_handle_rxtx_map(struct thread_smc_1_2_regs *args,
			  struct ffa_rxtx *rxtx)
{
	int rc = 0;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from an SP the address is virtual and
	 * already mapped.
	 */
	if (is_nw_buf(rxtx)) {
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
			bool tx_alloced = false;

			/*
			 * With virtualization we establish this mapping in
			 * the nexus mapping which then is replicated to
			 * each partition.
			 *
			 * This means that this mapping must be done before
			 * any partition is created and then must not be
			 * changed.
			 */

			/*
			 * core_mmu_add_mapping() may reuse previous
			 * mappings. First check if there are any mappings
			 * to reuse so we know how to clean up in case of
			 * failure.
			 */
			tx = phys_to_virt(tx_pa, mt, sz);
			rx = phys_to_virt(rx_pa, mt, sz);
			if (!tx) {
				tx = core_mmu_add_mapping(mt, tx_pa, sz);
				if (!tx) {
					rc = FFA_NO_MEMORY;
					goto out;
				}
				tx_alloced = true;
			}
			if (!rx)
				rx = core_mmu_add_mapping(mt, rx_pa, sz);

			if (!rx) {
				if (tx_alloced && tx)
					core_mmu_remove_mapping(mt, tx, sz);
				rc = FFA_NO_MEMORY;
				goto out;
			}
		} else {
			rc = map_buf(tx_pa, sz, &tx);
			if (rc)
				goto out;
			rc = map_buf(rx_pa, sz, &rx);
			if (rc) {
				unmap_buf(tx, sz);
				goto out;
			}
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	set_simple_ret_val(args, rc);
}

void spmc_handle_rxtx_unmap(struct thread_smc_1_2_regs *args,
			    struct ffa_rxtx *rxtx)
{
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/*
	 * We don't unmap the SP memory as the SP might still use it.
	 * We also avoid making changes to nexus mappings at this stage
	 * since there currently isn't a way to replicate those changes
	 * to all partitions.
	 */
	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	set_simple_ret_val(args, rc);
}

void spmc_handle_rx_release(struct thread_smc_1_2_regs *args,
			    struct ffa_rxtx *rxtx)
{
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		rc = FFA_DENIED;
	} else {
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	set_simple_ret_val(args, rc);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
				     size_t idx, uint16_t endpoint_id,
				     uint16_t execution_context,
				     uint32_t part_props,
				     const uint32_t uuid_words[4])
{
	struct ffa_partition_info_x *fpi = NULL;
	size_t fpi_size = sizeof(*fpi);

	if (ffa_vers >= FFA_VERSION_1_1)
		fpi_size += FFA_UUID_SIZE;

	if ((idx + 1) * fpi_size > blen)
		return TEE_ERROR_OUT_OF_MEMORY;

	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
	fpi->id = endpoint_id;
	/* Number of execution contexts implemented by this partition */
	fpi->execution_context = execution_context;

	fpi->partition_properties = part_props;

	/* In FF-A 1.0 only bits [2:0] are defined, let's mask others */
	if (ffa_vers < FFA_VERSION_1_1)
		fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV |
					     FFA_PART_PROP_DIRECT_REQ_SEND |
					     FFA_PART_PROP_INDIRECT_MSGS;

	if (ffa_vers >= FFA_VERSION_1_1) {
		if (uuid_words)
			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
		else
			memset(fpi->uuid, 0, FFA_UUID_SIZE);
	}

	return TEE_SUCCESS;
}

static TEE_Result lsp_partition_info_get(uint32_t ffa_vers, void *buf,
					 size_t buf_size, size_t *elem_count,
					 const uint32_t uuid_words[4],
					 bool count_only)
{
	struct spmc_lsp_desc *desc = NULL;
	TEE_Result res = TEE_SUCCESS;
	size_t c = *elem_count;

	STAILQ_FOREACH(desc, &lsp_head, link) {
		/*
		 * LSPs (OP-TEE SPMC) without an assigned UUID are not
		 * proper LSPs and shouldn't be reported here.
		 */
		if (is_nil_uuid(desc->uuid_words[0], desc->uuid_words[1],
				desc->uuid_words[2], desc->uuid_words[3]))
			continue;

		if (uuid_words && memcmp(uuid_words, desc->uuid_words,
					 sizeof(desc->uuid_words)))
			continue;

		if (!count_only && !res)
			res = spmc_fill_partition_entry(ffa_vers, buf, buf_size,
							c, desc->sp_id,
							CFG_TEE_CORE_NB_CORE,
							desc->properties,
							desc->uuid_words);
		c++;
	}

	*elem_count = c;

	return res;
}

void spmc_handle_partition_info_get(struct thread_smc_1_2_regs *args,
				    struct ffa_rxtx *rxtx)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t fpi_size = 0;
	uint32_t rc = 0;
	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
	uint32_t uuid_words[4] = { args->a1, args->a2, args->a3, args->a4, };
	uint32_t *uuid = uuid_words;
	size_t count = 0;

	if (!count_only) {
		cpu_spin_lock(&rxtx->spinlock);

		if (!rxtx->size || !rxtx->tx_is_mine) {
			rc = FFA_BUSY;
			goto out;
		}
	}

	if (is_nil_uuid(uuid[0], uuid[1], uuid[2], uuid[3]))
		uuid = NULL;

	if (lsp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
				   &count, uuid, count_only)) {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
					    rxtx->size, uuid, &count,
					    count_only);
		if (res != TEE_SUCCESS) {
			ret_fid = FFA_ERROR;
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
	}

	rc = count;
	ret_fid = FFA_SUCCESS_32;
out:
	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
	    rxtx->ffa_vers >= FFA_VERSION_1_1)
		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	if (!count_only) {
		rxtx->tx_is_mine = false;
		cpu_spin_unlock(&rxtx->spinlock);
	}
}

static void spmc_handle_run(struct thread_smc_1_2_regs *args)
{
	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
	uint32_t rc = FFA_INVALID_PARAMETERS;

	/*
	 * OP-TEE core threads are only preempted using controlled exit so
	 * FFA_RUN mustn't be used to resume such threads.
	 *
	 * The OP-TEE SPMC is not preempted at all, it's an error to try to
	 * resume that ID.
	 */
	if (spmc_find_lsp_by_sp_id(endpoint))
		goto out;

	/*
	 * The endpoint should be an S-EL0 SP, try to resume the SP from
	 * preempted into busy state.
	 */
	rc = spmc_sp_resume_from_preempted(endpoint, thread_id);
out:
	set_simple_ret_val(args, rc);
}
#endif /*CFG_CORE_SEL1_SPMC*/

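/*
 * Look up the notification bitmap for @vm_id; returns NULL if @vm_id
 * doesn't identify a valid guest. Without NS-virtualization there is
 * only the single default bitmap, used for VM ID 0.
 */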
static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
						   uint16_t vm_id)
{
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		if (!prtn)
			return NULL;
		assert(vm_id == virt_get_guest_id(prtn));
		return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
	}
	if (vm_id)
		return NULL;
	return &default_notif_vm_bitmap;
}

static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
					uint16_t vm_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t old_itr_status = 0;
	uint32_t res = 0;

	if (!spmc_notif_is_ready) {
		/*
		 * This should never happen, not if normal world respects
		 * the exchanged capabilities.
		 */
		EMSG("Asynchronous notifications are not ready");
		return TEE_ERROR_NOT_IMPLEMENTED;
	}

	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
	nvb->do_bottom_half_value = bottom_half_value;
	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);

	notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
	res = TEE_SUCCESS;
out:
	virt_put_guest(prtn);
	return res;
}

static uint32_t get_direct_resp_fid(uint32_t fid)
{
	assert(fid == FFA_MSG_SEND_DIRECT_REQ_64 ||
	       fid == FFA_MSG_SEND_DIRECT_REQ_32);

	if (OPTEE_SMC_IS_64(fid))
		return FFA_MSG_SEND_DIRECT_RESP_64;
	return FFA_MSG_SEND_DIRECT_RESP_32;
}

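/*
 * A yielding call runs in an OP-TEE thread: either resume a thread
 * suspended in an RPC or allocate and start a new one. Reaching the
 * spmc_set_args() below means no thread could be resumed or allocated.
 */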
static void handle_yielding_call(struct thread_smc_1_2_regs *args)
{
	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
	TEE_Result res = TEE_SUCCESS;

	thread_check_canaries();

#ifdef ARM64
	/* Saving this for an eventual RPC */
	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
#endif

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
		      0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_1_2_regs *args)
{
	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
	uint32_t sec_caps = 0;

	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR,
			      TEE_IMPL_GIT_SHA1 >> 32);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
		if (spmc_notif_is_ready)
			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
		if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
			sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE;
		if (IS_ENABLED(CFG_CORE_DYN_PROTMEM))
			sec_caps |= OPTEE_FFA_SEC_CAP_PROTMEM;
		spmc_set_args(args, direct_resp_fid,
			      swap_src_dst(args->a1), 0, 0,
			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
		break;
	case OPTEE_FFA_UNREGISTER_SHM:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
		spmc_set_args(args, direct_resp_fid,
			      swap_src_dst(args->a1), 0,
			      spmc_enable_async_notif(args->a4,
						      FFA_SRC(args->a1)),
			      0, 0);
		break;
#ifdef CFG_CORE_DYN_PROTMEM
	case OPTEE_FFA_RELEASE_PROTMEM:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
#endif
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
	}
}

static void handle_framework_direct_request(struct thread_smc_1_2_regs *args)
{
	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
	uint32_t w0 = FFA_ERROR;
	uint32_t w1 = FFA_PARAM_MBZ;
	uint32_t w2 = FFA_NOT_SUPPORTED;
	uint32_t w3 = FFA_PARAM_MBZ;

	switch (args->a2 & FFA_MSG_TYPE_MASK) {
	case FFA_MSG_SEND_VM_CREATED:
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			uint16_t guest_id = args->a5;
			TEE_Result res = virt_guest_created(guest_id);

			w0 = direct_resp_fid;
			w1 = swap_src_dst(args->a1);
			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
			if (res == TEE_SUCCESS)
				w3 = FFA_OK;
			else if (res == TEE_ERROR_OUT_OF_MEMORY)
				w3 = FFA_DENIED;
			else
				w3 = FFA_INVALID_PARAMETERS;
		}
		break;
	case FFA_MSG_SEND_VM_DESTROYED:
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			uint16_t guest_id = args->a5;
			TEE_Result res = virt_guest_destroyed(guest_id);

			w0 = direct_resp_fid;
			w1 = swap_src_dst(args->a1);
			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
			if (res == TEE_SUCCESS)
				w3 = FFA_OK;
			else
				w3 = FFA_INVALID_PARAMETERS;
		}
		break;
	case FFA_MSG_VERSION_REQ:
		w0 = direct_resp_fid;
		w1 = swap_src_dst(args->a1);
		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
		w3 = spmc_exchange_version(args->a3, &my_rxtx);
		break;
	default:
		break;
	}
	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void
optee_lsp_handle_direct_request(struct thread_smc_1_2_regs *args,
				struct sp_session *caller_sp)
{
	if (caller_sp) {
		set_simple_ret_val(args, FFA_INVALID_PARAMETERS);
		return;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		handle_framework_direct_request(args);
		return;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(get_sender_id(args->a1))) {
		spmc_set_args(args, get_direct_resp_fid(args->a0),
			      swap_src_dst(args->a1), 0,
			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
		return;
	}

	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
		handle_yielding_call(args);
	else
		handle_blocking_call(args);

	/*
	 * Note that handle_yielding_call() typically only returns if a
	 * thread cannot be allocated or found. virt_unset_guest() is also
	 * called from thread_state_suspend() and thread_state_free().
	 */
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
}

static void __maybe_unused
optee_spmc_lsp_handle_direct_request(struct thread_smc_1_2_regs *args,
				     struct sp_session *caller_sp)
{
	if (caller_sp) {
		set_simple_ret_val(args, FFA_INVALID_PARAMETERS);
		return;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK)
		handle_framework_direct_request(args);
	else
		set_simple_ret_val(args, FFA_INVALID_PARAMETERS);
}

static void handle_direct_request(struct thread_smc_1_2_regs *args)
{
	struct spmc_lsp_desc *lsp = spmc_find_lsp_by_sp_id(FFA_DST(args->a1));

	if (lsp) {
		lsp->direct_req(args, NULL);
	} else {
		int rc = spmc_sp_start_thread(args);

		/*
		 * spmc_sp_start_thread() returns here if the SPs aren't
		 * supported or if all threads are busy.
		 */
		set_simple_ret_val(args, rc);
	}
}

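/*
 * Parse and validate the FF-A memory transaction descriptor in @buf,
 * normalizing the FF-A 1.0 and 1.1+ layouts into @trans. The descriptor
 * lives in shared memory, so each field is read only once (READ_ONCE())
 * before it's checked.
 */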
int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
			      uint32_t tot_len, uint32_t frag_len,
			      struct ffa_mem_transaction_x *trans)
{
	uint16_t mem_reg_attr = 0;
	uint32_t flags = 0;
	uint32_t count = 0;
	uint32_t offs = 0;
	uint32_t size = 0;
	size_t n = 0;

	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t) || frag_len > tot_len ||
	    frag_len > blen)
		return FFA_INVALID_PARAMETERS;

	if (ffa_vers >= FFA_VERSION_1_1) {
		struct ffa_mem_transaction_1_1 *descr = NULL;

		if (frag_len < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = READ_ONCE(descr->mem_access_size);
		offs = READ_ONCE(descr->mem_access_offs);
	} else {
		struct ffa_mem_transaction_1_0 *descr = NULL;

		if (frag_len < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = sizeof(descr->mem_access_array[0]);
		offs = offsetof(struct ffa_mem_transaction_1_0,
				mem_access_array);
	}

	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX ||
	    offs % 16)
		return FFA_INVALID_PARAMETERS;

	/* Check that the endpoint memory access descriptor array fits */
	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
	    n > frag_len)
		return FFA_INVALID_PARAMETERS;

	trans->mem_reg_attr = mem_reg_attr;
	trans->flags = flags;
	trans->mem_access_size = size;
	trans->mem_access_count = count;
	trans->mem_access_offs = offs;
	return 0;
}

#if defined(CFG_CORE_SEL1_SPMC)
static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
			 unsigned int mem_access_count, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	struct ffa_mem_access_common *mem_acc = NULL;
	struct ffa_mem_access_perm *descr = NULL;
	unsigned int n = 0;

	for (n = 0; n < mem_access_count; n++) {
		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
		descr = &mem_acc->access_perm;
		if (READ_ONCE(descr->endpoint_id) == optee_core_lsp.sp_id) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc->region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

static int mem_op_init(bool mem_share, struct ffa_mem_transaction_x *mem_trans,
		       void *buf, size_t blen, unsigned int *page_count,
		       unsigned int *region_count, size_t *addr_range_offs)
{
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	unsigned int region_descr_offs = 0;
	uint16_t exp_mem_reg_attr = 0;
	uint8_t mem_acc_perm = 0;
	size_t n = 0;

	if (mem_share)
		exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
			  mem_trans->mem_access_size,
			  mem_trans->mem_access_count,
			  &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the Composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
				  struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

static int add_mem_op_helper(struct mem_op_state *s, void *buf, size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_op_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_op_helper(&s->op, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			/* We're not at the end of the descriptor yet */
			if (s->op.region_count)
				return s->frag_offset;

			/* We're done */
			rc = 0;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0) {
		mobj_ffa_sel1_spmc_delete(s->op.mf);
	} else {
		if (mobj_ffa_push_to_inactive(s->op.mf)) {
			rc = FFA_INVALID_PARAMETERS;
			mobj_ffa_sel1_spmc_delete(s->op.mf);
		}
	}
	free(s);

	return rc;
}

static bool is_sp_op(struct ffa_mem_transaction_x *mem_trans, void *buf)
{
	struct ffa_mem_access_common *mem_acc = NULL;
	struct ffa_mem_access_perm *perm = NULL;

	if (!IS_ENABLED(CFG_SECURE_PARTITION))
		return false;

	if (mem_trans->mem_access_count < 1)
		return false;

	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
	perm = &mem_acc->access_perm;

	/*
	 * perm->endpoint_id is read here only to check if the endpoint is
	 * OP-TEE. It is read again later, with additional checks to make
	 * sure that the data is correct.
	 */
	return READ_ONCE(perm->endpoint_id) != optee_core_lsp.sp_id;
}

static int add_mem_op(bool mem_share, struct ffa_mem_transaction_x *mem_trans,
		      tee_mm_entry_t *mm, void *buf, size_t blen, size_t flen,
		      uint64_t *global_handle)
{
	int rc = 0;
	struct mem_op_state op = { .mem_share = mem_share, };
	size_t addr_range_offs = 0;
	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	enum mobj_use_case use_case = MOBJ_USE_CASE_NS_SHM;
	size_t n = 0;

	rc = mem_op_init(mem_share, mem_trans, buf, flen, &op.page_count,
			 &op.region_count, &addr_range_offs);
	if (rc)
		return rc;

	if (!op.page_count || !op.region_count)
		return FFA_INVALID_PARAMETERS;

	if (MUL_OVERFLOW(op.region_count,
			 sizeof(struct ffa_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	if (mem_trans->global_handle)
		cookie = mem_trans->global_handle;
	if (!mem_share)
		use_case = mem_trans->tag;
	op.mf = mobj_ffa_sel1_spmc_new(cookie, op.page_count, use_case);
	if (!op.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(1, sizeof(*s));

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->op = op;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_op_frag(s, (char *)buf + addr_range_offs,
				     flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(op.mf);

		return rc;
	}

	rc = add_mem_op_helper(&op, (char *)buf + addr_range_offs,
			       flen - addr_range_offs);
	if (rc) {
		/*
		 * Number of consumed bytes may be returned instead of 0
		 * for done.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	if (mobj_ffa_push_to_inactive(op.mf)) {
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}
	*global_handle = mobj_ffa_get_cookie(op.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(op.mf);
	return rc;
}

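/*
 * The transaction descriptor is passed in a buffer allocated by the
 * caller rather than in the RX buffer, so map the pages before parsing.
 */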
static int handle_mem_op_tmem(bool share_mem, paddr_t pbuf, size_t tot_len,
			      size_t frag_len, unsigned int page_count,
			      uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = 0;
	size_t len = 0;
	void *buf = NULL;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in frag_len is covered by len
	 * even if the offset is taken into account.
	 */
	if (len < frag_len || len - offs < frag_len)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&core_virt_shm_pool, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	buf = (void *)(tee_mm_get_smem(mm) + offs);

	cpu_spin_lock(&rxtx->spinlock);
	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, len - offs,
				       tot_len, frag_len, &mem_trans);
	if (rc)
		goto unlock;

	if (is_sp_op(&mem_trans, buf)) {
		if (!share_mem) {
			rc = FFA_DENIED;
			goto unlock;
		}
		rc = spmc_sp_add_share(&mem_trans, buf, tot_len, frag_len,
				       global_handle, NULL);
		goto unlock;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(mem_trans.sender_id)) {
		rc = FFA_DENIED;
		goto unlock;
	}

	rc = add_mem_op(share_mem, &mem_trans, mm, buf, tot_len, frag_len,
			global_handle);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

unlock:
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_op_rxbuf(bool share_mem, size_t tot_len, size_t frag_len,
			       uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->rx || frag_len > rxtx->size)
		goto out;

	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, rxtx->size,
				       tot_len, frag_len, &mem_trans);
	if (rc)
		goto out;
	if (is_sp_op(&mem_trans, rxtx->rx)) {
		if (!share_mem) {
			rc = FFA_DENIED;
			goto out;
		}
		rc = spmc_sp_add_share(&mem_trans, rxtx, tot_len, frag_len,
				       global_handle, NULL);
		goto out;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(mem_trans.sender_id))
		goto out;

	rc = add_mem_op(share_mem, &mem_trans, NULL, rxtx->rx, tot_len,
			frag_len, global_handle);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

out:
	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}

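/*
 * Handle FFA_MEM_SHARE and FFA_MEM_LEND: w1/w2 hold the total and
 * fragment length of the memory transaction descriptor and w3/w4 an
 * optional caller allocated buffer holding the descriptor. On success
 * the global handle is returned in w2/w3, or FFA_MEM_FRAG_RX is
 * returned when more fragments are expected.
 */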
static void handle_mem_op(struct thread_smc_1_2_regs *args,
			  struct ffa_rxtx *rxtx)
{
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t addr = args->a3;
	uint32_t page_count = args->a4;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	bool share_mem = false;
	int rc = 0;

	/* Check that the MBZs are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	/* Check that fragment length doesn't exceed total length */
	if (frag_len > tot_len)
		goto out;

	/* Check for 32-bit calling convention */
	if (!OPTEE_SMC_IS_64(args->a0))
		addr &= UINT32_MAX;

	if (args->a0 == FFA_MEM_SHARE_32 || args->a0 == FFA_MEM_SHARE_64)
		share_mem = true;
	else
		share_mem = false;

	if (!addr) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (page_count)
			goto out;
		rc = handle_mem_op_rxbuf(share_mem, tot_len, frag_len,
					 &global_handle, rxtx);
	} else {
		rc = handle_mem_op_tmem(share_mem, addr, tot_len, frag_len,
					page_count, &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->op.mf) == global_handle)
			return s;

	return NULL;
}

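/*
 * Handle FFA_MEM_FRAG_TX: receive the next fragment of a fragmented
 * FFA_MEM_SHARE_* transaction, identified by the global handle in
 * w1/w2, and respond with FFA_MEM_FRAG_RX until the last fragment has
 * been consumed.
 */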
static void handle_mem_frag_tx(struct thread_smc_1_2_regs *args,
			       struct ffa_rxtx *rxtx)
{
	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
	size_t flen = args->a3;
	uint32_t endpoint_id = args->a4;
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;
	int rc = 0;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		uint16_t guest_id = endpoint_id >> 16;

		if (!guest_id || virt_set_guest(guest_id)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out_set_rc;
		}
	}

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->op.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_op_frag(s, buf, flen);
out:
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

out_set_rc:
	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

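/*
 * Handle FFA_MEM_RECLAIM: let the owner reclaim memory previously
 * shared or lent, provided OP-TEE no longer uses it. With
 * NS-virtualization the owning guest is identified either via the
 * cookie itself or by searching for the cookie among the guests.
 */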
static void handle_mem_reclaim(struct thread_smc_1_2_regs *args)
{
	int rc = FFA_INVALID_PARAMETERS;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		uint16_t guest_id = 0;

		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
			guest_id = virt_find_guest_by_cookie(cookie);
		} else {
			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
				   FFA_MEMORY_HANDLE_PRTN_MASK;
		}
		if (!guest_id)
			goto out;
		if (virt_set_guest(guest_id)) {
			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
								      cookie))
				rc = FFA_OK;
			goto out;
		}
	}

	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		rc = FFA_OK;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		rc = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		rc = FFA_DENIED;
		break;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

out:
	set_simple_ret_val(args, rc);
}

static void handle_notification_bitmap_create(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;

	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
	    !args->a5 && !args->a6 && !args->a7) {
		struct guest_partition *prtn = NULL;
		struct notif_vm_bitmap *nvb = NULL;
		uint16_t vm_id = args->a1;

		prtn = virt_get_guest(vm_id);
		nvb = get_notif_vm_bitmap(prtn, vm_id);
		if (!nvb) {
			ret_val = FFA_INVALID_PARAMETERS;
			goto out_virt_put;
		}

		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

		if (nvb->initialized) {
			ret_val = FFA_DENIED;
			goto out_unlock;
		}

		nvb->initialized = true;
		nvb->do_bottom_half_value = -1;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
out_unlock:
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
		virt_put_guest(prtn);
	}

	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_bitmap_destroy(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;

	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
	    !args->a5 && !args->a6 && !args->a7) {
		struct guest_partition *prtn = NULL;
		struct notif_vm_bitmap *nvb = NULL;
		uint16_t vm_id = args->a1;

		prtn = virt_get_guest(vm_id);
		nvb = get_notif_vm_bitmap(prtn, vm_id);
		if (!nvb) {
			ret_val = FFA_INVALID_PARAMETERS;
			goto out_virt_put;
		}

		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

		if (nvb->pending || nvb->bound) {
			ret_val = FFA_DENIED;
			goto out_unlock;
		}

		memset(nvb, 0, sizeof(*nvb));
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
out_unlock:
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
		virt_put_guest(prtn);
	}

	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

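/*
 * Handle FFA_NOTIFICATION_BIND: bind the notification bits in w3/w4 to
 * the receiver in w1 so a sender may set them later. Only global
 * notifications are supported, so the flags in w2 must be zero.
 */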
static void handle_notification_bind(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint64_t bitmap = 0;
	uint16_t vm_id = 0;

	if (args->a5 || args->a6 || args->a7)
		goto out;
	if (args->a2) {
		/* We only deal with global notifications */
		ret_val = FFA_DENIED;
		goto out;
	}

	/* The destination of the eventual notification */
	vm_id = FFA_DST(args->a1);
	bitmap = reg_pair_to_64(args->a4, args->a3);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		ret_val = FFA_INVALID_PARAMETERS;
		goto out_virt_put;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	if ((bitmap & nvb->bound)) {
		ret_val = FFA_DENIED;
	} else {
		nvb->bound |= bitmap;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
	}

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_unbind(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint64_t bitmap = 0;
	uint16_t vm_id = 0;

	if (args->a2 || args->a5 || args->a6 || args->a7)
		goto out;

	/* The destination of the eventual notification */
	vm_id = FFA_DST(args->a1);
	bitmap = reg_pair_to_64(args->a4, args->a3);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb) {
		ret_val = FFA_INVALID_PARAMETERS;
		goto out_virt_put;
	}

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	if (bitmap & nvb->pending) {
		ret_val = FFA_DENIED;
	} else {
		nvb->bound &= ~bitmap;
		ret_val = FFA_OK;
		ret_fid = FFA_SUCCESS_32;
	}

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
}

static void handle_notification_get(struct thread_smc_1_2_regs *args)
{
	uint32_t w2 = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t old_itr_status = 0;
	uint16_t vm_id = 0;
	uint32_t w3 = 0;

	if (args->a5 || args->a6 || args->a7)
		goto out;
	if (!(args->a2 & 0x1)) {
		ret_fid = FFA_SUCCESS_32;
		w2 = 0;
		goto out;
	}
	vm_id = FFA_DST(args->a1);

	prtn = virt_get_guest(vm_id);
	nvb = get_notif_vm_bitmap(prtn, vm_id);
	if (!nvb)
		goto out_virt_put;

	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);

	reg_pair_from_64(nvb->pending, &w3, &w2);
	nvb->pending = 0;
	ret_fid = FFA_SUCCESS_32;

	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
out_virt_put:
	virt_put_guest(prtn);
out:
	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
}

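/*
 * State for building the FFA_NOTIFICATION_INFO_GET response: endpoint
 * IDs are packed two (32-bit ABI) or four (64-bit ABI) per register in
 * w3-w7, while the number of lists and the size of each list are
 * encoded in w2.
 */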
struct notif_info_get_state {
	struct thread_smc_1_2_regs *args;
	unsigned int ids_per_reg;
	unsigned int ids_count;
	unsigned int id_pos;
	unsigned int count;
	unsigned int max_list_count;
	unsigned int list_count;
};

static bool add_id_in_regs(struct notif_info_get_state *state,
			   uint16_t id)
{
	unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
	unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;

	if (reg_idx > 7)
		return false;

	state->args->a[reg_idx] &= ~SHIFT_U64(0xffff, reg_shift);
	state->args->a[reg_idx] |= (unsigned long)id << reg_shift;

	state->id_pos++;
	state->count++;
	return true;
}

static bool add_id_count(struct notif_info_get_state *state)
{
	assert(state->list_count < state->max_list_count &&
	       state->count >= 1 && state->count <= 4);

	state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
	state->list_count++;
	state->count = 0;

	return state->list_count < state->max_list_count;
}

static bool add_nvb_to_state(struct notif_info_get_state *state,
			     uint16_t guest_id, struct notif_vm_bitmap *nvb)
{
	if (!nvb->pending)
		return true;
	/*
	 * Add only the guest_id, meaning a global notification for this
	 * guest.
	 *
	 * If notifications were pending for one or more specific vCPUs
	 * we'd add those IDs before calling add_id_count(), but that's
	 * not supported.
	 */
	return add_id_in_regs(state, guest_id) && add_id_count(state);
}

static void handle_notification_info_get(struct thread_smc_1_2_regs *args)
{
	struct notif_info_get_state state = { .args = args };
	uint32_t ffa_res = FFA_INVALID_PARAMETERS;
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t more_pending_flag = 0;
	uint32_t itr_state = 0;
	uint16_t guest_id = 0;

	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
	    args->a6 || args->a7)
		goto err;

	if (OPTEE_SMC_IS_64(args->a0)) {
		spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
		state.ids_per_reg = 4;
		state.max_list_count = 31;
	} else {
		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
		state.ids_per_reg = 2;
		state.max_list_count = 15;
	}

	while (true) {
		/*
		 * With NS-virtualization we need to go through all
		 * partitions to collect the notification bitmaps; without
		 * it we just check the only notification bitmap we have.
		 */
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			prtn = virt_next_guest(prtn);
			if (!prtn)
				break;
			guest_id = virt_get_guest_id(prtn);
		}
		nvb = get_notif_vm_bitmap(prtn, guest_id);

		itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
		if (!add_nvb_to_state(&state, guest_id, nvb))
			more_pending_flag = BIT(0);
		cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);

		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
			break;
	}
	virt_put_guest(prtn);

	if (!state.id_pos) {
		ffa_res = FFA_NO_DATA;
		goto err;
	}
	args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
		   (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
		   more_pending_flag;
	return;
err:
	spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
}
1889
void thread_spmc_set_async_notif_intid(int intid)
{
	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
	notif_intid = intid;
	spmc_notif_is_ready = true;
	DMSG("Asynchronous notifications are ready");
}

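/*
 * OP-TEE is the S-EL1 SPMC: mark the do-bottom-half notification
 * pending for the guest and raise the SGI that signals normal world
 * asynchronously.
 */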
void notif_send_async(uint32_t value, uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	uint32_t old_itr_status = 0;

	prtn = virt_get_guest(guest_id);
	nvb = get_notif_vm_bitmap(prtn, guest_id);

	if (nvb) {
		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
		       notif_intid >= 0);
		nvb->pending |= BIT64(nvb->do_bottom_half_value);
		interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
				    ITR_CPU_MASK_TO_THIS_CPU);
		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
	}

	virt_put_guest(prtn);
}
#else
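/*
 * An external SPMC is in use: deliver the do-bottom-half notification
 * to the guest with FFA_NOTIFICATION_SET via the SPMC.
 */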
void notif_send_async(uint32_t value, uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	struct notif_vm_bitmap *nvb = NULL;
	/* global notification, delay notification interrupt */
	uint32_t flags = BIT32(1);
	int res = 0;

	prtn = virt_get_guest(guest_id);
	nvb = get_notif_vm_bitmap(prtn, guest_id);

	if (nvb) {
		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
		res = ffa_set_notification(guest_id, optee_core_lsp.sp_id,
					   flags,
					   BIT64(nvb->do_bottom_half_value));
		if (res) {
			EMSG("notification set failed with error %d", res);
			panic();
		}
	}

	virt_put_guest(prtn);
}
#endif

/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args);
void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
#if defined(CFG_CORE_SEL1_SPMC)
	case FFA_FEATURES:
		handle_features(args);
		break;
	case FFA_SPM_ID_GET:
		spmc_handle_spm_id_get(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &my_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &my_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &my_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		spmc_handle_partition_info_get(args, &my_rxtx);
		break;
	case FFA_RUN:
		spmc_handle_run(args);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_INTERRUPT:
		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
				      0, 0);
		else
			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
		break;
#ifdef ARM64
	case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
	case FFA_MSG_SEND_DIRECT_REQ_32:
		handle_direct_request(args);
		break;
#if defined(CFG_CORE_SEL1_SPMC)
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
#ifdef ARM64
	case FFA_MEM_LEND_64:
#endif
	case FFA_MEM_LEND_32:
		handle_mem_op(args, &my_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
		    !ffa_mem_reclaim(args, NULL))
			handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &my_rxtx);
		break;
	case FFA_NOTIFICATION_BITMAP_CREATE:
		handle_notification_bitmap_create(args);
		break;
	case FFA_NOTIFICATION_BITMAP_DESTROY:
		handle_notification_bitmap_destroy(args);
		break;
	case FFA_NOTIFICATION_BIND:
		handle_notification_bind(args);
		break;
	case FFA_NOTIFICATION_UNBIND:
		handle_notification_unbind(args);
		break;
	case FFA_NOTIFICATION_GET:
		handle_notification_get(args);
		break;
#ifdef ARM64
	case FFA_NOTIFICATION_INFO_GET_64:
#endif
	case FFA_NOTIFICATION_INFO_GET_32:
		handle_notification_info_get(args);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_ERROR:
		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
			/*
			 * The SPMC would just return another FFA_ERROR,
			 * so it's better to panic() now than to flood
			 * the log.
			 */
			panic("FFA_ERROR from SPMC is fatal");
		}
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break;
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
	}
}

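/*
 * Map the shared memory identified by @cookie, locate the OPTEE_MSG
 * argument struct at @offset and process the call with tee_entry_std().
 * The RPC argument struct used by the thread is expected to follow
 * directly after the main argument struct.
 */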
static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
{
	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	uint32_t num_params = 0;
	size_t sz = 0;

	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj) {
		EMSG("Can't find cookie %#"PRIx64, cookie);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	res = mobj_inc_map(mobj);
	if (res)
		goto out_put_mobj;

	res = TEE_ERROR_BAD_PARAMETERS;
	arg = mobj_get_va(mobj, offset, sizeof(*arg));
	if (!arg)
		goto out_dec_map;

	num_params = READ_ONCE(arg->num_params);
	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto out_dec_map;

	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);

	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
	if (!thr->rpc_arg)
		goto out_dec_map;

	virt_on_stdcall();
	res = tee_entry_std(arg, num_params);

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	thr->rpc_arg = NULL;

out_dec_map:
	mobj_dec_map(mobj);
out_put_mobj:
	mobj_put(mobj);
	return res;
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make link_dummies_paged.c happy.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
				       uint32_t a2, uint32_t a3,
				       uint32_t a4, uint32_t a5 __unused)
{
	/*
	 * Arguments are supplied from handle_yielding_call() as:
	 * a0 <- w1
	 * a1 <- w3
	 * a2 <- w4
	 * a3 <- w5
	 * a4 <- w6
	 * a5 <- w7
	 */
	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
	return FFA_DENIED;
}

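/*
 * Translate a memref thread parameter into an OPTEE_MSG FF-A memory
 * parameter (FMEM), carrying the offset, size and the global cookie
 * identifying the shared memory object.
 */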
static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
{
	uint64_t offs = tpm->u.memref.offs;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;

	param->u.fmem.offs_low = offs;
	param->u.fmem.offs_high = offs >> 32;
	if (param->u.fmem.offs_high != offs >> 32)
		return false;

	param->u.fmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);

		/* If a mobj is passed it better be one with a valid cookie. */
		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			return false;
		param->u.fmem.global_id = cookie;
	} else {
		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	}

	return true;
}

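/*
 * Prepare the preallocated RPC argument struct of the current thread
 * for an RPC with @cmd and @num_params parameters.
 */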
static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params,
			    struct optee_msg_arg **arg_ret)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		EMSG("rpc_arg not set");
		return TEE_ERROR_GENERIC;
	}

	memset(arg, 0, sz);
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!set_fmem(arg->params + n, params + n))
				return TEE_ERROR_BAD_PARAMETERS;
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (arg_ret)
		*arg_ret = arg;

	return TEE_SUCCESS;
}

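/*
 * Copy the output values and sizes back from a completed RPC argument
 * struct and return the RPC return code.
 */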
static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg);
	if (ret)
		return ret;

	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

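/*
 * Ask normal world to free previously allocated shared memory and
 * unregister the corresponding cookie.
 */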
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t res2 = 0;
	uint32_t res = 0;

	DMSG("freeing cookie %#"PRIx64, cookie);

	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);

	mobj_put(mobj);
	res2 = mobj_ffa_unregister_by_cookie(cookie);
	if (res2)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
		     cookie, res2);
	if (!res)
		thread_rpc(&rpc_arg);
}

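/*
 * Ask normal world to allocate shared memory of @size bytes and
 * retrieve the resulting FF-A memory object by its cookie.
 */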
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	struct optee_msg_arg *arg = NULL;
	unsigned int internal_offset = 0;
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;

	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
		return NULL;

	thread_rpc(&rpc_arg);

	if (arg->num_params != 1 ||
	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
		return NULL;

	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
	cookie = READ_ONCE(arg->params->u.fmem.global_id);
	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj->size < size) {
		DMSG("Mobj %#"PRIx64": wrong size", cookie);
		mobj_put(mobj);
		return NULL;
	}

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	if (mobj)
		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
				mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	if (mobj)
		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
				mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	if (mobj)
		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
				mobj_get_cookie(mobj), mobj);
}

void thread_spmc_register_secondary_ep(vaddr_t ep)
{
	unsigned long ret = 0;

	/* Let the SPM know the entry point for secondary CPUs */
	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);

	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
}

static uint16_t ffa_id_get(void)
{
	/*
	 * Ask the SPM component running at a higher EL to return our FF-A ID.
	 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
	 * the partition ID (if not).
	 */
	struct thread_smc_args args = {
		.a0 = FFA_ID_GET,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("Get id failed with error %ld", args.a2);
		else
			EMSG("Get id failed");
		panic();
	}

	return args.a2;
}

static uint16_t ffa_spm_id_get(void)
{
	/*
	 * Ask the SPM component running at a higher EL to return its ID.
	 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
	 * If not, the ID of the SPMC will be returned.
	 */
	struct thread_smc_args args = {
		.a0 = FFA_SPM_ID_GET,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("Get spm id failed with error %ld", args.a2);
		else
			EMSG("Get spm id failed");
		panic();
	}

	return args.a2;
}

#ifdef CFG_CORE_DYN_PROTMEM
TEE_Result thread_spmc_get_protmem_config(enum mobj_use_case use_case,
					  void *buf, size_t *buf_sz,
					  size_t *min_mem_sz,
					  size_t *min_mem_align)
{
	TEE_Result res = TEE_SUCCESS;
	struct ffa_mem_access_perm mem_acc_list[] = {
		{
			.endpoint_id = optee_core_lsp.sp_id,
			.perm = FFA_MEM_ACC_RW,
		},
	};

	res = plat_get_protmem_config(use_case, min_mem_sz, min_mem_align);
	if (res)
		return res;

	if (!buf || *buf_sz < sizeof(mem_acc_list)) {
		*buf_sz = sizeof(mem_acc_list);
		return TEE_ERROR_SHORT_BUFFER;
	}

	memcpy(buf, mem_acc_list, sizeof(mem_acc_list));
	*buf_sz = sizeof(mem_acc_list);

	return TEE_SUCCESS;
}
#endif /*CFG_CORE_DYN_PROTMEM*/

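/*
 * Sanity check a logical SP descriptor before registration: a non-zero
 * SP ID must be unused and within the secure world ID range, only the
 * accepted partition properties may be set, and a direct request
 * callback and a non-NULL UUID must be provided.
 */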
static TEE_Result check_desc(struct spmc_lsp_desc *d)
{
	uint32_t accept_props = FFA_PART_PROP_DIRECT_REQ_RECV |
				FFA_PART_PROP_DIRECT_REQ_SEND |
				FFA_PART_PROP_NOTIF_CREATED |
				FFA_PART_PROP_NOTIF_DESTROYED |
				FFA_PART_PROP_AARCH64_STATE;
	uint32_t id = d->sp_id;

	if (id && (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id) ||
		   id < FFA_SWD_ID_MIN || id > FFA_SWD_ID_MAX)) {
		EMSG("Conflicting SP id for SP \"%s\" id %#"PRIx32,
		     d->name, id);
		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
			panic();
		return TEE_ERROR_BAD_FORMAT;
	}

	if (d->properties & ~accept_props) {
		EMSG("Unexpected properties in %#"PRIx32" for LSP \"%s\" %#"PRIx16,
		     d->properties, d->name, d->sp_id);
		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
			panic();
		d->properties &= accept_props;
	}

	if (!d->direct_req) {
		EMSG("Missing direct request callback for LSP \"%s\" %#"PRIx16,
		     d->name, d->sp_id);
		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
			panic();
		return TEE_ERROR_BAD_FORMAT;
	}

	if (!d->uuid_words[0] && !d->uuid_words[1] &&
	    !d->uuid_words[2] && !d->uuid_words[3]) {
		EMSG("Found NULL UUID for LSP \"%s\" %#"PRIx16,
		     d->name, d->sp_id);
		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
			panic();
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

static uint16_t find_unused_sp_id(void)
{
	uint32_t id = FFA_SWD_ID_MIN;

	while (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id)) {
		id++;
		assert(id <= FFA_SWD_ID_MAX);
	}

	return id;
}

TEE_Result spmc_register_lsp(struct spmc_lsp_desc *desc)
{
	TEE_Result res = TEE_SUCCESS;

	res = check_desc(desc);
	if (res)
		return res;

	if (STAILQ_EMPTY(&lsp_head)) {
		DMSG("Cannot add Logical SP \"%s\": LSP framework not initialized yet",
		     desc->name);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (!desc->sp_id)
		desc->sp_id = find_unused_sp_id();

	DMSG("Adding Logical SP \"%s\" with id %#"PRIx16,
	     desc->name, desc->sp_id);

	STAILQ_INSERT_TAIL(&lsp_head, desc, link);

	return TEE_SUCCESS;
}

static struct spmc_lsp_desc optee_core_lsp __nex_data = {
	.name = "OP-TEE",
	.direct_req = optee_lsp_handle_direct_request,
	.properties = FFA_PART_PROP_DIRECT_REQ_RECV |
		      FFA_PART_PROP_DIRECT_REQ_SEND |
#ifdef CFG_NS_VIRTUALIZATION
		      FFA_PART_PROP_NOTIF_CREATED |
		      FFA_PART_PROP_NOTIF_DESTROYED |
#endif
		      FFA_PART_PROP_AARCH64_STATE |
		      FFA_PART_PROP_IS_PE_ID,
	/*
	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an
	 *   S-EL1 SP, or
	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
	 *   logical partition, residing in the same exception level as
	 *   the SPMC
	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	.uuid_words = { 0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5, },
};

#if defined(CFG_CORE_SEL1_SPMC)
static struct spmc_lsp_desc optee_spmc_lsp __nex_data = {
	.name = "OP-TEE SPMC",
	.direct_req = optee_spmc_lsp_handle_direct_request,
};

static TEE_Result spmc_init(void)
{
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
				     sizeof(struct notif_vm_bitmap), NULL))
		panic("virt_add_guest_spec_data");
	spmd_id = ffa_spm_id_get();
	DMSG("SPMD ID %#"PRIx16, spmd_id);

	optee_spmc_lsp.sp_id = ffa_id_get();
	DMSG("SPMC ID %#"PRIx16, optee_spmc_lsp.sp_id);
	STAILQ_INSERT_HEAD(&lsp_head, &optee_spmc_lsp, link);

	optee_core_lsp.sp_id = find_unused_sp_id();
	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
	STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);

	/*
	 * If the SPMD thinks we are version 1.0 it will report version
	 * 1.0 to normal world regardless of which version the SPM is
	 * queried with. However, if the SPMD thinks we are version 1.1
	 * it will forward queries from normal world to let us negotiate
	 * the version. So by setting version 1.0 here we should be
	 * compatible either way.
	 *
	 * Note that disagreement on the negotiated version means that
	 * we'll have communication problems with normal world.
	 */
	my_rxtx.ffa_vers = FFA_VERSION_1_0;

	return TEE_SUCCESS;
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
{
	struct thread_smc_args args = {
#ifdef ARM64
		.a0 = FFA_RXTX_MAP_64,
#else
		.a0 = FFA_RXTX_MAP_32,
#endif
		.a1 = virt_to_phys(rxtx->tx),
		.a2 = virt_to_phys(rxtx->rx),
		.a3 = 1,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("rxtx map failed with error %ld", args.a2);
		else
			EMSG("rxtx map failed");
		panic();
	}
}

static uint32_t get_ffa_version(uint32_t my_version)
{
	struct thread_smc_args args = {
		.a0 = FFA_VERSION,
		.a1 = my_version,
	};

	thread_smccc(&args);
	if (args.a0 & BIT(31)) {
		EMSG("FF-A version failed with error %ld", args.a0);
		panic();
	}

	return args.a0;
}

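/*
 * Issue FFA_MEM_RETRIEVE_REQ to the SPMC for the memory transaction
 * described by @trans, using the version specific transaction
 * descriptor format in the TX buffer. Returns a pointer to the
 * response in the RX buffer, or NULL on failure.
 */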
static void *spmc_retrieve_req(struct ffa_mem_transaction_x *trans,
			       uint32_t *tot_len, uint32_t *frag_len)
{
	uint64_t cookie __maybe_unused = trans->global_handle;
	struct ffa_mem_access_common *mem_acc = NULL;
	struct ffa_mem_access_perm *perm_descr = NULL;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RETRIEVE_REQ_32,
		.a3 = 0,	/* Address, Using TX -> MBZ */
		.a4 = 0,	/* Using TX -> MBZ */
	};
	size_t mem_acc_size = 0;
	size_t size = 0;
	int rc = 0;

	if (my_rxtx.ffa_vers <= FFA_VERSION_1_1)
		mem_acc_size = sizeof(struct ffa_mem_access_1_0);
	else
		mem_acc_size = sizeof(struct ffa_mem_access_1_2);

	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;

		size = sizeof(*trans_descr) + 1 * mem_acc_size;
		memset(trans_descr, 0, size);
		trans_descr->sender_id = trans->sender_id;
		trans_descr->mem_reg_attr = trans->mem_reg_attr;
		trans_descr->global_handle = trans->global_handle;
		trans_descr->tag = trans->tag;
		trans_descr->flags = trans->flags;
		trans_descr->mem_access_count = 1;
		mem_acc = (void *)trans_descr->mem_access_array;
	} else {
		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;

		size = sizeof(*trans_descr) + 1 * mem_acc_size;
		memset(trans_descr, 0, size);
		trans_descr->sender_id = trans->sender_id;
		trans_descr->mem_reg_attr = trans->mem_reg_attr;
		trans_descr->global_handle = trans->global_handle;
		trans_descr->tag = trans->tag;
		trans_descr->flags = trans->flags;
		trans_descr->mem_access_count = 1;
		trans_descr->mem_access_offs = sizeof(*trans_descr);
		trans_descr->mem_access_size = mem_acc_size;
		mem_acc = (void *)((vaddr_t)my_rxtx.tx + sizeof(*trans_descr));
	}
	mem_acc->region_offs = 0;
	perm_descr = &mem_acc->access_perm;
	perm_descr->endpoint_id = optee_core_lsp.sp_id;
	perm_descr->perm = FFA_MEM_ACC_RW;
	perm_descr->flags = 0;

	args.a1 = size;	/* Total Length */
	args.a2 = size;	/* Frag Length == Total length */
	thread_smccc(&args);
	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
		if (args.a0 == FFA_ERROR)
			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
			     cookie, (int)args.a2);
		else
			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
			     cookie, args.a0);
		return NULL;
	}
	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
				       my_rxtx.size, args.a1, args.a2, trans);
	if (rc) {
		ffa_simple_call(FFA_RX_RELEASE, 0, 0, 0, 0);
		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
		     cookie, rc);
		return NULL;
	}

	*tot_len = args.a1;
	*frag_len = args.a2;

	return my_rxtx.rx;
}

void thread_spmc_relinquish(uint64_t cookie)
{
	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RELINQUISH,
	};

	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
	relinquish_desc->handle = cookie;
	relinquish_desc->flags = 0;
	relinquish_desc->endpoint_count = 1;
	relinquish_desc->endpoint_id_array[0] = optee_core_lsp.sp_id;
	thread_smccc(&args);
	if (!is_ffa_success(args.a0))
		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
}

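/*
 * Walk the address ranges of a retrieved memory region and add the
 * pages to @mf, fetching further fragments with FFA_MEM_FRAG_RX as
 * needed until all @total_page_count pages are accounted for.
 */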
static int set_pages(struct mobj_ffa *mf, uint64_t cookie, void *buf,
		     struct ffa_address_range *addr_range,
		     uint32_t total_addr_range_count, uint32_t total_page_count,
		     uint32_t tot_len, uint32_t frag_len)
{
	uint32_t next_frag_offs = frag_len;
	unsigned int idx = 0;
	uint32_t n = 0;
	int ret = 0;

	while (true) {
		struct thread_smc_args args = {
			.a0 = FFA_MEM_FRAG_RX,
			.a1 = low32_from_64(cookie),
			.a2 = high32_from_64(cookie),
			.a3 = next_frag_offs,
		};

		for (n = 0; n < total_addr_range_count; n++) {
			unsigned int page_count = 0;
			uint64_t addr = 0;

			if ((void *)(n + addr_range + 1) >
			    (void *)((vaddr_t)buf + frag_len))
				break;

			page_count = READ_ONCE(addr_range[n].page_count);
			addr = READ_ONCE(addr_range[n].address);
			ret = mobj_ffa_add_pages_at(mf, &idx, addr, page_count);
			if (ret)
				return ret;
		}
		total_addr_range_count -= n;
		if (!total_addr_range_count) {
			if (idx != total_page_count)
				return FFA_INVALID_PARAMETERS;
			return FFA_OK;
		}

		ret = ffa_simple_call(FFA_RX_RELEASE, 0, 0, 0, 0);
		if (ret)
			return ret;

		thread_smccc(&args);
		if (args.a0 == FFA_ERROR) {
			EMSG("FFA_MEM_FRAG_RX: ret %d", (int)args.a2);
			return args.a2;
		}
		if (args.a0 != FFA_MEM_FRAG_TX) {
			EMSG("Bad tx fid 0x%lx", args.a0);
			return FFA_INVALID_PARAMETERS;
		}
		if (reg_pair_to_64(args.a2, args.a1) != cookie) {
			EMSG("Bad cookie 0x%"PRIx64" expected 0x%"PRIx64,
			     reg_pair_to_64(args.a2, args.a1), cookie);
			return FFA_INVALID_PARAMETERS;
		}
		frag_len = args.a3;
		next_frag_offs += frag_len;
		if (next_frag_offs > tot_len ||
		    frag_len % sizeof(*addr_range)) {
			EMSG("Bad frag_len 0x%"PRIx32, frag_len);
			return FFA_INVALID_PARAMETERS;
		}
		addr_range = buf;
	}
}

struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie,
						   enum mobj_use_case use_case)
{
	struct mobj_ffa *ret = NULL;
	struct ffa_mem_transaction_x retrieve_desc = { .tag = use_case };
	struct ffa_mem_access_common *mem_acc = NULL;
	struct ffa_mem_region *descr = NULL;
	uint32_t total_page_count = 0;
	struct mobj_ffa *mf = NULL;
	unsigned int offs = 0;
	uint32_t frag_len = 0;
	uint32_t tot_len = 0;
	void *buf = NULL;

	if (use_case == MOBJ_USE_CASE_NS_SHM)
		retrieve_desc.flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
	else
		retrieve_desc.flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
	retrieve_desc.flags |= FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
	retrieve_desc.global_handle = cookie;
	retrieve_desc.sender_id = thread_get_tsd()->rpc_target_info;
	retrieve_desc.mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;

	/*
	 * OP-TEE only supports a single mem_region while the
	 * specification allows for more than one.
	 */
	buf = spmc_retrieve_req(&retrieve_desc, &tot_len, &frag_len);
	if (!buf) {
		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
		     cookie);
		return NULL;
	}

	/*
	 * We assume that the returned buffer ends with a complete struct
	 * ffa_address_range.
	 */
	if (!IS_ALIGNED_WITH_TYPE(frag_len, struct ffa_address_range) ||
	    !IS_ALIGNED_WITH_TYPE(tot_len, struct ffa_address_range))
		goto out;
	mem_acc = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
	/* We assume struct ffa_mem_region is well aligned. */
	offs = READ_ONCE(mem_acc->region_offs);
	if (!IS_ALIGNED_WITH_TYPE(offs, struct ffa_mem_region))
		goto out;
	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);

	total_page_count = READ_ONCE(descr->total_page_count);
	mf = mobj_ffa_spmc_new(cookie, total_page_count, use_case);
	if (!mf)
		goto out;

	if (set_pages(mf, cookie, buf, descr->address_range_array,
		      READ_ONCE(descr->address_range_count), total_page_count,
		      tot_len, frag_len)) {
		mobj_ffa_spmc_delete(mf);
		goto out;
	}

	ret = mf;

out:
	/* Release RX buffer after the mem retrieve request. */
	ffa_simple_call(FFA_RX_RELEASE, 0, 0, 0, 0);

	return ret;
}

static uint32_t get_ffa_version_from_manifest(void *fdt)
{
	int ret = 0;
	uint32_t vers = 0;

	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (ret < 0) {
		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
		panic();
	}

	ret = fdt_read_uint32(fdt, 0, "ffa-version", &vers);
	if (ret < 0) {
		EMSG("Can't read \"ffa-version\" from FF-A manifest at %p: error %d",
		     fdt, ret);
		panic();
	}

	return vers;
}

static TEE_Result spmc_init(void)
{
	uint32_t my_vers = 0;
	uint32_t vers = 0;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
				     sizeof(struct notif_vm_bitmap), NULL))
		panic("virt_add_guest_spec_data");

	my_vers = get_ffa_version_from_manifest(get_manifest_dt());
	if (my_vers < FFA_VERSION_1_0 || my_vers > FFA_VERSION_1_2) {
		EMSG("Unsupported version %"PRIu32".%"PRIu32" from manifest",
		     FFA_GET_MAJOR_VERSION(my_vers),
		     FFA_GET_MINOR_VERSION(my_vers));
		panic();
	}
	vers = get_ffa_version(my_vers);
	DMSG("SPMC reported version %"PRIu32".%"PRIu32,
	     FFA_GET_MAJOR_VERSION(vers), FFA_GET_MINOR_VERSION(vers));
	if (FFA_GET_MAJOR_VERSION(vers) != FFA_GET_MAJOR_VERSION(my_vers)) {
		EMSG("Incompatible major version %"PRIu32", expected %"PRIu32,
		     FFA_GET_MAJOR_VERSION(vers),
		     FFA_GET_MAJOR_VERSION(my_vers));
		panic();
	}
	if (vers < my_vers)
		my_vers = vers;
	DMSG("Using version %"PRIu32".%"PRIu32,
	     FFA_GET_MAJOR_VERSION(my_vers), FFA_GET_MINOR_VERSION(my_vers));
	my_rxtx.ffa_vers = my_vers;

	spmc_rxtx_map(&my_rxtx);

	spmc_id = ffa_spm_id_get();
	DMSG("SPMC ID %#"PRIx16, spmc_id);

	optee_core_lsp.sp_id = ffa_id_get();
	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
	STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);

	if (!ffa_features(FFA_NOTIFICATION_SET)) {
		spmc_notif_is_ready = true;
		DMSG("Asynchronous notifications are ready");
	}

	return TEE_SUCCESS;
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

nex_service_init(spmc_init);