// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021-2024, Arm Limited
 */
#include <assert.h>
#include <io.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>
#include <mm/vm.h>
#include <optee_ffa.h>
#include <string.h>

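/*
 * Protects the receiver reference counts (sp_mem_receiver::ref_count),
 * which are updated on FFA_MEM_RETRIEVE_REQ and FFA_MEM_RELINQUISH and
 * checked on FFA_MEM_RECLAIM.
 */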
static unsigned int mem_ref_lock = SPINLOCK_UNLOCK;

int spmc_sp_start_thread(struct thread_smc_1_2_regs *args)
{
	thread_sp_alloc_and_run(args);
	/*
	 * thread_sp_alloc_and_run() only returns if all threads are busy.
	 * The caller must try again.
	 */
	return FFA_BUSY;
}

static void ffa_set_error(struct thread_smc_1_2_regs *args, uint32_t error)
{
	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, error, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void ffa_success(struct thread_smc_1_2_regs *args)
{
	spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
}

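/*
 * Resolve the destination session of a direct message and check that the
 * source field matches the calling endpoint. *dst is set to NULL when
 * the destination is the normal world.
 */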
static TEE_Result ffa_get_dst(struct thread_smc_1_2_regs *args,
			      struct sp_session *caller,
			      struct sp_session **dst)
{
	struct sp_session *s = NULL;

	s = sp_get_session(FFA_DST(args->a1));

	/* Message came from the NW */
	if (!caller) {
		if (!s) {
			EMSG("Neither destination nor source is an SP");
			return FFA_INVALID_PARAMETERS;
		}
	} else {
		/* Check if the source matches the endpoint we came from */
		if (FFA_SRC(args->a1) != caller->endpoint_id) {
			EMSG("Source address doesn't match the endpoint id");
			return FFA_INVALID_PARAMETERS;
		}
	}

	*dst = s;

	return FFA_OK;
}

static struct sp_mem_receiver *find_sp_mem_receiver(struct sp_session *s,
						    struct sp_mem *smem)
{
	struct sp_mem_receiver *receiver = NULL;

	/*
	 * FF-A Spec 8.10.2:
	 * Each Handle identifies a single unique composite memory region
	 * description, that is, there is a 1:1 mapping between the two.
	 *
	 * Each memory share has a unique handle. We can only have each SP
	 * once as a receiver in the memory share. For each receiver of a
	 * memory share, we have one sp_mem_access_descr object.
	 * This means that there can only be one SP linked to a specific
	 * struct sp_mem_access_descr.
	 */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->perm.endpoint_id == s->endpoint_id)
			break;
	}
	return receiver;
}

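/*
 * Register a loaded SP as a receiver of the share described by smem.
 * Rejected if the endpoint is unknown, already listed as a receiver or
 * requests permissions outside FFA_MEM_ACC_MASK.
 */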
static int add_mem_region_to_sp(struct ffa_mem_access_perm *access_perm,
				struct sp_mem *smem)
{
	struct sp_session *s = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint8_t perm = READ_ONCE(access_perm->perm);
	uint16_t endpoint_id = READ_ONCE(access_perm->endpoint_id);

	s = sp_get_session(endpoint_id);

	/* Only add memory shares of loaded SPs */
	if (!s)
		return FFA_DENIED;

	/* Only allow each endpoint once */
	if (find_sp_mem_receiver(s, smem))
		return FFA_DENIED;

	if (perm & ~FFA_MEM_ACC_MASK)
		return FFA_DENIED;

	receiver = calloc(1, sizeof(struct sp_mem_receiver));
	if (!receiver)
		return FFA_NO_MEMORY;

	receiver->smem = smem;

	receiver->perm.endpoint_id = endpoint_id;
	receiver->perm.perm = perm;
	receiver->perm.flags = READ_ONCE(access_perm->flags);

	SLIST_INSERT_HEAD(&smem->receivers, receiver, link);

	return FFA_OK;
}

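/*
 * Handle FFA_MEM_SHARE invoked by an SP: parse the memory transaction
 * descriptor from the RXTX buffer and, on success, return the newly
 * assigned global handle in w2/w3.
 */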
static void spmc_sp_handle_mem_share(struct thread_smc_1_2_regs *args,
				     struct ffa_rxtx *rxtx,
				     struct sp_session *owner_sp)
{
	struct ffa_mem_transaction_x mem_trans = { };
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t global_handle = 0;
	int res = FFA_OK;

	cpu_spin_lock(&rxtx->spinlock);

	/* Descriptor fragments or custom buffers aren't supported yet. */
	if (frag_len != tot_len || args->a3 || args->a4)
		res = FFA_NOT_SUPPORTED;
	else if (frag_len > rxtx->size)
		res = FFA_INVALID_PARAMETERS;
	else
		res = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx,
						frag_len, &mem_trans);
	if (!res)
		res = spmc_sp_add_share(&mem_trans, rxtx, tot_len, frag_len,
					&global_handle, owner_sp);
	if (!res) {
		args->a3 = high32_from_64(global_handle);
		args->a2 = low32_from_64(global_handle);
		args->a1 = FFA_PARAM_MBZ;
		args->a0 = FFA_SUCCESS_32;
	} else {
		ffa_set_error(args, res);
	}

	cpu_spin_unlock(&rxtx->spinlock);
}

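/*
 * Add one ffa_address_range of an SP-owned share to smem. The VA range
 * may span several mobjs, so one sp_mem_map_region is created per mobj.
 * The share is refused if it requests more access than the owner's
 * current mapping allows or if the range isn't exclusively owned.
 */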
static int spmc_sp_add_sp_region(struct sp_mem *smem,
				 struct ffa_address_range *mem_reg,
				 struct sp_session *owner_sp,
				 uint8_t highest_permission)
{
	struct sp_ctx *sp_ctx = NULL;
	uint64_t va = READ_ONCE(mem_reg->address);
	int res = FFA_OK;
	uint64_t region_len = READ_ONCE(mem_reg->page_count) * SMALL_PAGE_SIZE;
	struct mobj *mobj = NULL;

	sp_ctx = to_sp_ctx(owner_sp->ts_sess.ctx);

	/*
	 * The memory region we try to share might not be linked to just one
	 * mobj. Create a new region for each mobj.
	 */
	while (region_len) {
		size_t len = region_len;
		struct sp_mem_map_region *region = NULL;
		uint16_t prot = 0;
		size_t offs = 0;

		/*
		 * There is already a mobj for each address that is in the
		 * SP's address range.
		 */
		mobj = vm_get_mobj(&sp_ctx->uctx, va, &len, &prot, &offs);
		if (!mobj)
			return FFA_DENIED;

		/*
		 * If we share memory from an SP, check that we are not
		 * sharing it with higher permissions than those of the
		 * original mapping.
		 */
		if ((highest_permission & FFA_MEM_ACC_RW) &&
		    !(prot & TEE_MATTR_UW)) {
			res = FFA_DENIED;
			goto err;
		}

		if ((highest_permission & FFA_MEM_ACC_EXE) &&
		    !(prot & TEE_MATTR_UX)) {
			res = FFA_DENIED;
			goto err;
		}

		region = calloc(1, sizeof(*region));
		if (!region) {
			res = FFA_NO_MEMORY;
			goto err;
		}
		region->mobj = mobj;
		region->page_offset = offs;
		region->page_count = len / SMALL_PAGE_SIZE;

		if (!sp_has_exclusive_access(region, &sp_ctx->uctx)) {
			free(region);
			res = FFA_DENIED;
			goto err;
		}

		va += len;
		region_len -= len;
		SLIST_INSERT_HEAD(&smem->regions, region, link);
	}

	return FFA_OK;
err:
	mobj_put(mobj);

	return res;
}

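/*
 * Build a single sp_mem_map_region, backed by a new mobj, covering all
 * address ranges of a share owned by the normal world.
 */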
static int spmc_sp_add_nw_region(struct sp_mem *smem,
				 struct ffa_mem_region *mem_reg)
{
	uint64_t page_count = READ_ONCE(mem_reg->total_page_count);
	struct sp_mem_map_region *region = NULL;
	struct mobj *m = sp_mem_new_mobj(page_count, TEE_MATTR_MEM_TYPE_CACHED,
					 false);
	unsigned int i = 0;
	unsigned int idx = 0;
	int res = FFA_OK;
	uint64_t address_count = READ_ONCE(mem_reg->address_range_count);

	if (!m)
		return FFA_NO_MEMORY;

	for (i = 0; i < address_count; i++) {
		struct ffa_address_range *addr_range = NULL;

		addr_range = &mem_reg->address_range_array[i];
		if (sp_mem_add_pages(m, &idx,
				     READ_ONCE(addr_range->address),
				     READ_ONCE(addr_range->page_count))) {
			res = FFA_DENIED;
			goto clean_up;
		}
	}

	region = calloc(1, sizeof(*region));
	if (!region) {
		res = FFA_NO_MEMORY;
		goto clean_up;
	}

	region->mobj = m;
	region->page_count = page_count;

	if (!sp_has_exclusive_access(region, NULL)) {
		free(region);
		res = FFA_DENIED;
		goto clean_up;
	}

	SLIST_INSERT_HEAD(&smem->regions, region, link);
	return FFA_OK;
clean_up:
	mobj_put(m);
	return res;
}

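/*
 * Validate a memory transaction descriptor and register the share. As
 * parsed below, the layout in the RXTX buffer is roughly:
 *
 *   struct ffa_mem_transaction_{1_0,1_1}
 *   mem_access_count access descriptors   (at mem_access_offs)
 *   struct ffa_mem_region                 (at mem_acc->region_offs)
 *   struct ffa_address_range[address_range_count]
 *
 * Every offset, count and page sum is checked against blen since the
 * buffer contents are caller controlled.
 */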
int spmc_sp_add_share(struct ffa_mem_transaction_x *mem_trans,
		      struct ffa_rxtx *rxtx, size_t blen, size_t flen,
		      uint64_t *global_handle, struct sp_session *owner_sp)
{
	int res = FFA_INVALID_PARAMETERS;
	unsigned int num_mem_accs = 0;
	unsigned int i = 0;
	struct ffa_mem_access_common *mem_acc = NULL;
	size_t needed_size = 0;
	size_t addr_range_offs = 0;
	struct ffa_mem_region *mem_reg = NULL;
	uint8_t highest_permission = 0;
	struct sp_mem *smem = NULL;
	uint16_t sender_id = mem_trans->sender_id;
	size_t addr_range_cnt = 0;
	struct ffa_address_range *addr_range = NULL;
	size_t total_page_count = 0;
	size_t page_count_sum = 0;
	vaddr_t mem_acc_base = 0;
	size_t mem_acc_size = 0;

	if (blen != flen) {
		DMSG("Fragmented memory share is not supported for SPs");
		return FFA_NOT_SUPPORTED;
	}

	smem = sp_mem_new();
	if (!smem)
		return FFA_NO_MEMORY;

	if ((owner_sp && owner_sp->endpoint_id != sender_id) ||
	    (!owner_sp && sp_get_session(sender_id))) {
		res = FFA_DENIED;
		goto cleanup;
	}

	mem_acc_size = mem_trans->mem_access_size;
	num_mem_accs = mem_trans->mem_access_count;
	mem_acc_base = (vaddr_t)rxtx->rx + mem_trans->mem_access_offs;

	if (!num_mem_accs) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Store the ffa_mem_transaction */
	smem->sender_id = sender_id;
	smem->mem_reg_attr = mem_trans->mem_reg_attr;
	smem->flags = mem_trans->flags;
	smem->tag = mem_trans->tag;

	if (MUL_OVERFLOW(num_mem_accs, mem_acc_size, &needed_size) ||
	    ADD_OVERFLOW(needed_size, mem_trans->mem_access_offs,
			 &needed_size) || needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	for (i = 0; i < num_mem_accs; i++) {
		mem_acc = (void *)(mem_acc_base + i * mem_acc_size);
		highest_permission |= READ_ONCE(mem_acc->access_perm.perm);
	}

	/* Check if the memory region array fits into the buffer */
	addr_range_offs = READ_ONCE(mem_acc->region_offs);

	if (ADD_OVERFLOW(addr_range_offs, sizeof(*mem_reg), &needed_size) ||
	    needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	mem_reg = (void *)((char *)rxtx->rx + addr_range_offs);
	addr_range_cnt = READ_ONCE(mem_reg->address_range_count);
	total_page_count = READ_ONCE(mem_reg->total_page_count);

	/* Memory transaction without address ranges or pages is invalid */
	if (!addr_range_cnt || !total_page_count) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Check if the region descriptors fit into the buffer */
	if (MUL_OVERFLOW(addr_range_cnt, sizeof(*addr_range), &needed_size) ||
	    ADD_OVERFLOW(needed_size, addr_range_offs, &needed_size) ||
	    needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	page_count_sum = 0;
	for (i = 0; i < addr_range_cnt; i++) {
		addr_range = &mem_reg->address_range_array[i];

		/* Memory region without pages is invalid */
		if (!addr_range->page_count) {
			res = FFA_INVALID_PARAMETERS;
			goto cleanup;
		}

		/* Sum the page count of each region */
		if (ADD_OVERFLOW(page_count_sum, addr_range->page_count,
				 &page_count_sum)) {
			res = FFA_INVALID_PARAMETERS;
			goto cleanup;
		}
	}

	/* Validate total page count */
	if (total_page_count != page_count_sum) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Iterate over all the addresses */
	if (owner_sp) {
		for (i = 0; i < addr_range_cnt; i++) {
			addr_range = &mem_reg->address_range_array[i];
			res = spmc_sp_add_sp_region(smem, addr_range,
						    owner_sp,
						    highest_permission);
			if (res)
				goto cleanup;
		}
	} else {
		res = spmc_sp_add_nw_region(smem, mem_reg);
		if (res)
			goto cleanup;
	}

	/* Register each receiver of the share */
	for (i = 0; i < num_mem_accs; i++) {
		mem_acc = (void *)(mem_acc_base + i * mem_acc_size);
		res = add_mem_region_to_sp(&mem_acc->access_perm, smem);
		if (res)
			goto cleanup;
	}
	*global_handle = smem->global_handle;
	sp_mem_add(smem);

	return FFA_OK;

cleanup:
	sp_mem_remove(smem);
	return res;
}

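/*
 * Mark a busy SP session as preempted so that it can later be resumed
 * with spmc_sp_resume_from_preempted().
 */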
void spmc_sp_set_to_preempted(struct ts_session *ts_sess)
{
	if (ts_sess && is_sp_ctx(ts_sess->ctx)) {
		struct sp_session *sp_sess = to_sp_session(ts_sess);

		cpu_spin_lock(&sp_sess->spinlock);
		assert(sp_sess->state == sp_busy);
		sp_sess->state = sp_preempted;
		cpu_spin_unlock(&sp_sess->spinlock);
	}
}

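/*
 * Resume a preempted SP on the thread it was suspended on. Does not
 * return on success: thread_resume_from_rpc() switches to the suspended
 * thread, hence the panic() if it ever falls through.
 */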
int spmc_sp_resume_from_preempted(uint16_t endpoint_id, uint16_t thread_id)
{
	struct sp_session *sp_sess = sp_get_session(endpoint_id);

	if (!sp_sess)
		return FFA_INVALID_PARAMETERS;

	if (sp_sess->state != sp_preempted || sp_sess->thread_id != thread_id)
		return FFA_DENIED;

	cpu_spin_lock(&sp_sess->spinlock);
	sp_sess->state = sp_busy;
	cpu_spin_unlock(&sp_sess->spinlock);

	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
	panic();
}

static bool check_rxtx(struct ffa_rxtx *rxtx)
{
	return rxtx && rxtx->rx && rxtx->tx && rxtx->size > 0;
}

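/*
 * Validate an FFA_MEM_RETRIEVE_REQ against the stored share: a single
 * receiver, a matching tag, no permission escalation and a TX buffer
 * large enough for the response that will be built.
 */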
static TEE_Result
check_retrieve_request(struct sp_mem_receiver *receiver, uint32_t ffa_vers,
		       struct ffa_mem_transaction_x *mem_trans,
		       void *rx, struct sp_mem *smem, int64_t tx_len)
{
	struct ffa_mem_access_common *retr_access = NULL;
	uint8_t share_perm = receiver->perm.perm;
	uint32_t retr_perm = 0;
	uint32_t retr_flags = mem_trans->flags;
	uint64_t retr_tag = mem_trans->tag;
	struct sp_mem_map_region *reg = NULL;

	/*
	 * The request came from the endpoint. It should only have one
	 * ffa_mem_access element.
	 */
	if (mem_trans->mem_access_count != 1)
		return TEE_ERROR_BAD_PARAMETERS;

	retr_access = (void *)((vaddr_t)rx + mem_trans->mem_access_offs);
	retr_perm = READ_ONCE(retr_access->access_perm.perm);

	/* Check if the tag is correct */
	if (receiver->smem->tag != retr_tag) {
		EMSG("Incorrect tag %#"PRIx64" %#"PRIx64, receiver->smem->tag,
		     retr_tag);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/* Check permissions and flags */
	if ((retr_perm & FFA_MEM_ACC_RW) &&
	    !(share_perm & FFA_MEM_ACC_RW)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if ((retr_perm & FFA_MEM_ACC_EXE) &&
	    !(share_perm & FFA_MEM_ACC_EXE)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (retr_flags & FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) {
		DMSG("CLEAR_RELINQUISH is not allowed for FFA_SHARE");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/*
	 * Check if there is enough space in the TX buffer to send the
	 * response.
	 */
	if (ffa_vers <= FFA_VERSION_1_0)
		tx_len -= sizeof(struct ffa_mem_transaction_1_0);
	else
		tx_len -= sizeof(struct ffa_mem_transaction_1_1);
	tx_len -= mem_trans->mem_access_size + sizeof(struct ffa_mem_region);

	if (tx_len < 0)
		return FFA_NO_MEMORY;

	SLIST_FOREACH(reg, &smem->regions, link) {
		tx_len -= sizeof(struct ffa_address_range);
		if (tx_len < 0)
			return FFA_NO_MEMORY;
	}

	return TEE_SUCCESS;
}

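/*
 * Write the FFA_MEM_RETRIEVE_RESP descriptor for receiver into
 * dst_buffer, using the descriptor format matching ffa_vers. The
 * reported addresses are the VAs of the share in the SP's address space.
 */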
static void create_retrieve_response(uint32_t ffa_vers, void *dst_buffer,
				     struct sp_mem_receiver *receiver,
				     struct sp_mem *smem, struct sp_session *s)
{
	size_t off = 0;
	struct ffa_mem_region *dst_region = NULL;
	struct ffa_address_range *addr_dst = NULL;
	struct sp_mem_map_region *reg = NULL;
	struct ffa_mem_access_common *mem_acc = NULL;
	size_t mem_acc_size = 0;

	if (ffa_vers <= FFA_VERSION_1_1)
		mem_acc_size = sizeof(struct ffa_mem_access_1_0);
	else
		mem_acc_size = sizeof(struct ffa_mem_access_1_2);

	/*
	 * We respond with an ffa_mem_retrieve_resp, which defines the
	 * following data in the RX buffer of the SP:
	 * struct mem_transaction_descr
	 * struct mem_access_descr (always 1 element)
	 * struct mem_region_descr
	 */
	if (ffa_vers <= FFA_VERSION_1_0) {
		struct ffa_mem_transaction_1_0 *d_ds = dst_buffer;

		memset(d_ds, 0, sizeof(*d_ds));

		off = sizeof(*d_ds);
		mem_acc = (void *)d_ds->mem_access_array;

		/* Copy the mem_transaction_descr */
		d_ds->sender_id = receiver->smem->sender_id;
		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
		d_ds->tag = receiver->smem->tag;
		d_ds->mem_access_count = 1;
	} else {
		struct ffa_mem_transaction_1_1 *d_ds = dst_buffer;

		memset(d_ds, 0, sizeof(*d_ds));

		off = sizeof(*d_ds);
		mem_acc = (void *)(d_ds + 1);

		d_ds->sender_id = receiver->smem->sender_id;
		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
		d_ds->tag = receiver->smem->tag;
		d_ds->mem_access_size = mem_acc_size;
		d_ds->mem_access_count = 1;
		d_ds->mem_access_offs = off;
	}

	off += mem_acc_size;
	dst_region = (struct ffa_mem_region *)((vaddr_t)dst_buffer + off);

	/* Copy the mem_access_descr */
	mem_acc->region_offs = off;
	memcpy(&mem_acc->access_perm, &receiver->perm,
	       sizeof(struct ffa_mem_access_perm));

	/* Copy the mem_region_descr */
	memset(dst_region, 0, sizeof(*dst_region));
	dst_region->address_range_count = 0;
	dst_region->total_page_count = 0;

	addr_dst = dst_region->address_range_array;

	SLIST_FOREACH(reg, &smem->regions, link) {
		uint32_t offset = reg->page_offset;
		struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);

		addr_dst->address = (uint64_t)sp_mem_get_va(&ctx->uctx,
							    offset,
							    reg->mobj);
		addr_dst->page_count = reg->page_count;
		dst_region->address_range_count++;

		dst_region->total_page_count += addr_dst->page_count;
	}
}

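/*
 * Handle FFA_MEM_RETRIEVE_REQ from an SP. The share is mapped into the
 * SP's address space on the first retrieval only; later retrievals just
 * bump the reference count, which is bounded by UINT8_MAX.
 */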
static void ffa_mem_retrieve(struct thread_smc_1_2_regs *args,
			     struct sp_session *caller_sp,
			     struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	int ret = FFA_OK;
	size_t tx_len = 0;
	struct ffa_mem_access_common *mem_acc = NULL;
	struct ffa_mem_region *mem_region = NULL;
	uint64_t va = 0;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;
	uint32_t address_offset = 0;
	size_t needed_size = 0;

	if (!check_rxtx(rxtx) || !rxtx->tx_is_mine) {
		ret = FFA_DENIED;
		goto err;
	}
	/* Descriptor fragments aren't supported yet. */
	if (frag_len != tot_len) {
		ret = FFA_NOT_SUPPORTED;
		goto err;
	}
	if (frag_len > rxtx->size) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	tx_len = rxtx->size;

	ret = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, frag_len,
					&mem_trans);
	if (ret)
		goto err;

	smem = sp_mem_get(mem_trans.global_handle);
	if (!smem) {
		DMSG("Incorrect handle");
		ret = FFA_DENIED;
		goto err;
	}

	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
	if (!receiver) {
		DMSG("Caller is not a receiver of this share");
		ret = FFA_DENIED;
		goto err;
	}

	mem_acc = (void *)((vaddr_t)rxtx->rx + mem_trans.mem_access_offs);
	address_offset = READ_ONCE(mem_acc->region_offs);

	if (ADD_OVERFLOW(address_offset, sizeof(struct ffa_mem_region),
			 &needed_size) || needed_size > tx_len) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	if (check_retrieve_request(receiver, rxtx->ffa_vers, &mem_trans,
				   rxtx->rx, smem, tx_len) != TEE_SUCCESS) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	if (receiver->ref_count == UINT8_MAX) {
		ret = FFA_DENIED;
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
		goto err;
	}

	receiver->ref_count++;

	/* We only need to map the region the first time we request it. */
	if (receiver->ref_count == 1) {
		TEE_Result ret_map = TEE_SUCCESS;

		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

		/*
		 * Try to map the memory linked to the handle in
		 * sp_mem_access_descr.
		 */
		mem_region = (struct ffa_mem_region *)((vaddr_t)rxtx->rx +
						       address_offset);

		va = READ_ONCE(mem_region->address_range_array[0].address);
		ret_map = sp_map_shared(caller_sp, receiver, smem, &va);

		if (ret_map) {
			EMSG("Could not map memory region: %#"PRIx32, ret_map);
			exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
			receiver->ref_count--;
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			ret = FFA_DENIED;
			goto err;
		}
	} else {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	}

	create_retrieve_response(rxtx->ffa_vers, rxtx->tx, receiver, smem,
				 caller_sp);

	args->a0 = FFA_MEM_RETRIEVE_RESP;
	args->a1 = tx_len;
	args->a2 = tx_len;

	rxtx->tx_is_mine = false;

	return;
err:
	ffa_set_error(args, ret);
}

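/*
 * Handle FFA_MEM_RELINQUISH from an SP: drop one reference on the share
 * and unmap it from the SP when the last reference is gone.
 */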
static void ffa_mem_relinquish(struct thread_smc_1_2_regs *args,
			       struct sp_session *caller_sp,
			       struct ffa_rxtx *rxtx)
{
	struct sp_mem *smem = NULL;
	struct ffa_mem_relinquish *mem = rxtx->rx;
	struct sp_mem_receiver *receiver = NULL;
	int err = FFA_NOT_SUPPORTED;
	uint32_t exceptions = 0;

	if (!check_rxtx(rxtx)) {
		ffa_set_error(args, FFA_DENIED);
		return;
	}

	exceptions = cpu_spin_lock_xsave(&rxtx->spinlock);
	smem = sp_mem_get(READ_ONCE(mem->handle));

	if (!smem) {
		DMSG("Incorrect handle");
		err = FFA_DENIED;
		goto err_unlock_rxtx;
	}

	if (READ_ONCE(mem->endpoint_count) != 1) {
		DMSG("Incorrect endpoint count");
		err = FFA_INVALID_PARAMETERS;
		goto err_unlock_rxtx;
	}

	if (READ_ONCE(mem->endpoint_id_array[0]) != caller_sp->endpoint_id) {
		DMSG("Incorrect endpoint id");
		err = FFA_DENIED;
		goto err_unlock_rxtx;
	}

	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);

	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
	if (!receiver->ref_count) {
		DMSG("Too many relinquish requests");
		err = FFA_DENIED;
		goto err_unlock_memref;
	}

	receiver->ref_count--;
	if (!receiver->ref_count) {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
		if (sp_unmap_ffa_regions(caller_sp, smem) != TEE_SUCCESS) {
			DMSG("Failed to unmap region");
			ffa_set_error(args, FFA_DENIED);
			return;
		}
	} else {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	}

	ffa_success(args);
	return;

err_unlock_rxtx:
	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
	ffa_set_error(args, err);
	return;
err_unlock_memref:
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	ffa_set_error(args, err);
}

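/*
 * Zero a shared region through the owner's mapping. The owner's session
 * is pushed so that sp_mem_get_va() resolves VAs in its address space.
 */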
static void zero_mem_region(struct sp_mem *smem, struct sp_session *s)
{
	void *addr = NULL;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	ts_push_current_session(&s->ts_sess);
	SLIST_FOREACH(reg, &smem->regions, link) {
		size_t sz = reg->page_count * SMALL_PAGE_SIZE;

		addr = sp_mem_get_va(&ctx->uctx, reg->page_offset, reg->mobj);

		assert(addr);
		memset(addr, 0, sz);
	}
	ts_pop_current_session();
}

/*
 * ffa_mem_reclaim() returns false if it couldn't process the reclaim
 * message. This happens when the memory region was shared with the
 * OP-TEE endpoint. After this, thread_spmc calls handle_mem_reclaim()
 * to make sure that the region is reclaimed from the OP-TEE endpoint.
 */
bool ffa_mem_reclaim(struct thread_smc_1_2_regs *args,
		     struct sp_session *caller_sp)
{
	uint64_t handle = reg_pair_to_64(args->a2, args->a1);
	uint32_t flags = args->a3;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;

	smem = sp_mem_get(handle);
	if (!smem)
		return false;

	/*
	 * If the caller is an SP, make sure that it is the owner of the share.
	 * If the call comes from NWd this is ensured by the hypervisor.
	 */
	if (caller_sp && caller_sp->endpoint_id != smem->sender_id) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return true;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	/* Make sure that all shares were relinquished */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->ref_count != 0) {
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	if (flags & FFA_MEMORY_REGION_FLAG_CLEAR) {
		if (caller_sp) {
			zero_mem_region(smem, caller_sp);
		} else {
			/*
			 * Currently we don't support zeroing Normal World
			 * memory. To do this we would have to map the memory
			 * again, zero it and unmap it.
			 */
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	sp_mem_remove(smem);
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

	ffa_success(args);
	return true;
}

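/*
 * Handle FFA_MSG_SEND_DIRECT_REQ: validate the source, destination and
 * framework message rules, then enter the destination SP. Returns the
 * session now running on this CPU: the destination on success, the
 * caller on error.
 */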
static struct sp_session *
ffa_handle_sp_direct_req(struct thread_smc_1_2_regs *args,
			 struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	TEE_Result res = FFA_OK;

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send a message to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}
	if (!dst) {
		EMSG("Request to normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (dst == caller_sp) {
		EMSG("Cannot send message to own ID");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	if (caller_sp &&
	    !(caller_sp->props & FFA_PART_PROP_DIRECT_REQ_SEND)) {
		EMSG("SP 0x%"PRIx16" doesn't support sending direct requests",
		     caller_sp->endpoint_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (!(dst->props & FFA_PART_PROP_DIRECT_REQ_RECV)) {
		EMSG("SP 0x%"PRIx16" doesn't support receipt of direct requests",
		     dst->endpoint_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		switch (args->a2 & FFA_MSG_TYPE_MASK) {
		case FFA_MSG_SEND_VM_CREATED:
			/* The sender must be the NWd hypervisor (ID 0) */
			if (FFA_SRC(args->a1) != 0 || caller_sp) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The SP must be subscribed for this message */
			if (!(dst->props & FFA_PART_PROP_NOTIF_CREATED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		case FFA_MSG_SEND_VM_DESTROYED:
			/* The sender must be the NWd hypervisor (ID 0) */
			if (FFA_SRC(args->a1) != 0 || caller_sp) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The SP must be subscribed for this message */
			if (!(dst->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		default:
			ffa_set_error(args, FFA_NOT_SUPPORTED);
			return caller_sp;
		}
	} else if (args->a2 != FFA_PARAM_MBZ) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	cpu_spin_lock(&dst->spinlock);
	if (dst->state != sp_idle) {
		DMSG("SP is busy");
		ffa_set_error(args, FFA_BUSY);
		cpu_spin_unlock(&dst->spinlock);
		return caller_sp;
	}

	dst->state = sp_busy;
	cpu_spin_unlock(&dst->spinlock);

	/*
	 * Store the calling endpoint id. This will make it possible to check
	 * if the response is sent back to the correct endpoint.
	 */
	dst->caller_id = FFA_SRC(args->a1);

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return the error to the calling SP */
		return caller_sp;
	}

	return dst;
}

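/*
 * Handle FFA_MSG_SEND_DIRECT_RESP: check that the response goes back to
 * the endpoint that issued the matching request, mark the caller idle
 * and forward the message, or return to the normal world when dst is
 * NULL.
 */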
static struct sp_session *
ffa_handle_sp_direct_resp(struct thread_smc_1_2_regs *args,
			  struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	enum sp_status st = sp_idle;
	TEE_Result res = FFA_OK;

	if (!caller_sp) {
		EMSG("Response from normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return NULL;
	}

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send a response to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		switch (args->a2 & FFA_MSG_TYPE_MASK) {
		case FFA_MSG_RESP_VM_CREATED:
			/* The destination must be the NWd hypervisor (ID 0) */
			if (FFA_DST(args->a1) != 0 || dst) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/*
			 * The responding SP must be subscribed for this
			 * message. dst is NULL here, so check the caller.
			 */
			if (!(caller_sp->props & FFA_PART_PROP_NOTIF_CREATED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		case FFA_MSG_RESP_VM_DESTROYED:
			/* The destination must be the NWd hypervisor (ID 0) */
			if (FFA_DST(args->a1) != 0 || dst) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/*
			 * The responding SP must be subscribed for this
			 * message. dst is NULL here, so check the caller.
			 */
			if (!(caller_sp->props &
			      FFA_PART_PROP_NOTIF_DESTROYED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		default:
			ffa_set_error(args, FFA_NOT_SUPPORTED);
			return caller_sp;
		}
	} else if (args->a2 != FFA_PARAM_MBZ) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	if (dst) {
		cpu_spin_lock(&dst->spinlock);
		st = dst->state;
		cpu_spin_unlock(&dst->spinlock);

		if (st != sp_busy) {
			EMSG("SP is not waiting for a request");
			ffa_set_error(args, FFA_INVALID_PARAMETERS);
			return caller_sp;
		}
	}

	if (caller_sp->caller_id != FFA_DST(args->a1)) {
		EMSG("FFA_MSG_SEND_DIRECT_RESP to incorrect SP");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	caller_sp->caller_id = 0;

	cpu_spin_lock(&caller_sp->spinlock);
	caller_sp->state = sp_idle;
	cpu_spin_unlock(&caller_sp->spinlock);

	if (!dst) {
		/* Send the message back to the NW */
		return NULL;
	}

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return the error to the calling SP */
		return caller_sp;
	}
	return dst;
}

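/*
 * Deliver an FFA_ERROR to the caller SP. If the SP cannot be entered it
 * is marked dead and the error propagates one link up the call chain.
 */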
static struct sp_session *
ffa_handle_sp_error(struct thread_smc_1_2_regs *args,
		    struct sp_session *caller_sp)
{
	/* If caller_sp == NULL send the message to the Normal World */
	if (caller_sp && sp_enter(args, caller_sp)) {
		/*
		 * We cannot return the error. Unwind the call chain by one
		 * link. Set the state of the SP to dead.
		 */
		cpu_spin_lock(&caller_sp->spinlock);
		caller_sp->state = sp_dead;
		cpu_spin_unlock(&caller_sp->spinlock);
		/* Create the error. */
		ffa_set_error(args, FFA_ABORTED);
		return sp_get_session(caller_sp->caller_id);
	}

	return caller_sp;
}

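/*
 * Handle FFA_FEATURES for SPs. Only the RXTX map interfaces are
 * reported; ret_w2 = 0 means a 4kB minimum buffer size and alignment
 * boundary.
 */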
static void handle_features(struct thread_smc_1_2_regs *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
		break;
	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

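/*
 * Handle FFA_MEM_PERM_GET: query the access rights of one page of the
 * calling SP. Only allowed before the SP is initialized.
 */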
static void handle_mem_perm_get(struct thread_smc_1_2_regs *args,
				struct sp_session *sp_s)
{
	struct sp_ctx *sp_ctx = NULL;
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	uint16_t attrs = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;

	/*
	 * The FFA_MEM_PERM_GET interface is only allowed during
	 * initialization.
	 */
	if (sp_s->is_initialized) {
		ret_val = FFA_DENIED;
		goto out;
	}

	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
	if (!sp_ctx)
		goto out;

	/* Query memory attributes */
	ts_push_current_session(&sp_s->ts_sess);
	res = vm_get_prot(&sp_ctx->uctx, args->a1, SMALL_PAGE_SIZE, &attrs);
	ts_pop_current_session();
	if (res)
		goto out;

	/* Build response value */
	ret_fid = FFA_SUCCESS_32;
	ret_val = 0;
	if ((attrs & TEE_MATTR_URW) == TEE_MATTR_URW)
		ret_val |= FFA_MEM_PERM_RW;
	else if (attrs & TEE_MATTR_UR)
		ret_val |= FFA_MEM_PERM_RO;

	if ((attrs & TEE_MATTR_UX) == 0)
		ret_val |= FFA_MEM_PERM_NX;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

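/*
 * Handle FFA_MEM_PERM_SET: update the access rights of a range of pages
 * of the calling SP. Only allowed before the SP is initialized; RWX
 * mappings are refused.
 */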
static void handle_mem_perm_set(struct thread_smc_1_2_regs *args,
				struct sp_session *sp_s)
{
	struct sp_ctx *sp_ctx = NULL;
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	size_t region_size = 0;
	uint32_t data_perm = 0;
	uint32_t instruction_perm = 0;
	uint16_t attrs = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;

	/*
	 * The FFA_MEM_PERM_SET interface is only allowed during
	 * initialization.
	 */
	if (sp_s->is_initialized) {
		ret_val = FFA_DENIED;
		goto out;
	}

	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
	if (!sp_ctx)
		goto out;

	if (MUL_OVERFLOW(args->a2, SMALL_PAGE_SIZE, &region_size))
		goto out;

	if (args->a3 & FFA_MEM_PERM_RESERVED) {
		/* Non-zero reserved bits */
		goto out;
	}

	data_perm = args->a3 & FFA_MEM_PERM_DATA_PERM;
	instruction_perm = args->a3 & FFA_MEM_PERM_INSTRUCTION_PERM;

	/* RWX access right configuration is not permitted */
	if (data_perm == FFA_MEM_PERM_RW && instruction_perm == FFA_MEM_PERM_X)
		goto out;

	switch (data_perm) {
	case FFA_MEM_PERM_RO:
		attrs = TEE_MATTR_UR;
		break;
	case FFA_MEM_PERM_RW:
		attrs = TEE_MATTR_URW;
		break;
	default:
		/* Invalid permission value */
		goto out;
	}

	if (instruction_perm == FFA_MEM_PERM_X)
		attrs |= TEE_MATTR_UX;

	/* Set access rights */
	ts_push_current_session(&sp_s->ts_sess);
	res = vm_set_prot(&sp_ctx->uctx, args->a1, region_size, attrs);
	ts_pop_current_session();
	if (res != TEE_SUCCESS)
		goto out;

	ret_fid = FFA_SUCCESS_32;
	ret_val = FFA_PARAM_MBZ;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void spmc_handle_version(struct thread_smc_1_2_regs *args,
				struct ffa_rxtx *rxtx)
{
	spmc_set_args(args, spmc_exchange_version(args->a1, rxtx),
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

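/*
 * Handle FFA_CONSOLE_LOG: copy up to max_length characters packed into
 * the registers starting at w2/x2 into a NUL-terminated buffer and
 * print it.
 */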
static void handle_console_log(uint32_t ffa_vers,
			       struct thread_smc_1_2_regs *args)
{
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	size_t char_count = args->a1 & FFA_CONSOLE_LOG_CHAR_COUNT_MASK;
	char buffer[FFA_CONSOLE_LOG_64_MAX_MSG_LEN + 1] = { 0 };
	size_t max_length = 0;
	size_t reg_size = 0;
	size_t n = 0;

	if (args->a0 == FFA_CONSOLE_LOG_64) {
		if (ffa_vers >= FFA_VERSION_1_2)
			max_length = FFA_CONSOLE_LOG_64_MAX_MSG_LEN;
		else
			max_length = FFA_CONSOLE_LOG_64_V1_1_MAX_MSG_LEN;
		reg_size = sizeof(uint64_t);
	} else {
		max_length = FFA_CONSOLE_LOG_32_MAX_MSG_LEN;
		reg_size = sizeof(uint32_t);
	}

	if (char_count < 1 || char_count > max_length)
		goto out;

	for (n = 0; n < char_count; n += reg_size) {
		/* + 2 since we're starting from W2/X2 */
		memcpy(buffer + n, &args->a[2 + n / reg_size],
		       MIN(char_count - n, reg_size));
	}

	buffer[char_count] = '\0';

	trace_ext_puts(buffer);

	ret_fid = FFA_SUCCESS_32;
	ret_val = FFA_PARAM_MBZ;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*
 * FF-A message handler for SPs. Every message to or from an SP is
 * handled here. This is the entry point of the sp_spmc kernel thread.
 * caller_sp is NULL when the message comes from the Normal World.
 */
void spmc_sp_msg_handler(struct thread_smc_1_2_regs *args,
			 struct sp_session *caller_sp)
{
	thread_check_canaries();
	do {
		switch (args->a0) {
#ifdef ARM64
		case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
		case FFA_MSG_SEND_DIRECT_REQ_32:
			caller_sp = ffa_handle_sp_direct_req(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MSG_SEND_DIRECT_RESP_64:
#endif
		case FFA_MSG_SEND_DIRECT_RESP_32:
			caller_sp = ffa_handle_sp_direct_resp(args, caller_sp);
			break;
		case FFA_ERROR:
			caller_sp = ffa_handle_sp_error(args, caller_sp);
			break;
		case FFA_MSG_WAIT:
			/* FFA_MSG_WAIT gives control back to the NW */
			cpu_spin_lock(&caller_sp->spinlock);
			caller_sp->state = sp_idle;
			cpu_spin_unlock(&caller_sp->spinlock);
			caller_sp = NULL;
			break;
#ifdef ARM64
		case FFA_RXTX_MAP_64:
#endif
		case FFA_RXTX_MAP_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_map(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RXTX_UNMAP:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_unmap(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RX_RELEASE:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rx_release(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_ID_GET:
			args->a0 = FFA_SUCCESS_32;
			args->a2 = caller_sp->endpoint_id;
			sp_enter(args, caller_sp);
			break;
		case FFA_VERSION:
			spmc_handle_version(args, &caller_sp->rxtx);
			sp_enter(args, caller_sp);
			break;
		case FFA_FEATURES:
			handle_features(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_SPM_ID_GET:
			spmc_handle_spm_id_get(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_PARTITION_INFO_GET:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_partition_info_get(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_SHARE_64:
#endif
		case FFA_MEM_SHARE_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_sp_handle_mem_share(args, &caller_sp->rxtx,
						 caller_sp);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_RETRIEVE_REQ_64:
#endif
		case FFA_MEM_RETRIEVE_REQ_32:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_retrieve(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RELINQUISH:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_relinquish(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RECLAIM:
			ffa_mem_reclaim(args, caller_sp);
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_PERM_GET_64:
#endif
		case FFA_MEM_PERM_GET_32:
			handle_mem_perm_get(args, caller_sp);
			sp_enter(args, caller_sp);
			break;

#ifdef ARM64
		case FFA_MEM_PERM_SET_64:
#endif
		case FFA_MEM_PERM_SET_32:
			handle_mem_perm_set(args, caller_sp);
			sp_enter(args, caller_sp);
			break;

#ifdef ARM64
		case FFA_CONSOLE_LOG_64:
#endif
		case FFA_CONSOLE_LOG_32:
			handle_console_log(caller_sp->rxtx.ffa_vers, args);
			sp_enter(args, caller_sp);
			break;

		default:
			EMSG("Unhandled FFA function ID %#"PRIx32,
			     (uint32_t)args->a0);
			ffa_set_error(args, FFA_INVALID_PARAMETERS);
			sp_enter(args, caller_sp);
		}
	} while (caller_sp);
}
1437