xref: /optee_os/core/arch/arm/kernel/spmc_sp_handler.c (revision 1ff0a11d227888ef8504965a62a1dfb42ba4bf54)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2021-2024, Arm Limited
4  */
5 #include <assert.h>
6 #include <io.h>
7 #include <kernel/panic.h>
8 #include <kernel/secure_partition.h>
9 #include <kernel/spinlock.h>
10 #include <kernel/spmc_sp_handler.h>
11 #include <kernel/tee_misc.h>
12 #include <kernel/thread_private.h>
13 #include <mm/mobj.h>
14 #include <mm/sp_mem.h>
15 #include <mm/vm.h>
16 #include <optee_ffa.h>
17 #include <string.h>
18 
19 static unsigned int mem_ref_lock = SPINLOCK_UNLOCK;
20 
spmc_sp_start_thread(struct thread_smc_1_2_regs * args)21 int spmc_sp_start_thread(struct thread_smc_1_2_regs *args)
22 {
23 	thread_sp_alloc_and_run(&args->arg11);
24 	/*
25 	 * thread_sp_alloc_and_run() only returns if all threads are busy.
26 	 * The caller must try again.
27 	 */
28 	return FFA_BUSY;
29 }
30 
ffa_set_error(struct thread_smc_1_2_regs * args,uint32_t error)31 static void ffa_set_error(struct thread_smc_1_2_regs *args, uint32_t error)
32 {
33 	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, error, FFA_PARAM_MBZ,
34 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
35 }
36 
ffa_success(struct thread_smc_1_2_regs * args)37 static void ffa_success(struct thread_smc_1_2_regs *args)
38 {
39 	spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
40 }
41 
ffa_get_dst(struct thread_smc_1_2_regs * args,struct sp_session * caller,struct sp_session ** dst)42 static TEE_Result ffa_get_dst(struct thread_smc_1_2_regs *args,
43 			      struct sp_session *caller,
44 			      struct sp_session **dst)
45 {
46 	struct sp_session *s = NULL;
47 
48 	s = sp_get_session(FFA_DST(args->a1));
49 
50 	/* Message came from the NW */
51 	if (!caller) {
52 		if (!s) {
53 			EMSG("Neither destination nor source is a SP");
54 			return FFA_INVALID_PARAMETERS;
55 		}
56 	} else {
57 		/* Check if the source matches the endpoint we came from */
58 		if (FFA_SRC(args->a1) != caller->endpoint_id) {
59 			EMSG("Source address doesn't match the endpoint id");
60 			return FFA_INVALID_PARAMETERS;
61 		}
62 	}
63 
64 	*dst = s;
65 
66 	return FFA_OK;
67 }
68 
find_sp_mem_receiver(struct sp_session * s,struct sp_mem * smem)69 static struct sp_mem_receiver *find_sp_mem_receiver(struct sp_session *s,
70 						    struct sp_mem *smem)
71 {
72 	struct sp_mem_receiver *receiver = NULL;
73 
74 	/*
75 	 * FF-A Spec 8.10.2:
76 	 * Each Handle identifies a single unique composite memory region
77 	 * description that is, there is a 1:1 mapping between the two.
78 	 *
79 	 * Each memory share has an unique handle. We can only have each SP
80 	 * once as a receiver in the memory share. For each receiver of a
81 	 * memory share, we have one sp_mem_access_descr object.
82 	 * This means that there can only be one SP linked to a specific
83 	 * struct sp_mem_access_descr.
84 	 */
85 	SLIST_FOREACH(receiver, &smem->receivers, link) {
86 		if (receiver->perm.endpoint_id == s->endpoint_id)
87 			break;
88 	}
89 	return receiver;
90 }
91 
add_mem_region_to_sp(struct ffa_mem_access_perm * access_perm,struct sp_mem * smem)92 static int add_mem_region_to_sp(struct ffa_mem_access_perm *access_perm,
93 				struct sp_mem *smem)
94 {
95 	struct sp_session *s = NULL;
96 	struct sp_mem_receiver *receiver = NULL;
97 	uint8_t perm = READ_ONCE(access_perm->perm);
98 	uint16_t endpoint_id = READ_ONCE(access_perm->endpoint_id);
99 
100 	s = sp_get_session(endpoint_id);
101 
102 	/* Only add memory shares of loaded SPs */
103 	if (!s)
104 		return FFA_DENIED;
105 
106 	/* Only allow each endpoint once */
107 	if (find_sp_mem_receiver(s, smem))
108 		return FFA_DENIED;
109 
110 	if (perm & ~FFA_MEM_ACC_MASK)
111 		return FFA_DENIED;
112 
113 	receiver = calloc(1, sizeof(struct sp_mem_receiver));
114 	if (!receiver)
115 		return FFA_NO_MEMORY;
116 
117 	receiver->smem = smem;
118 
119 	receiver->perm.endpoint_id = endpoint_id;
120 	receiver->perm.perm = perm;
121 	receiver->perm.flags = READ_ONCE(access_perm->flags);
122 
123 	SLIST_INSERT_HEAD(&smem->receivers, receiver, link);
124 
125 	return FFA_OK;
126 }
127 
spmc_sp_handle_mem_share(struct thread_smc_1_2_regs * args,struct ffa_rxtx * rxtx,struct sp_session * owner_sp)128 static void spmc_sp_handle_mem_share(struct thread_smc_1_2_regs *args,
129 				     struct ffa_rxtx *rxtx,
130 				     struct sp_session *owner_sp)
131 {
132 	struct ffa_mem_transaction_x mem_trans = { };
133 	uint32_t tot_len = args->a1;
134 	uint32_t frag_len = args->a2;
135 	uint64_t global_handle = 0;
136 	int res = FFA_OK;
137 
138 	cpu_spin_lock(&rxtx->spinlock);
139 
140 	/* Descriptor fragments or custom buffers aren't supported yet. */
141 	if (frag_len != tot_len || args->a3 || args->a4)
142 		res = FFA_NOT_SUPPORTED;
143 	else if (frag_len > rxtx->size)
144 		res = FFA_INVALID_PARAMETERS;
145 	else
146 		res = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx,
147 						rxtx->size,
148 						tot_len, frag_len, &mem_trans);
149 	if (!res)
150 		res = spmc_sp_add_share(&mem_trans, rxtx, tot_len, frag_len,
151 					&global_handle, owner_sp);
152 	if (!res) {
153 		args->a3 = high32_from_64(global_handle);
154 		args->a2 = low32_from_64(global_handle);
155 		args->a1 = FFA_PARAM_MBZ;
156 		args->a0 = FFA_SUCCESS_32;
157 	} else {
158 		ffa_set_error(args, res);
159 	}
160 
161 	cpu_spin_unlock(&rxtx->spinlock);
162 }
163 
spmc_sp_add_sp_region(struct sp_mem * smem,struct ffa_address_range * mem_reg,struct sp_session * owner_sp,uint8_t highest_permission)164 static int spmc_sp_add_sp_region(struct sp_mem *smem,
165 				 struct ffa_address_range *mem_reg,
166 				 struct sp_session *owner_sp,
167 				 uint8_t highest_permission)
168 {
169 	struct sp_ctx *sp_ctx = NULL;
170 	uint64_t va = READ_ONCE(mem_reg->address);
171 	int res = FFA_OK;
172 	uint64_t region_len = READ_ONCE(mem_reg->page_count) * SMALL_PAGE_SIZE;
173 	struct mobj *mobj = NULL;
174 
175 	sp_ctx = to_sp_ctx(owner_sp->ts_sess.ctx);
176 
177 	/*
178 	 * The memory region we try to share might not be linked to just one
179 	 * mobj. Create a new region for each mobj.
180 	 */
181 	while (region_len) {
182 		size_t len = region_len;
183 		struct sp_mem_map_region *region = NULL;
184 		uint16_t prot = 0;
185 		size_t offs = 0;
186 
187 		/*
188 		 * There is already a mobj for each address that is in the SPs
189 		 * address range.
190 		 */
191 		mobj = vm_get_mobj(&sp_ctx->uctx, va, &len, &prot, &offs);
192 		if (!mobj)
193 			return FFA_DENIED;
194 
195 		/*
196 		 * If we share memory from a SP, check if we are not sharing
197 		 * with a higher permission than the memory was originally
198 		 * mapped.
199 		 */
200 		if ((highest_permission & FFA_MEM_ACC_RW) &&
201 		    !(prot & TEE_MATTR_UW)) {
202 			res = FFA_DENIED;
203 			goto err;
204 		}
205 
206 		if ((highest_permission & FFA_MEM_ACC_EXE) &&
207 		    !(prot & TEE_MATTR_UX)) {
208 			res = FFA_DENIED;
209 			goto err;
210 		}
211 
212 		region = calloc(1, sizeof(*region));
213 		region->mobj = mobj;
214 		region->page_offset = offs;
215 		region->page_count = len / SMALL_PAGE_SIZE;
216 
217 		if (!sp_has_exclusive_access(region, &sp_ctx->uctx)) {
218 			free(region);
219 			res = FFA_DENIED;
220 			goto err;
221 		}
222 
223 		va += len;
224 		region_len -= len;
225 		SLIST_INSERT_HEAD(&smem->regions, region, link);
226 	}
227 
228 	return FFA_OK;
229 err:
230 	mobj_put(mobj);
231 
232 	return res;
233 }
234 
/*
 * Add a normal-world memory region described by @mem_reg to @smem.
 *
 * A fresh mobj is created covering the total page count and every address
 * range from the descriptor is appended to it. On success the mobj
 * reference is kept alive via the inserted sp_mem_map_region; on failure
 * it is dropped with mobj_put(). Returns FFA_OK, FFA_DENIED or
 * FFA_NO_MEMORY.
 */
static int spmc_sp_add_nw_region(struct sp_mem *smem,
				 struct ffa_mem_region *mem_reg)
{
	uint64_t page_count = READ_ONCE(mem_reg->total_page_count);
	struct sp_mem_map_region *region = NULL;
	struct mobj *m = sp_mem_new_mobj(page_count, TEE_MATTR_MEM_TYPE_CACHED,
					 false);
	unsigned int i = 0;
	unsigned int idx = 0;
	int res = FFA_OK;
	uint64_t address_count = READ_ONCE(mem_reg->address_range_count);

	if (!m)
		return FFA_NO_MEMORY;

	/* Populate the mobj with each address range from the descriptor */
	for (i = 0; i < address_count; i++) {
		struct ffa_address_range *addr_range = NULL;

		addr_range = &mem_reg->address_range_array[i];
		if (sp_mem_add_pages(m, &idx,
				     READ_ONCE(addr_range->address),
				     READ_ONCE(addr_range->page_count))) {
			res = FFA_DENIED;
			goto clean_up;
		}
	}

	region = calloc(1, sizeof(*region));
	if (!region) {
		res = FFA_NO_MEMORY;
		goto clean_up;
	}

	/* page_offset stays 0: the region covers the whole mobj */
	region->mobj = m;
	region->page_count = page_count;

	/* Refuse pages that overlap any other active share or mapping */
	if (!sp_has_exclusive_access(region, NULL)) {
		free(region);
		res = FFA_DENIED;
		goto clean_up;
	}

	SLIST_INSERT_HEAD(&smem->regions, region, link);
	return FFA_OK;
clean_up:
	mobj_put(m);
	return res;
}
283 
/*
 * Validate a memory-share transaction (already parsed into @mem_trans,
 * raw descriptor still in @rxtx->rx) and build the corresponding sp_mem
 * object.
 *
 * @blen/@flen are the total and fragment descriptor lengths; fragmented
 * transmission is rejected. @owner_sp is NULL when the share comes from
 * the normal world. On success *@global_handle receives the new handle
 * and the share is registered with sp_mem_add(). Returns FFA_OK or a
 * (negative) FFA error code.
 *
 * NOTE: the descriptor in the RX buffer is caller-controlled, hence the
 * overflow-checked bounds validation before every dereference below. The
 * order of the checks matters; do not reorder.
 */
int spmc_sp_add_share(struct ffa_mem_transaction_x *mem_trans,
		      struct ffa_rxtx *rxtx, size_t blen, size_t flen,
		      uint64_t *global_handle, struct sp_session *owner_sp)
{
	int res = FFA_INVALID_PARAMETERS;
	unsigned int num_mem_accs = 0;
	unsigned int i = 0;
	struct ffa_mem_access_common *mem_acc = NULL;
	size_t needed_size = 0;
	size_t addr_range_offs = 0;
	struct ffa_mem_region *mem_reg = NULL;
	uint8_t highest_permission = 0;
	struct sp_mem *smem = NULL;
	uint16_t sender_id = mem_trans->sender_id;
	size_t addr_range_cnt = 0;
	struct ffa_address_range *addr_range = NULL;
	size_t total_page_count = 0;
	size_t page_count_sum = 0;
	vaddr_t mem_acc_base = 0;
	size_t mem_acc_size = 0;

	if (blen != flen) {
		DMSG("Fragmented memory share is not supported for SPs");
		return FFA_NOT_SUPPORTED;
	}

	smem = sp_mem_new();
	if (!smem)
		return FFA_NO_MEMORY;

	/*
	 * An SP may only share its own memory; a NW sender must not claim
	 * the identity of a loaded SP.
	 */
	if ((owner_sp && owner_sp->endpoint_id != sender_id) ||
	    (!owner_sp && sp_get_session(sender_id))) {
		res = FFA_DENIED;
		goto cleanup;
	}

	mem_acc_size = mem_trans->mem_access_size;
	num_mem_accs = mem_trans->mem_access_count;
	mem_acc_base = (vaddr_t)rxtx->rx + mem_trans->mem_access_offs;

	if (!num_mem_accs) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Store the ffa_mem_transaction */
	smem->sender_id = sender_id;
	smem->mem_reg_attr = mem_trans->mem_reg_attr;
	smem->flags = mem_trans->flags;
	smem->tag = mem_trans->tag;

	/* Overflow-checked: the access descriptor array must fit in blen */
	if (MUL_OVERFLOW(num_mem_accs, mem_acc_size, &needed_size) ||
	    ADD_OVERFLOW(needed_size, mem_trans->mem_access_offs,
			 &needed_size) || needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* OR all receiver permissions to validate against the owner's map */
	for (i = 0; i < num_mem_accs; i++) {
		mem_acc = (void *)(mem_acc_base + i * mem_acc_size);
		highest_permission |= READ_ONCE(mem_acc->access_perm.perm);
	}

	/* Check if the memory region array fits into the buffer */
	addr_range_offs = READ_ONCE(mem_acc->region_offs);

	if (ADD_OVERFLOW(addr_range_offs, sizeof(*mem_reg), &needed_size) ||
	    needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	mem_reg = (void *)((char *)rxtx->rx + addr_range_offs);
	addr_range_cnt = READ_ONCE(mem_reg->address_range_count);
	total_page_count = READ_ONCE(mem_reg->total_page_count);

	/* Memory transaction without address ranges or pages is invalid */
	if (!addr_range_cnt || !total_page_count) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Check if the region descriptors fit into the buffer */
	if (MUL_OVERFLOW(addr_range_cnt, sizeof(*addr_range), &needed_size) ||
	    ADD_OVERFLOW(needed_size, addr_range_offs, &needed_size) ||
	    needed_size > blen) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	page_count_sum = 0;
	for (i = 0; i < addr_range_cnt; i++) {
		addr_range = &mem_reg->address_range_array[i];

		/* Memory region without pages is invalid */
		if (!addr_range->page_count) {
			res = FFA_INVALID_PARAMETERS;
			goto cleanup;
		}

		/* Sum the page count of each region */
		if (ADD_OVERFLOW(page_count_sum, addr_range->page_count,
				 &page_count_sum)) {
			res = FFA_INVALID_PARAMETERS;
			goto cleanup;
		}
	}

	/* Validate total page count */
	if (total_page_count != page_count_sum) {
		res = FFA_INVALID_PARAMETERS;
		goto cleanup;
	}

	/* Iterate over all the addresses */
	if (owner_sp) {
		/* SP addresses: translate each range via the owner's map */
		for (i = 0; i < addr_range_cnt; i++) {
			addr_range = &mem_reg->address_range_array[i];
			res = spmc_sp_add_sp_region(smem, addr_range,
						    owner_sp,
						    highest_permission);
			if (res)
				goto cleanup;
		}
	} else {
		/* NW addresses: wrap the whole descriptor in one mobj */
		res = spmc_sp_add_nw_region(smem, mem_reg);
		if (res)
			goto cleanup;
	}

	/* Add the memory address to the SP */
	for (i = 0; i < num_mem_accs; i++) {
		mem_acc = (void *)(mem_acc_base + i * mem_acc_size);
		res = add_mem_region_to_sp(&mem_acc->access_perm, smem);
		if (res)
			goto cleanup;
	}
	*global_handle = smem->global_handle;
	sp_mem_add(smem);

	return FFA_OK;

cleanup:
	/* Releases smem together with any regions/receivers added so far */
	sp_mem_remove(smem);
	return res;
}
430 
spmc_sp_set_to_preempted(struct ts_session * ts_sess)431 void spmc_sp_set_to_preempted(struct ts_session *ts_sess)
432 {
433 	if (ts_sess && is_sp_ctx(ts_sess->ctx)) {
434 		struct sp_session *sp_sess = to_sp_session(ts_sess);
435 
436 		cpu_spin_lock(&sp_sess->spinlock);
437 		assert(sp_sess->state == sp_busy);
438 		sp_sess->state = sp_preempted;
439 		cpu_spin_unlock(&sp_sess->spinlock);
440 	}
441 }
442 
spmc_sp_resume_from_preempted(uint16_t endpoint_id,uint16_t thread_id)443 int spmc_sp_resume_from_preempted(uint16_t endpoint_id, uint16_t thread_id)
444 {
445 	struct sp_session *sp_sess = sp_get_session(endpoint_id);
446 
447 	if (!sp_sess)
448 		return FFA_INVALID_PARAMETERS;
449 
450 	if (sp_sess->state != sp_preempted || sp_sess->thread_id != thread_id)
451 		return FFA_DENIED;
452 
453 	cpu_spin_lock(&sp_sess->spinlock);
454 	sp_sess->state = sp_busy;
455 	cpu_spin_unlock(&sp_sess->spinlock);
456 
457 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
458 	panic();
459 }
460 
check_rxtx(struct ffa_rxtx * rxtx)461 static bool check_rxtx(struct ffa_rxtx *rxtx)
462 {
463 	return rxtx && rxtx->rx && rxtx->tx && rxtx->size > 0;
464 }
465 
/*
 * Validate an FFA_MEM_RETRIEVE_REQ descriptor against the stored share.
 *
 * @receiver   the caller's receiver entry for the share
 * @ffa_vers   negotiated FF-A version of the caller's RX/TX buffers
 * @mem_trans  parsed transaction header of the request
 * @rx         caller's RX buffer holding the raw request
 * @smem       the share being retrieved
 * @tx_len     available TX buffer size; signed so the size accounting
 *             below may go negative to detect overflow
 *
 * Returns TEE_SUCCESS when the request is acceptable and the response
 * fits in the TX buffer.
 * NOTE(review): the failure paths mix TEE_ERROR_BAD_PARAMETERS with
 * FFA_NO_MEMORY even though the return type is TEE_Result; the caller
 * only compares against TEE_SUCCESS, so any non-zero value works.
 */
static TEE_Result
check_retrieve_request(struct sp_mem_receiver *receiver, uint32_t ffa_vers,
		       struct ffa_mem_transaction_x *mem_trans,
		       void *rx, struct sp_mem *smem, int64_t tx_len)
{
	struct ffa_mem_access_common *retr_access = NULL;
	uint8_t share_perm = receiver->perm.perm;
	uint32_t retr_perm = 0;
	uint32_t retr_flags = mem_trans->flags;
	uint64_t retr_tag = mem_trans->tag;
	struct sp_mem_map_region *reg = NULL;

	/*
	 * The request came from the endpoint. It should only have one
	 * ffa_mem_access element
	 */
	if (mem_trans->mem_access_count != 1)
		return TEE_ERROR_BAD_PARAMETERS;

	retr_access = (void *)((vaddr_t)rx + mem_trans->mem_access_offs);
	retr_perm = READ_ONCE(retr_access->access_perm.perm);

	/* Check if tag is correct */
	if (receiver->smem->tag != retr_tag) {
		EMSG("Incorrect tag %#"PRIx64" %#"PRIx64, receiver->smem->tag,
		     retr_tag);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/* Check permissions and flags */
	if ((retr_perm & FFA_MEM_ACC_RW) &&
	    !(share_perm & FFA_MEM_ACC_RW)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if ((retr_perm & FFA_MEM_ACC_EXE) &&
	    !(share_perm & FFA_MEM_ACC_EXE)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (retr_flags & FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) {
		DMSG("CLEAR_RELINQUISH is not allowed for FFA_SHARE");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/*
	 * Check if there is enough space in the tx buffer to send the respons.
	 */
	if (ffa_vers <= FFA_VERSION_1_0)
		tx_len -= sizeof(struct ffa_mem_transaction_1_0);
	else
		tx_len -= sizeof(struct ffa_mem_transaction_1_1);
	tx_len -= mem_trans->mem_access_size + sizeof(struct ffa_mem_region);

	if (tx_len < 0)
		return FFA_NO_MEMORY;

	/* One address range is emitted per map region of the share */
	SLIST_FOREACH(reg, &smem->regions, link) {
		tx_len -= sizeof(struct ffa_address_range);
		if (tx_len < 0)
			return FFA_NO_MEMORY;
	}

	return TEE_SUCCESS;
}
533 
/*
 * Write an FFA_MEM_RETRIEVE_RESP descriptor for @receiver into
 * @dst_buffer (the SP's RX buffer).
 *
 * The layout depends on @ffa_vers: FF-A 1.0 embeds the access array in
 * the transaction struct, 1.1+ places it right after the struct via an
 * explicit offset. The caller (check_retrieve_request()) has already
 * verified that everything fits in the buffer.
 */
static void create_retrieve_response(uint32_t ffa_vers, void *dst_buffer,
				     struct sp_mem_receiver *receiver,
				     struct sp_mem *smem, struct sp_session *s)
{
	size_t off = 0;
	struct ffa_mem_region *dst_region =  NULL;
	struct ffa_address_range *addr_dst = NULL;
	struct sp_mem_map_region *reg = NULL;
	struct ffa_mem_access_common *mem_acc = NULL;
	size_t mem_acc_size = 0;

	/* The access descriptor grew in FF-A 1.2 */
	if (ffa_vers <= FFA_VERSION_1_1)
		mem_acc_size = sizeof(struct ffa_mem_access_1_0);
	else
		mem_acc_size = sizeof(struct ffa_mem_access_1_2);

	/*
	 * we respond with a ffa_mem_retrieve_resp which defines the
	 * following data in the rx buffer of the sp.
	 * struct mem_transaction_descr
	 * struct mem_access_descr (always 1 element)
	 * struct mem_region_descr
	 */
	if (ffa_vers <= FFA_VERSION_1_0) {
		struct ffa_mem_transaction_1_0 *d_ds = dst_buffer;

		memset(d_ds, 0, sizeof(*d_ds));

		off = sizeof(*d_ds);
		mem_acc = (void *)d_ds->mem_access_array;

		/* copy the mem_transaction_descr */
		d_ds->sender_id = receiver->smem->sender_id;
		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
		d_ds->tag = receiver->smem->tag;
		d_ds->mem_access_count = 1;
	} else {
		struct ffa_mem_transaction_1_1 *d_ds = dst_buffer;

		memset(d_ds, 0, sizeof(*d_ds));

		off = sizeof(*d_ds);
		/* 1.1+: access array directly follows the transaction */
		mem_acc = (void *)(d_ds + 1);

		d_ds->sender_id = receiver->smem->sender_id;
		d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
		d_ds->flags = FFA_MEMORY_TRANSACTION_TYPE_SHARE;
		d_ds->tag = receiver->smem->tag;
		d_ds->mem_access_size = mem_acc_size;
		d_ds->mem_access_count = 1;
		d_ds->mem_access_offs = off;
	}

	off += mem_acc_size;
	dst_region = (struct ffa_mem_region *)((vaddr_t)dst_buffer + off);

	/* Copy the mem_accsess_descr */
	mem_acc->region_offs = off;
	memcpy(&mem_acc->access_perm, &receiver->perm,
	       sizeof(struct ffa_mem_access_perm));

	/* Copy the mem_region_descr */
	memset(dst_region, 0, sizeof(*dst_region));
	dst_region->address_range_count = 0;
	dst_region->total_page_count = 0;

	addr_dst = dst_region->address_range_array;

	/*
	 * Emit one address range per map region, translated to the
	 * retrieving SP's virtual addresses.
	 * NOTE(review): addr_dst is not advanced inside the loop, so with
	 * multiple regions each iteration overwrites the same slot while
	 * the counters keep growing — confirm intended for multi-region
	 * shares.
	 */
	SLIST_FOREACH(reg, &smem->regions, link) {
		uint32_t offset = reg->page_offset;
		struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);

		addr_dst->address = (uint64_t)sp_mem_get_va(&ctx->uctx,
							    offset,
							    reg->mobj);
		addr_dst->page_count = reg->page_count;
		dst_region->address_range_count++;

		dst_region->total_page_count += addr_dst->page_count;
	}
}
616 
ffa_mem_retrieve(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp,struct ffa_rxtx * rxtx)617 static void ffa_mem_retrieve(struct thread_smc_1_2_regs *args,
618 			     struct sp_session *caller_sp,
619 			     struct ffa_rxtx *rxtx)
620 {
621 	struct ffa_mem_transaction_x mem_trans = { };
622 	uint32_t tot_len = args->a1;
623 	uint32_t frag_len = args->a2;
624 	int ret = FFA_OK;
625 	size_t tx_len = 0;
626 	struct ffa_mem_access_common *mem_acc = NULL;
627 	struct ffa_mem_region *mem_region = NULL;
628 	uint64_t va = 0;
629 	struct sp_mem *smem = NULL;
630 	struct sp_mem_receiver *receiver = NULL;
631 	uint32_t exceptions = 0;
632 	uint32_t address_offset = 0;
633 	size_t needed_size = 0;
634 
635 	if (!check_rxtx(rxtx) || !rxtx->tx_is_mine) {
636 		ret = FFA_DENIED;
637 		goto err;
638 	}
639 	/* Descriptor fragments aren't supported yet. */
640 	if (frag_len != tot_len) {
641 		ret = FFA_NOT_SUPPORTED;
642 		goto err;
643 	}
644 	if (frag_len > rxtx->size) {
645 		ret = FFA_INVALID_PARAMETERS;
646 		goto err;
647 	}
648 
649 	tx_len = rxtx->size;
650 
651 	ret = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, rxtx->size,
652 					tot_len, frag_len, &mem_trans);
653 	if (ret)
654 		goto err;
655 
656 	smem = sp_mem_get(mem_trans.global_handle);
657 	if (!smem) {
658 		DMSG("Incorrect handle");
659 		ret = FFA_DENIED;
660 		goto err;
661 	}
662 
663 	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
664 
665 	mem_acc = (void *)((vaddr_t)rxtx->rx + mem_trans.mem_access_offs);
666 	address_offset = READ_ONCE(mem_acc->region_offs);
667 
668 	if (ADD_OVERFLOW(address_offset, sizeof(struct ffa_mem_region),
669 			 &needed_size) || needed_size > tx_len) {
670 		ret = FFA_INVALID_PARAMETERS;
671 		goto err;
672 	}
673 
674 	if (check_retrieve_request(receiver, rxtx->ffa_vers, &mem_trans,
675 				   rxtx->rx, smem, tx_len) != TEE_SUCCESS) {
676 		ret = FFA_INVALID_PARAMETERS;
677 		goto err;
678 	}
679 
680 	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
681 
682 	if (receiver->ref_count == UINT8_MAX) {
683 		ret = FFA_DENIED;
684 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
685 		goto err;
686 	}
687 
688 	receiver->ref_count++;
689 
690 	/* We only need to map the region the first time we request it. */
691 	if (receiver->ref_count == 1) {
692 		TEE_Result ret_map = TEE_SUCCESS;
693 
694 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
695 
696 		/*
697 		 * Try to map the memory linked to the handle in
698 		 * sp_mem_access_descr.
699 		 */
700 		mem_region = (struct ffa_mem_region *)((vaddr_t)rxtx->rx +
701 						       address_offset);
702 
703 		va = READ_ONCE(mem_region->address_range_array[0].address);
704 		ret_map = sp_map_shared(caller_sp, receiver, smem,  &va);
705 
706 		if (ret_map) {
707 			EMSG("Could not map memory region: %#"PRIx32, ret_map);
708 			exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
709 			receiver->ref_count--;
710 			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
711 			ret = FFA_DENIED;
712 			goto err;
713 		}
714 	} else {
715 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
716 	}
717 
718 	create_retrieve_response(rxtx->ffa_vers, rxtx->tx, receiver, smem,
719 				 caller_sp);
720 
721 	args->a0 = FFA_MEM_RETRIEVE_RESP;
722 	args->a1 = tx_len;
723 	args->a2 = tx_len;
724 
725 	rxtx->tx_is_mine = false;
726 
727 	return;
728 err:
729 	ffa_set_error(args, ret);
730 }
731 
ffa_mem_relinquish(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp,struct ffa_rxtx * rxtx)732 static void ffa_mem_relinquish(struct thread_smc_1_2_regs *args,
733 			       struct sp_session *caller_sp,
734 			       struct ffa_rxtx  *rxtx)
735 {
736 	struct sp_mem *smem = NULL;
737 	struct ffa_mem_relinquish *mem = rxtx->rx;
738 	struct sp_mem_receiver *receiver = NULL;
739 	int err = FFA_NOT_SUPPORTED;
740 	uint32_t exceptions = 0;
741 
742 	if (!check_rxtx(rxtx)) {
743 		ffa_set_error(args, FFA_DENIED);
744 		return;
745 	}
746 
747 	exceptions = cpu_spin_lock_xsave(&rxtx->spinlock);
748 	smem = sp_mem_get(READ_ONCE(mem->handle));
749 
750 	if (!smem) {
751 		DMSG("Incorrect handle");
752 		err = FFA_DENIED;
753 		goto err_unlock_rxtwx;
754 	}
755 
756 	if (READ_ONCE(mem->endpoint_count) != 1) {
757 		DMSG("Incorrect endpoint count");
758 		err = FFA_INVALID_PARAMETERS;
759 		goto err_unlock_rxtwx;
760 	}
761 
762 	if (READ_ONCE(mem->endpoint_id_array[0]) != caller_sp->endpoint_id) {
763 		DMSG("Incorrect endpoint id");
764 		err = FFA_DENIED;
765 		goto err_unlock_rxtwx;
766 	}
767 
768 	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
769 
770 	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
771 
772 	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
773 	if (!receiver->ref_count) {
774 		DMSG("To many relinquish requests");
775 		err = FFA_DENIED;
776 		goto err_unlock_memref;
777 	}
778 
779 	receiver->ref_count--;
780 	if (!receiver->ref_count) {
781 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
782 		if (sp_unmap_ffa_regions(caller_sp, smem) != TEE_SUCCESS) {
783 			DMSG("Failed to unmap region");
784 			ffa_set_error(args, FFA_DENIED);
785 			return;
786 		}
787 	} else {
788 		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
789 	}
790 
791 	ffa_success(args);
792 	return;
793 
794 err_unlock_rxtwx:
795 	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
796 	ffa_set_error(args, err);
797 	return;
798 err_unlock_memref:
799 	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
800 	ffa_set_error(args, err);
801 }
802 
zero_mem_region(struct sp_mem * smem,struct sp_session * s)803 static void zero_mem_region(struct sp_mem *smem, struct sp_session *s)
804 {
805 	void *addr = NULL;
806 	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
807 	struct sp_mem_map_region *reg = NULL;
808 
809 	ts_push_current_session(&s->ts_sess);
810 	SLIST_FOREACH(reg, &smem->regions, link) {
811 		size_t sz = reg->page_count * SMALL_PAGE_SIZE;
812 
813 		addr = sp_mem_get_va(&ctx->uctx, reg->page_offset, reg->mobj);
814 
815 		assert(addr);
816 		memset(addr, 0, sz);
817 	}
818 	ts_pop_current_session();
819 }
820 
/*
 * Handle FFA_MEM_RECLAIM for @handle (args->a1/a2) with flags in args->a3.
 *
 * Returns false if it couldn't process the reclaim message — i.e. the
 * handle doesn't name a share tracked here, which happens when the
 * memory region was shared with the OP-TEE endpoint instead. After this
 * thread_spmc calls handle_mem_reclaim() to make sure that the region is
 * reclaimed from the OP-TEE endpoint.
 * Returns true when the message was consumed; @args then holds either
 * FFA_SUCCESS or FFA_ERROR.
 */
bool ffa_mem_reclaim(struct thread_smc_1_2_regs *args,
		     struct sp_session *caller_sp)
{
	uint64_t handle = reg_pair_to_64(args->a2, args->a1);
	uint32_t flags = args->a3;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver  = NULL;
	uint32_t exceptions = 0;

	smem = sp_mem_get(handle);
	if (!smem)
		return false;

	/*
	 * If the caller is an SP, make sure that it is the owner of the share.
	 * If the call comes from NWd this is ensured by the hypervisor.
	 */
	if (caller_sp && caller_sp->endpoint_id != smem->sender_id) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return true;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	/* Make sure that all shares where relinquished */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->ref_count != 0) {
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	/* Optionally wipe the memory before giving it back to the owner */
	if (flags & FFA_MEMORY_REGION_FLAG_CLEAR) {
		if (caller_sp) {
			zero_mem_region(smem, caller_sp);
		} else {
			/*
			 * Currently we don't support zeroing Normal World
			 * memory. To do this we would have to map the memory
			 * again, zero it and unmap it.
			 */
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	sp_mem_remove(smem);
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

	ffa_success(args);
	return true;
}
881 
/*
 * Handle FFA_MSG_SEND_DIRECT_REQ from SP @caller_sp (or from the normal
 * world when @caller_sp is NULL).
 *
 * Validates source/destination, the partition's direct-messaging
 * properties and (for framework messages) the VM created/destroyed
 * subscription bits, then enters the destination SP. Returns the session
 * that is now running: @caller_sp on any error (with FFA_ERROR placed in
 * @args) or the destination on success.
 */
static struct sp_session *
ffa_handle_sp_direct_req(struct thread_smc_1_2_regs *args,
			 struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	TEE_Result res = FFA_OK;

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send message to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}
	if (!dst) {
		EMSG("Request to normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (dst == caller_sp) {
		EMSG("Cannot send message to own ID");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	/* Both sides must have opted into direct messaging */
	if (caller_sp &&
	    !(caller_sp->props & FFA_PART_PROP_DIRECT_REQ_SEND)) {
		EMSG("SP 0x%"PRIx16" doesn't support sending direct requests",
		     caller_sp->endpoint_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	if (!(dst->props & FFA_PART_PROP_DIRECT_REQ_RECV)) {
		EMSG("SP 0x%"PRIx16" doesn't support receipt of direct requests",
		     dst->endpoint_id);
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return caller_sp;
	}

	/* Framework messages (VM created/destroyed) need extra checks */
	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		switch (args->a2 & FFA_MSG_TYPE_MASK) {
		case FFA_MSG_SEND_VM_CREATED:
			/* The sender must be the NWd hypervisor (ID 0) */
			if (FFA_SRC(args->a1) != 0 || caller_sp) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The SP must be subscribed for this message */
			if (!(dst->props & FFA_PART_PROP_NOTIF_CREATED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		case FFA_MSG_SEND_VM_DESTROYED:
			/* The sender must be the NWd hypervisor (ID 0) */
			if (FFA_SRC(args->a1) != 0 || caller_sp) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}

			/* The SP must be subscribed for this message */
			if (!(dst->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
				ffa_set_error(args, FFA_INVALID_PARAMETERS);
				return caller_sp;
			}
			break;
		default:
			ffa_set_error(args, FFA_NOT_SUPPORTED);
			return caller_sp;
		}
	} else if (args->a2 != FFA_PARAM_MBZ) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	/* Claim the destination: only an idle SP may receive a request */
	cpu_spin_lock(&dst->spinlock);
	if (dst->state != sp_idle) {
		DMSG("SP is busy");
		ffa_set_error(args, FFA_BUSY);
		cpu_spin_unlock(&dst->spinlock);
		return caller_sp;
	}

	dst->state = sp_busy;
	cpu_spin_unlock(&dst->spinlock);

	/*
	 * Store the calling endpoint id. This will make it possible to check
	 * if the response is sent back to the correct endpoint.
	 */
	dst->caller_id = FFA_SRC(args->a1);

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP Panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return error to calling SP */
		return caller_sp;
	}

	return dst;
}
987 
988 static struct sp_session *
ffa_handle_sp_direct_resp(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp)989 ffa_handle_sp_direct_resp(struct thread_smc_1_2_regs *args,
990 			  struct sp_session *caller_sp)
991 {
992 	struct sp_session *dst = NULL;
993 	enum sp_status st = sp_idle;
994 	TEE_Result res = FFA_OK;
995 
996 	if (!caller_sp) {
997 		EMSG("Response from normal world not supported");
998 		ffa_set_error(args, FFA_NOT_SUPPORTED);
999 		return NULL;
1000 	}
1001 
1002 	res = ffa_get_dst(args, caller_sp, &dst);
1003 	if (res) {
1004 		/* Tried to send response to an incorrect endpoint */
1005 		ffa_set_error(args, res);
1006 		return caller_sp;
1007 	}
1008 
1009 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
1010 		switch (args->a2 & FFA_MSG_TYPE_MASK) {
1011 		case FFA_MSG_RESP_VM_CREATED:
1012 			/* The destination must be the NWd hypervisor (ID 0) */
1013 			if (FFA_DST(args->a1) != 0 || dst) {
1014 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
1015 				return caller_sp;
1016 			}
1017 
1018 			/* The SP must be subscribed for this message */
1019 			if (!(dst->props & FFA_PART_PROP_NOTIF_CREATED)) {
1020 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
1021 				return caller_sp;
1022 			}
1023 			break;
1024 		case FFA_MSG_RESP_VM_DESTROYED:
1025 			/* The destination must be the NWd hypervisor (ID 0) */
1026 			if (FFA_DST(args->a1) != 0 || dst) {
1027 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
1028 				return caller_sp;
1029 			}
1030 
1031 			/* The SP must be subscribed for this message */
1032 			if (!(dst->props & FFA_PART_PROP_NOTIF_DESTROYED)) {
1033 				ffa_set_error(args, FFA_INVALID_PARAMETERS);
1034 				return caller_sp;
1035 			}
1036 			break;
1037 		default:
1038 			ffa_set_error(args, FFA_NOT_SUPPORTED);
1039 			return caller_sp;
1040 		}
1041 	} else if (args->a2 != FFA_PARAM_MBZ) {
1042 		ffa_set_error(args, FFA_INVALID_PARAMETERS);
1043 		return caller_sp;
1044 	}
1045 
1046 	if (dst) {
1047 		cpu_spin_lock(&dst->spinlock);
1048 		st = dst->state;
1049 		cpu_spin_unlock(&dst->spinlock);
1050 
1051 		if (st != sp_busy) {
1052 			EMSG("SP is not waiting for a request");
1053 			ffa_set_error(args, FFA_INVALID_PARAMETERS);
1054 			return caller_sp;
1055 		}
1056 	}
1057 
1058 	if (caller_sp->caller_id != FFA_DST(args->a1)) {
1059 		EMSG("FFA_MSG_SEND_DIRECT_RESP to incorrect SP");
1060 		ffa_set_error(args, FFA_INVALID_PARAMETERS);
1061 		return caller_sp;
1062 	}
1063 
1064 	caller_sp->caller_id = 0;
1065 
1066 	cpu_spin_lock(&caller_sp->spinlock);
1067 	caller_sp->state = sp_idle;
1068 	cpu_spin_unlock(&caller_sp->spinlock);
1069 
1070 	if (!dst) {
1071 		/* Send message back to the NW */
1072 		return NULL;
1073 	}
1074 
1075 	/* Forward the message to the destination SP */
1076 	res = sp_enter(args, dst);
1077 	if (res) {
1078 		/* The SP Panicked */
1079 		ffa_set_error(args, FFA_ABORTED);
1080 		/* Return error to calling SP */
1081 		return caller_sp;
1082 	}
1083 	return dst;
1084 }
1085 
1086 static struct sp_session *
ffa_handle_sp_error(struct thread_smc_1_2_regs * args,struct sp_session * caller_sp)1087 ffa_handle_sp_error(struct thread_smc_1_2_regs *args,
1088 		    struct sp_session *caller_sp)
1089 {
1090 	/* If caller_sp == NULL send message to Normal World */
1091 	if (caller_sp && sp_enter(args, caller_sp)) {
1092 		/*
1093 		 * We can not return the error. Unwind the call chain with one
1094 		 * link. Set the state of the SP to dead.
1095 		 */
1096 		cpu_spin_lock(&caller_sp->spinlock);
1097 		caller_sp->state = sp_dead;
1098 		cpu_spin_unlock(&caller_sp->spinlock);
1099 		/* Create error. */
1100 		ffa_set_error(args, FFA_ABORTED);
1101 		return  sp_get_session(caller_sp->caller_id);
1102 	}
1103 
1104 	return caller_sp;
1105 }
1106 
handle_features(struct thread_smc_1_2_regs * args)1107 static void handle_features(struct thread_smc_1_2_regs *args)
1108 {
1109 	uint32_t ret_fid = 0;
1110 	uint32_t ret_w2 = FFA_PARAM_MBZ;
1111 
1112 	switch (args->a1) {
1113 #ifdef ARM64
1114 	case FFA_RXTX_MAP_64:
1115 #endif
1116 	case FFA_RXTX_MAP_32:
1117 		ret_fid = FFA_SUCCESS_32;
1118 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
1119 		break;
1120 	case FFA_ERROR:
1121 	case FFA_VERSION:
1122 	case FFA_SUCCESS_32:
1123 #ifdef ARM64
1124 	case FFA_SUCCESS_64:
1125 #endif
1126 	default:
1127 		ret_fid = FFA_ERROR;
1128 		ret_w2 = FFA_NOT_SUPPORTED;
1129 		break;
1130 	}
1131 
1132 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
1133 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1134 }
1135 
handle_mem_perm_get(struct thread_smc_1_2_regs * args,struct sp_session * sp_s)1136 static void handle_mem_perm_get(struct thread_smc_1_2_regs *args,
1137 				struct sp_session *sp_s)
1138 {
1139 	struct sp_ctx *sp_ctx = NULL;
1140 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1141 	uint16_t attrs = 0;
1142 	uint32_t ret_fid = FFA_ERROR;
1143 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1144 
1145 	/*
1146 	 * The FFA_MEM_PERM_GET interface is only allowed during initialization
1147 	 */
1148 	if (sp_s->is_initialized) {
1149 		ret_val = FFA_DENIED;
1150 		goto out;
1151 	}
1152 
1153 	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
1154 	if (!sp_ctx)
1155 		goto out;
1156 
1157 	/* Query memory attributes */
1158 	ts_push_current_session(&sp_s->ts_sess);
1159 	res = vm_get_prot(&sp_ctx->uctx, args->a1, SMALL_PAGE_SIZE, &attrs);
1160 	ts_pop_current_session();
1161 	if (res)
1162 		goto out;
1163 
1164 	/* Build response value */
1165 	ret_fid = FFA_SUCCESS_32;
1166 	ret_val = 0;
1167 	if ((attrs & TEE_MATTR_URW) == TEE_MATTR_URW)
1168 		ret_val |= FFA_MEM_PERM_RW;
1169 	else if (attrs & TEE_MATTR_UR)
1170 		ret_val |= FFA_MEM_PERM_RO;
1171 
1172 	if ((attrs & TEE_MATTR_UX) == 0)
1173 		ret_val |= FFA_MEM_PERM_NX;
1174 
1175 out:
1176 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
1177 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1178 }
1179 
/*
 * Handle FFA_MEM_PERM_SET: change the access permissions of a region of
 * args->a2 small pages starting at args->a1 in the SP's address space, using
 * the FF-A permission encoding in args->a3. RWX mappings are refused.
 */
static void handle_mem_perm_set(struct thread_smc_1_2_regs *args,
				struct sp_session *sp_s)
{
	struct sp_ctx *sp_ctx = NULL;
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	size_t region_size = 0;
	uint32_t data_perm = 0;
	uint32_t instruction_perm = 0;
	uint16_t attrs = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;

	/*
	 * The FFA_MEM_PERM_SET interface is only allowed during initialization
	 */
	if (sp_s->is_initialized) {
		ret_val = FFA_DENIED;
		goto out;
	}

	sp_ctx = to_sp_ctx(sp_s->ts_sess.ctx);
	if (!sp_ctx)
		goto out;

	/* a2 is a page count; guard the size computation against overflow */
	if (MUL_OVERFLOW(args->a2, SMALL_PAGE_SIZE, &region_size))
		goto out;

	if (args->a3 & FFA_MEM_PERM_RESERVED) {
		/* Non-zero reserved bits */
		goto out;
	}

	data_perm = args->a3 & FFA_MEM_PERM_DATA_PERM;
	instruction_perm = args->a3 & FFA_MEM_PERM_INSTRUCTION_PERM;

	/* RWX access right configuration is not permitted */
	if (data_perm == FFA_MEM_PERM_RW && instruction_perm == FFA_MEM_PERM_X)
		goto out;

	/* Translate the FF-A data permissions into TEE_MATTR_* bits */
	switch (data_perm) {
	case FFA_MEM_PERM_RO:
		attrs = TEE_MATTR_UR;
		break;
	case FFA_MEM_PERM_RW:
		attrs = TEE_MATTR_URW;
		break;
	default:
		/* Invalid permission value */
		goto out;
	}

	if (instruction_perm == FFA_MEM_PERM_X)
		attrs |= TEE_MATTR_UX;

	/* Set access rights */
	ts_push_current_session(&sp_s->ts_sess);
	res = vm_set_prot(&sp_ctx->uctx, args->a1, region_size, attrs);
	ts_pop_current_session();
	if (res != TEE_SUCCESS)
		goto out;

	ret_fid = FFA_SUCCESS_32;
	ret_val = FFA_PARAM_MBZ;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
1248 
spmc_handle_version(struct thread_smc_1_2_regs * args,struct ffa_rxtx * rxtx)1249 static void spmc_handle_version(struct thread_smc_1_2_regs *args,
1250 				struct ffa_rxtx *rxtx)
1251 {
1252 	spmc_set_args(args, spmc_exchange_version(args->a1, rxtx),
1253 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1254 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1255 }
1256 
/*
 * Handle FFA_CONSOLE_LOG_32/64: gather the characters packed into the
 * argument registers (starting at w2/x2) into a local buffer and write them
 * to the trace output.
 */
static void handle_console_log(uint32_t ffa_vers,
			       struct thread_smc_1_2_regs *args)
{
	uint32_t ret_fid = FFA_ERROR;
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	/* Character count is encoded in the low bits of w1/x1 */
	size_t char_count = args->a1 & FFA_CONSOLE_LOG_CHAR_COUNT_MASK;
	char buffer[FFA_CONSOLE_LOG_64_MAX_MSG_LEN + 1] = { 0 };
	size_t max_length = 0;
	size_t reg_size = 0;
	size_t n = 0;

	if (args->a0 == FFA_CONSOLE_LOG_64) {
		/* FF-A v1.2 allows a longer message in the 64-bit variant */
		if (ffa_vers >= FFA_VERSION_1_2)
			max_length = FFA_CONSOLE_LOG_64_MAX_MSG_LEN;
		else
			max_length = FFA_CONSOLE_LOG_64_V1_1_MAX_MSG_LEN;
		reg_size = sizeof(uint64_t);
	} else {
		max_length = FFA_CONSOLE_LOG_32_MAX_MSG_LEN;
		reg_size = sizeof(uint32_t);
	}

	if (char_count < 1 || char_count > max_length)
		goto out;

	/* Copy reg_size bytes per register, less for the final partial one */
	for (n = 0; n < char_count; n += reg_size) {
		/* + 2 since we're starting from W2/X2 */
		memcpy(buffer + n, &args->a[2 + n / reg_size],
		       MIN(char_count - n, reg_size));
	}

	/* buffer has room: char_count <= FFA_CONSOLE_LOG_64_MAX_MSG_LEN */
	buffer[char_count] = '\0';

	trace_ext_puts(buffer);

	ret_fid = FFA_SUCCESS_32;
	ret_val = FFA_PARAM_MBZ;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
1299 
/*
 * FF-A message handler for SPs. Every message for or from an SP is handled
 * here. This is the entry of the sp_spmc kernel thread. The caller_sp is set
 * to NULL when the message comes from the Normal World.
 */
void spmc_sp_msg_handler(struct thread_smc_1_2_regs *args,
			 struct sp_session *caller_sp)
{
	thread_check_canaries();
	/*
	 * Keep dispatching until the chain of SP calls unwinds back to the
	 * Normal World (caller_sp == NULL).
	 */
	do {
		switch (args->a0) {
#ifdef ARM64
		case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
		case FFA_MSG_SEND_DIRECT_REQ_32:
			caller_sp = ffa_handle_sp_direct_req(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MSG_SEND_DIRECT_RESP_64:
#endif
		case FFA_MSG_SEND_DIRECT_RESP_32:
			caller_sp = ffa_handle_sp_direct_resp(args, caller_sp);
			break;
		case FFA_ERROR:
			caller_sp = ffa_handle_sp_error(args, caller_sp);
			break;
		case FFA_MSG_WAIT:
			/*
			 * FFA_MSG_WAIT gives control back to NW.
			 * NOTE(review): assumes caller_sp != NULL here, i.e.
			 * FFA_MSG_WAIT only ever arrives from an SP — confirm
			 * against the callers of this handler.
			 */
			cpu_spin_lock(&caller_sp->spinlock);
			caller_sp->state = sp_idle;
			cpu_spin_unlock(&caller_sp->spinlock);
			caller_sp = NULL;
			break;
#ifdef ARM64
		case FFA_RXTX_MAP_64:
#endif
		case FFA_RXTX_MAP_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_map(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RXTX_UNMAP:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_unmap(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RX_RELEASE:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rx_release(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_ID_GET:
			/* Return the calling SP's own endpoint ID in w2 */
			args->a0 = FFA_SUCCESS_32;
			args->a2 = caller_sp->endpoint_id;
			sp_enter(args, caller_sp);
			break;
		case FFA_VERSION:
			spmc_handle_version(args, &caller_sp->rxtx);
			sp_enter(args, caller_sp);
			break;
		case FFA_FEATURES:
			handle_features(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_SPM_ID_GET:
			spmc_handle_spm_id_get(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_PARTITION_INFO_GET:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_partition_info_get(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_SHARE_64:
#endif
		case FFA_MEM_SHARE_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_sp_handle_mem_share(args, &caller_sp->rxtx,
						 caller_sp);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_RETRIEVE_REQ_64:
#endif
		case FFA_MEM_RETRIEVE_REQ_32:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_retrieve(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RELINQUISH:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_relinquish(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RECLAIM:
			ffa_mem_reclaim(args, caller_sp);
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_PERM_GET_64:
#endif
		case FFA_MEM_PERM_GET_32:
			handle_mem_perm_get(args, caller_sp);
			sp_enter(args, caller_sp);
			break;

#ifdef ARM64
		case FFA_MEM_PERM_SET_64:
#endif
		case FFA_MEM_PERM_SET_32:
			handle_mem_perm_set(args, caller_sp);
			sp_enter(args, caller_sp);
			break;

#ifdef ARM64
		case FFA_CONSOLE_LOG_64:
#endif
		case FFA_CONSOLE_LOG_32:
			handle_console_log(caller_sp->rxtx.ffa_vers, args);
			sp_enter(args, caller_sp);
			break;

		default:
			EMSG("Unhandled FFA function ID %#"PRIx32,
			     (uint32_t)args->a0);
			ffa_set_error(args, FFA_INVALID_PARAMETERS);
			sp_enter(args, caller_sp);
		}
	} while (caller_sp);
}
1438