// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2019 NXP
 *
 * Brief   CAAM Job Rings manager.
 *         Implementation of functions to enqueue/dequeue CAAM Job Descriptors
 */
#include <caam_common.h>
#include <caam_desc_helper.h>
#include <caam_hal_jr.h>
#include <caam_io.h>
#include <caam_jr.h>
#include <caam_rng.h>
#include <caam_utils_delay.h>
#include <caam_utils_mem.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/pm.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <tee/cache.h>

/*
 * Job Free define
 */
#define JR_JOB_FREE	0
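
/*
 * Job IDs are one-hot bit masks (1 << index of the caller entry), so a set
 * of jobs can be waited on with a single OR'ed mask and JR_JOB_FREE (0)
 * marks an unused entry. This also bounds the number of in-flight jobs
 * tracked per ring to 32.
 */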

/*
 * Caller information context object
 */
struct caller_info {
	struct caam_jobctx *jobctx; /* Caller job context object */
	uint32_t job_id;            /* Current Job ID */
	paddr_t pdesc;              /* Physical address of the descriptor */
};
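
/*
 * Note: caller entries are matched against the output ring by descriptor
 * physical address, not by ring position, because the CAAM can complete
 * jobs out of order with respect to the input ring.
 */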

/*
 * Job Ring module private data
 */
struct jr_privdata {
	vaddr_t baseaddr;        /* Job Ring base address */

	vaddr_t ctrladdr;        /* CAAM virtual base address */
	paddr_t jroffset;        /* Job Ring address offset */
	uint64_t paddr_inrings;  /* CAAM physical addr of input queue */
	uint64_t paddr_outrings; /* CAAM physical addr of output queue */

	uint8_t nb_jobs;         /* Number of Job ring entries managed */

	/* Input Job Ring Variables */
	struct caam_inring_entry *inrings; /* Input JR HW queue */
	unsigned int inlock;          /* Input JR spin lock */
	uint16_t inwrite_index;       /* SW Index - next JR entry free */

	/* Output Job Ring Variables */
	struct caam_outring_entry *outrings; /* Output JR HW queue */
	unsigned int outlock;           /* Output JR spin lock */
	uint16_t outread_index;         /* SW Index - next JR output done */

	/* Caller Information Variables */
	struct caller_info *callers;    /* Job Ring Caller information */
	unsigned int callers_lock;      /* Job Ring Caller spin lock */

	struct itr_handler it_handler;  /* Interrupt handler */
};
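
/*
 * Locking scheme: inlock serializes enqueuers, outlock serializes
 * dequeuers, and callers_lock protects the callers array that both the
 * enqueue and dequeue paths update.
 */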

/*
 * Job Ring module private data reference
 */
static struct jr_privdata *jr_privdata;

/*
 * Free module resources
 *
 * @jr_priv   Reference to the module private data
 */
static void do_jr_free(struct jr_privdata *jr_priv)
{
	if (jr_priv) {
		caam_free(jr_priv->inrings);
		caam_free(jr_priv->outrings);
		caam_free(jr_priv->callers);
		caam_free(jr_priv);
	}
}

/*
 * Allocate module resources
 *
 * @privdata  [out] Allocated Job Ring private data
 * @nb_jobs   Number of jobs to manage in the queue
 */
static enum caam_status do_jr_alloc(struct jr_privdata **privdata,
				    uint8_t nb_jobs)
{
	enum caam_status retstatus = CAAM_OUT_MEMORY;
	struct jr_privdata *jr_priv = NULL;

	/* Allocate the Job Ring private data */
	jr_priv = caam_calloc(sizeof(*jr_priv));

	if (!jr_priv) {
		JR_TRACE("Private Data allocation error");
		goto end_alloc;
	}

	/* Setup the number of jobs */
	jr_priv->nb_jobs = nb_jobs;

	/* Allocate the input and output job ring queues */
	jr_priv->inrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_inring_entry));
	jr_priv->outrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_outring_entry));

	/* Allocate the callers information */
	jr_priv->callers = caam_calloc(nb_jobs * sizeof(struct caller_info));

	if (!jr_priv->inrings || !jr_priv->outrings || !jr_priv->callers) {
		JR_TRACE("JR resources allocation error");
		goto end_alloc;
	}

	/* Initialize the spin locks */
	jr_priv->inlock = SPINLOCK_UNLOCK;
	jr_priv->outlock = SPINLOCK_UNLOCK;
	jr_priv->callers_lock = SPINLOCK_UNLOCK;

	/* Initialize the queue counter */
	jr_priv->inwrite_index = 0;
	jr_priv->outread_index = 0;

	/*
	 * Ensure that the initialized queues are pushed out to physical
	 * memory before the CAAM DMA reads them
	 */
	cache_operation(TEE_CACHEFLUSH, jr_priv->inrings,
			nb_jobs * sizeof(struct caam_inring_entry));
	cache_operation(TEE_CACHEFLUSH, jr_priv->outrings,
			nb_jobs * sizeof(struct caam_outring_entry));

	retstatus = CAAM_NO_ERROR;
end_alloc:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_priv);
	else
		*privdata = jr_priv;

	return retstatus;
}

/*
 * Job Ring Interrupt handler
 *
 * @handler  Interrupt Handler structure
 */
static enum itr_return caam_jr_irqhandler(struct itr_handler *handler)
{
	JR_TRACE("Disable the interrupt");
	itr_disable(handler->it);

	/* Send a signal to exit WFE loop */
	sev();

	return ITRR_HANDLED;
}
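
/*
 * The SEV above pairs with the WFE loop in do_jr_enqueue(): a core waiting
 * for a free input ring slot is woken either by this handler or by a spin
 * unlock event.
 */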

/*
 * Dequeues all completed jobs and calls each job context's callback
 * function. Returns the bit mask of the completed jobs that were
 * expected (@wait_job_ids parameter).
 *
 * @wait_job_ids  Expected Jobs to be completed
 */
static uint32_t do_jr_dequeue(uint32_t wait_job_ids)
{
	uint32_t ret_job_id = 0;
	struct caller_info *caller = NULL;
	struct caam_outring_entry *jr_out = NULL;
	struct caam_jobctx *jobctx = NULL;
	uint32_t exceptions = 0;
	bool found = false;
	uint16_t idx_jr = 0;
	uint32_t nb_jobs_done = 0;
	size_t nb_jobs_inv = 0;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->outlock);

	nb_jobs_done = caam_hal_jr_get_nbjob_done(jr_privdata->baseaddr);

	if (nb_jobs_done == 0) {
		cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);
		return ret_job_id;
	}

	/* Ensure that output ring descriptor entries are not in cache */
	if ((jr_privdata->outread_index + nb_jobs_done) >
	    jr_privdata->nb_jobs) {
		/*
		 * Invalidate the whole circular job buffer because some
		 * completed jobs wrap around to the beginning of the buffer
		 */
		jr_out = jr_privdata->outrings;
		nb_jobs_inv = jr_privdata->nb_jobs;
	} else {
		/* Invalidate only the completed jobs */
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];
		nb_jobs_inv = nb_jobs_done;
	}

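	/*
	 * Example: with nb_jobs = 4, outread_index = 3 and nb_jobs_done = 2,
	 * the completed entries sit at indexes 3 and 0, hence the whole ring
	 * is invalidated rather than two disjoint ranges.
	 */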
	cache_operation(TEE_CACHEINVALIDATE, jr_out,
			sizeof(struct caam_outring_entry) * nb_jobs_inv);

	for (; nb_jobs_done; nb_jobs_done--) {
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];

		/*
		 * Lock the caller information array because enqueue is
		 * also touching it
		 */
		cpu_spin_lock(&jr_privdata->callers_lock);
		for (idx_jr = 0, found = false; idx_jr < jr_privdata->nb_jobs;
		     idx_jr++) {
			/*
			 * Search for the caller information corresponding to
			 * the completed job.
			 * Don't use the outread_index or inwrite_index because
			 * completion can be out of order compared to the input
			 * buffer
			 */
			caller = &jr_privdata->callers[idx_jr];
			if (caam_desc_pop(jr_out) == caller->pdesc) {
				jobctx = caller->jobctx;
				jobctx->status = caam_read_jobstatus(jr_out);

				/* Update return Job IDs mask */
				if (caller->job_id & wait_job_ids)
					ret_job_id |= caller->job_id;

				JR_TRACE("JR id=0x%" PRIx32
					 ", context @0x%08" PRIxVA,
					 caller->job_id, (vaddr_t)jobctx);
				/* Clear the Entry Descriptor DMA */
				caller->pdesc = 0;
				caller->job_id = JR_JOB_FREE;
				found = true;
				JR_TRACE("Free space #%" PRId16
					 " in the callers array",
					 idx_jr);
				break;
			}
		}
		cpu_spin_unlock(&jr_privdata->callers_lock);

		/*
		 * Remove the job from the output ring even if no matching
		 * caller was found
		 */
		caam_hal_jr_del_job(jr_privdata->baseaddr);

		/*
		 * Increment index to next JR output entry taking care that
		 * it is a circular buffer of nb_jobs size.
		 */
		jr_privdata->outread_index++;
		jr_privdata->outread_index %= jr_privdata->nb_jobs;

		if (found && jobctx->callback) {
			/* Finally, execute user's callback */
			jobctx->callback(jobctx);
		}
	}

	cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);

	return ret_job_id;
}

/*
 * Enqueues a new job in the Job Ring input queue and keeps the caller's
 * job context in the private callers array.
 *
 * @jobctx   Caller's job context
 * @job_id   [out] Job ID enqueued
 */
static enum caam_status do_jr_enqueue(struct caam_jobctx *jobctx,
				      uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_BUSY;
	struct caam_inring_entry *cur_inrings = NULL;
	struct caller_info *caller = NULL;
	uint32_t exceptions = 0;
	uint32_t job_mask = 0;
	uint8_t idx_jr = 0;
	bool found = false;

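	/*
	 * The input ring lock is held from the slot availability check below
	 * until the write index is updated, so concurrent enqueuers cannot
	 * claim the same input ring entry.
	 */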
	exceptions = cpu_spin_lock_xsave(&jr_privdata->inlock);

	/*
	 * Stay locked until a slot is free.
	 * Check if there is an available JR entry in the HW
	 */
	while (caam_hal_jr_read_nbslot_available(jr_privdata->baseaddr) == 0) {
		/*
		 * WFE will return thanks to a SEV generated by the
		 * interrupt handler or by a spin_unlock
		 */
		wfe();
	}

	/*
	 * A free slot in the input ring does not mean that a previously
	 * pushed job is completed: completion is out of order. Look for a
	 * free entry in the caller data to store the context and derive a
	 * job ID for completion tracking.
	 *
	 * Lock the caller information array because dequeue is
	 * also touching it
	 */
	cpu_spin_lock(&jr_privdata->callers_lock);
	for (idx_jr = 0; idx_jr < jr_privdata->nb_jobs; idx_jr++) {
		if (jr_privdata->callers[idx_jr].job_id == JR_JOB_FREE) {
			JR_TRACE("Found a free space #%" PRId8
				 " in the callers array",
				 idx_jr);
			job_mask = 1 << idx_jr;

			/* Store the caller information for the JR completion */
			caller = &jr_privdata->callers[idx_jr];
			caller->job_id = job_mask;
			caller->jobctx = jobctx;
			caller->pdesc = virt_to_phys((void *)jobctx->desc);

			found = true;
			break;
		}
	}
	cpu_spin_unlock(&jr_privdata->callers_lock);

	if (!found) {
		JR_TRACE("Error: didn't find a free space in the callers array");
		goto end_enqueue;
	}

	JR_TRACE("Push id=%" PRId16 ", job (0x%08" PRIx32
		 ") context @0x%08" PRIxVA,
		 jr_privdata->inwrite_index, job_mask, (vaddr_t)jobctx);

	cur_inrings = &jr_privdata->inrings[jr_privdata->inwrite_index];

	/* Push the descriptor into the JR HW list */
	caam_desc_push(cur_inrings, caller->pdesc);

	/* Ensure that physical memory is up to date */
	cache_operation(TEE_CACHECLEAN, cur_inrings,
			sizeof(struct caam_inring_entry));

	/*
	 * Increment index to next JR input entry taking care that
	 * it is a circular buffer of nb_jobs size.
	 */
	jr_privdata->inwrite_index++;
	jr_privdata->inwrite_index %= jr_privdata->nb_jobs;

	/* Ensure that the input descriptor is pushed to physical memory */
	cache_operation(TEE_CACHECLEAN, jobctx->desc,
			DESC_SZBYTES(caam_desc_get_len(jobctx->desc)));

	/* Inform HW that a new job is available */
	caam_hal_jr_add_newjob(jr_privdata->baseaddr);

	*job_id = job_mask;
	retstatus = CAAM_NO_ERROR;

end_enqueue:
	cpu_spin_unlock_xrestore(&jr_privdata->inlock, exceptions);

	return retstatus;
}

/*
 * Synchronous job completion callback
 *
 * @jobctx   Job context
 */
static void job_done(struct caam_jobctx *jobctx)
{
	jobctx->completion = true;
}

void caam_jr_cancel(uint32_t job_id)
{
	unsigned int idx = 0;

	JR_TRACE("Job cancel 0x%" PRIx32, job_id);
	for (idx = 0; idx < jr_privdata->nb_jobs; idx++) {
		/*
		 * Search for the caller information corresponding to
		 * the job_id mask.
		 */
		if (jr_privdata->callers[idx].job_id == job_id) {
			/* Clear the Entry Descriptor */
			jr_privdata->callers[idx].pdesc = 0;
			jr_privdata->callers[idx].job_id = JR_JOB_FREE;
			return;
		}
	}
}
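
/*
 * Note: cancelling only releases the caller tracking entry; a descriptor
 * already handed to the hardware may still be executed by the CAAM.
 */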

enum caam_status caam_jr_dequeue(uint32_t job_ids, unsigned int timeout_ms)
{
	uint32_t job_complete = 0;
	uint32_t nb_loop = 0;
	bool infinite = false;
	bool it_active = false;

	if (timeout_ms == UINT_MAX)
		infinite = true;
	else
		nb_loop = timeout_ms * 100;

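	/*
	 * The polling loop below waits about 10 microseconds per iteration
	 * (see the caam_udelay() call), hence the 100 loops per millisecond
	 * of timeout computed above.
	 */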
	do {
		/* Call the do_jr_dequeue function to dequeue the jobs */
		job_complete = do_jr_dequeue(job_ids);

		/* Check if the JR interrupt is pending and acknowledge it */
		it_active = caam_hal_jr_check_ack_itr(jr_privdata->baseaddr);

		if (job_complete & job_ids)
			return CAAM_NO_ERROR;

		/* If no JR interrupt was pending, wait a bit */
		if (!it_active)
			caam_udelay(10);
	} while (infinite || (nb_loop--));

	return CAAM_TIMEOUT;
}

enum caam_status caam_jr_enqueue(struct caam_jobctx *jobctx, uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused int timeout = 10; /* Nb loops to poll job completion */

	if (!jobctx)
		return CAAM_BAD_PARAM;

	JR_DUMPDESC(jobctx->desc);

	if (!jobctx->callback && job_id) {
		JR_TRACE("Job callback not defined whereas job is asynchronous");
		return CAAM_BAD_PARAM;
	}

	if (jobctx->callback && !job_id) {
		JR_TRACE("Job ID not defined whereas job is asynchronous");
		return CAAM_BAD_PARAM;
	}

	jobctx->completion = false;
	jobctx->status = 0;

	/*
	 * If parameter job_id is NULL, the job is synchronous, hence use
	 * the local job_done callback function
	 */
	if (!jobctx->callback && !job_id) {
		jobctx->callback = job_done;
		jobctx->context = jobctx;
	}

	retstatus = do_jr_enqueue(jobctx, &jobctx->id);

	if (retstatus != CAAM_NO_ERROR) {
		JR_TRACE("enqueue job error 0x%08x", retstatus);
		return retstatus;
	}

	/*
	 * If parameter job_id is defined, the job is asynchronous, so
	 * return after setting the job_id value
	 */
	if (job_id) {
		*job_id = jobctx->id;
		return CAAM_PENDING;
	}

#ifdef TIMEOUT_COMPLETION
	/*
	 * Job is synchronous: wait until job completion or timeout
	 */
	while (!jobctx->completion && timeout--)
		caam_jr_dequeue(jobctx->id, 100);

	if (timeout <= 0) {
		/* Job timeout, cancel it and return in error */
		caam_jr_cancel(jobctx->id);
		retstatus = CAAM_TIMEOUT;
	} else {
		if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
			retstatus = CAAM_JOB_STATUS;
		else
			retstatus = CAAM_NO_ERROR;
	}
#else
	/*
	 * Job is synchronous: wait until the job completes.
	 * Don't use a timeout because there is no HW timer and
	 * so the timeout would not be precise.
	 */
	while (!jobctx->completion)
		caam_jr_dequeue(jobctx->id, 100);

	if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
		retstatus = CAAM_JOB_STATUS;
	else
		retstatus = CAAM_NO_ERROR;
#endif

	/* Erase local callback function */
	jobctx->callback = NULL;

	return retstatus;
}
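
/*
 * Usage sketch (illustrative, not enforced by this file): a synchronous
 * caller fills jobctx->desc and passes a NULL job_id; the call blocks
 * until completion and jobctx->status then holds the CAAM job status.
 * An asynchronous caller also sets jobctx->callback (and optionally
 * jobctx->context), passes a non-NULL job_id, gets CAAM_PENDING back and
 * is notified through the callback from a later caam_jr_dequeue() call.
 */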

enum caam_status caam_jr_init(struct caam_jrcfg *jrcfg)
{
	enum caam_status retstatus = CAAM_FAILURE;

	JR_TRACE("Initialization");

	/* Allocate the Job Ring resources */
	retstatus = do_jr_alloc(&jr_privdata, jrcfg->nb_jobs);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->ctrladdr = jrcfg->base;
	jr_privdata->jroffset = jrcfg->offset;

	retstatus =
		caam_hal_jr_setowner(jrcfg->base, jrcfg->offset, JROWN_ARM_S);
	JR_TRACE("JR setowner returned 0x%x", retstatus);

	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->baseaddr = jrcfg->base + jrcfg->offset;
	retstatus = caam_hal_jr_reset(jr_privdata->baseaddr);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	/*
	 * Get the physical address of the Input/Output queues.
	 * The HW configuration uses 64-bit registers regardless of
	 * the CAAM or CPU addressing mode.
	 */
	jr_privdata->paddr_inrings = virt_to_phys(jr_privdata->inrings);
	jr_privdata->paddr_outrings = virt_to_phys(jr_privdata->outrings);
	if (!jr_privdata->paddr_inrings || !jr_privdata->paddr_outrings) {
		JR_TRACE("JR bad queue pointers");
		retstatus = CAAM_FAILURE;
		goto end_init;
	}

	caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
			   jr_privdata->paddr_inrings,
			   jr_privdata->paddr_outrings);

	/*
	 * Prepare the interrupt handler to secure the interrupt even
	 * if the interrupt is not used
	 */
	jr_privdata->it_handler.it = jrcfg->it_num;
	jr_privdata->it_handler.flags = ITRF_TRIGGER_LEVEL;
	jr_privdata->it_handler.handler = caam_jr_irqhandler;
	jr_privdata->it_handler.data = jr_privdata;

#ifdef CFG_NXP_CAAM_RUNTIME_JR
	itr_add(&jr_privdata->it_handler);
#endif
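	/*
	 * Without CFG_NXP_CAAM_RUNTIME_JR the handler is prepared but not
	 * registered; the interrupt is still enabled at JR level so that
	 * its status can be polled and acknowledged in caam_jr_dequeue().
	 */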
	caam_hal_jr_enable_itr(jr_privdata->baseaddr);

	retstatus = CAAM_NO_ERROR;

end_init:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_privdata);

	return retstatus;
}

enum caam_status caam_jr_halt(void)
{
	return caam_hal_jr_halt(jr_privdata->baseaddr);
}

enum caam_status caam_jr_flush(void)
{
	return caam_hal_jr_flush(jr_privdata->baseaddr);
}

void caam_jr_resume(uint32_t pm_hint)
{
	if (pm_hint == PM_HINT_CONTEXT_STATE) {
#ifndef CFG_NXP_CAAM_RUNTIME_JR
		/*
		 * In case the CAAM is not used, the JR used to
		 * instantiate the RNG has been released to the Non-Secure
		 * world, hence the Secure JR must be reconfigured and
		 * released again after the RNG instantiation.
		 */
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_S);

		caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
				   jr_privdata->paddr_inrings,
				   jr_privdata->paddr_outrings);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */

		/* Read the current input ring index */
		jr_privdata->inwrite_index =
			caam_hal_jr_input_index(jr_privdata->baseaddr);
		/* Read the current output ring index */
		jr_privdata->outread_index =
			caam_hal_jr_output_index(jr_privdata->baseaddr);

		if (caam_rng_instantiation() != CAAM_NO_ERROR)
			panic();

#ifndef CFG_NXP_CAAM_RUNTIME_JR
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_NS);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */
	} else {
		caam_hal_jr_resume(jr_privdata->baseaddr);
	}
}

enum caam_status caam_jr_complete(void)
{
	enum caam_status ret = CAAM_BUSY;

	ret = caam_hal_jr_flush(jr_privdata->baseaddr);
	if (ret == CAAM_NO_ERROR)
		caam_hal_jr_resume(jr_privdata->baseaddr);

	return ret;
}
651