xref: /optee_os/core/drivers/crypto/caam/caam_jr.c (revision 5b25c76ac40f830867e3d60800120ffd7874e8dc)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2019 NXP
 *
 * Brief   CAAM Job Rings manager.
 *         Implementation of functions to enqueue/dequeue CAAM Job Descriptors
 */
#include <caam_common.h>
#include <caam_desc_helper.h>
#include <caam_hal_jr.h>
#include <caam_io.h>
#include <caam_jr.h>
#include <caam_rng.h>
#include <caam_utils_delay.h>
#include <caam_utils_mem.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/pm.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <tee/cache.h>

/*
 * The CAAM physical address is independent of the CPU addressing mode.
 * CAAM can manage 32-bit or 64-bit addresses depending on its version
 * and the device.
 */
/*
 * Definition of the input and output ring entry objects
 */
#ifdef CFG_CAAM_64BIT
struct inring_entry {
	uint64_t desc;   /* Physical address of the descriptor */
};

struct __packed outring_entry {
	uint64_t desc;   /* Physical address of the descriptor */
	uint32_t status; /* Status of the executed job */
};
#else
struct inring_entry {
	uint32_t desc;   /* Physical address of the descriptor */
};

struct __packed outring_entry {
	uint32_t desc;   /* Physical address of the descriptor */
	uint32_t status; /* Status of the executed job */
};
#endif /* CFG_CAAM_64BIT */
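
/*
 * Layout note (illustrative): with CFG_CAAM_64BIT an output ring entry
 * is 12 bytes (8-byte descriptor address + 4-byte status) thanks to
 * __packed, otherwise 8 bytes. A build-time check such as the
 * following (hypothetical, not part of the driver) could enforce the
 * packing:
 *
 *   _Static_assert(sizeof(struct outring_entry) ==
 *                  sizeof(((struct outring_entry *)0)->desc) +
 *                  sizeof(uint32_t), "outring_entry must be packed");
 */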

/*
 * Value marking a free job entry
 */
#define JR_JOB_FREE	0

/*
 * Caller information context object
 */
struct caller_info {
	struct caam_jobctx *jobctx; /* Caller job context object */
	uint32_t job_id;            /* Current Job ID */
	paddr_t pdesc;              /* Physical address of the descriptor */
};

/*
 * Job Ring module private data
 */
struct jr_privdata {
	vaddr_t baseaddr;        /* Job Ring base address */

	vaddr_t ctrladdr;        /* CAAM virtual base address */
	paddr_t jroffset;        /* Job Ring address offset */
	uint64_t paddr_inrings;  /* CAAM physical addr of input queue */
	uint64_t paddr_outrings; /* CAAM physical addr of output queue */

	uint8_t nb_jobs;         /* Number of Job ring entries managed */

	/* Input Job Ring Variables */
	struct inring_entry *inrings; /* Input JR HW queue */
	unsigned int inlock;          /* Input JR spin lock */
	uint16_t inwrite_index;       /* SW Index - next JR entry free */

	/* Output Job Ring Variables */
	struct outring_entry *outrings; /* Output JR HW queue */
	unsigned int outlock;           /* Output JR spin lock */
	uint16_t outread_index;         /* SW Index - next JR output done */

	/* Caller Information Variables */
	struct caller_info *callers;    /* Job Ring Caller information */
	unsigned int callers_lock;      /* Job Ring Caller spin lock */

	struct itr_handler it_handler;  /* Interrupt handler */
};

/*
 * Job Ring module private data reference
 */
static struct jr_privdata *jr_privdata;

/*
 * Free module resources
 *
 * @jr_priv   Reference to the module private data
 */
static void do_jr_free(struct jr_privdata *jr_priv)
{
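	/*
	 * Members may be NULL when the allocation failed part-way;
	 * caam_free() is assumed to tolerate NULL (as free() does).
	 */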
	if (jr_priv) {
		caam_free(jr_priv->inrings);
		caam_free(jr_priv->outrings);
		caam_free(jr_priv->callers);
		caam_free(jr_priv);
	}
}

/*
 * Allocate module resources
 *
 * @privdata  [out] Allocated Job Ring private data
 * @nb_jobs   Number of jobs to manage in the queue
 */
static enum caam_status do_jr_alloc(struct jr_privdata **privdata,
				    uint8_t nb_jobs)
{
	enum caam_status retstatus = CAAM_OUT_MEMORY;
	struct jr_privdata *jr_priv = NULL;

	/* Allocate the Job Ring private data */
	jr_priv = caam_calloc(sizeof(*jr_priv));

	if (!jr_priv) {
		JR_TRACE("Private Data allocation error");
		goto end_alloc;
	}

	/* Setup the number of jobs */
	jr_priv->nb_jobs = nb_jobs;

	/* Allocate the input and output job ring queues */
	jr_priv->inrings =
		caam_calloc_align(nb_jobs * sizeof(struct inring_entry));
	jr_priv->outrings =
		caam_calloc_align(nb_jobs * sizeof(struct outring_entry));

	/* Allocate the callers information */
	jr_priv->callers = caam_calloc(nb_jobs * sizeof(struct caller_info));

	if (!jr_priv->inrings || !jr_priv->outrings || !jr_priv->callers) {
		JR_TRACE("JR resources allocation error");
		goto end_alloc;
	}

	/* Initialize the spin locks */
	jr_priv->inlock = SPINLOCK_UNLOCK;
	jr_priv->outlock = SPINLOCK_UNLOCK;
	jr_priv->callers_lock = SPINLOCK_UNLOCK;

	/* Initialize the queue counters */
	jr_priv->inwrite_index = 0;
	jr_priv->outread_index = 0;

	/*
	 * Ensure that the allocated queue initialization is pushed to the
	 * physical memory
	 */
	cache_operation(TEE_CACHEFLUSH, jr_priv->inrings,
			nb_jobs * sizeof(struct inring_entry));
	cache_operation(TEE_CACHEFLUSH, jr_priv->outrings,
			nb_jobs * sizeof(struct outring_entry));

	retstatus = CAAM_NO_ERROR;
end_alloc:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_priv);
	else
		*privdata = jr_priv;

	return retstatus;
}
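
/*
 * Sizing example (illustrative): with nb_jobs = 10 and CFG_CAAM_64BIT,
 * the input ring takes 10 * 8 = 80 bytes and the output ring
 * 10 * 12 = 120 bytes. Both are flushed so that the CAAM engine sees
 * the zero-initialized entries in physical memory.
 */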

/*
 * Job Ring Interrupt handler
 *
 * @handler  Interrupt Handler structure
 */
static enum itr_return caam_jr_irqhandler(struct itr_handler *handler)
{
	JR_TRACE("Disable the interrupt");
	itr_disable(handler->it);

	/* Send a signal to exit WFE loop */
	sev();

	return ITRR_HANDLED;
}
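
/*
 * Note on the WFE/SEV pairing (sketch of the handshake): when the
 * input ring is full, do_jr_enqueue() spins on wfe(); this handler
 * issues sev() on a job completion interrupt so the waiter re-checks
 * the number of free slots. A spin_unlock on another core has the
 * same wake-up effect.
 */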

/*
 * Dequeue all completed jobs and call the job context callback
 * functions. Return the bit mask of the completed jobs that were
 * expected (@wait_job_ids parameter).
 *
 * @wait_job_ids  Expected Jobs to be completed
 */
static uint32_t do_jr_dequeue(uint32_t wait_job_ids)
{
	uint32_t ret_job_id = 0;
	struct caller_info *caller = NULL;
	struct outring_entry *jr_out = NULL;
	struct caam_jobctx *jobctx = NULL;
	uint32_t exceptions = 0;
	bool found = false;
	uint16_t idx_jr = 0;
	uint32_t nb_jobs_done = 0;
	size_t nb_jobs_inv = 0;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->outlock);

	nb_jobs_done = caam_hal_jr_get_nbjob_done(jr_privdata->baseaddr);

	if (nb_jobs_done == 0) {
		cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);
		return ret_job_id;
	}

	/* Ensure that output ring descriptor entries are not in cache */
	if ((jr_privdata->outread_index + nb_jobs_done) >
	    jr_privdata->nb_jobs) {
		/*
		 * Invalidate the whole circular job buffer because some
		 * completed jobs are at the beginning of the buffer
		 */
		jr_out = jr_privdata->outrings;
		nb_jobs_inv = jr_privdata->nb_jobs;
	} else {
		/* Invalidate only the completed jobs */
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];
		nb_jobs_inv = nb_jobs_done;
	}
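
	/*
	 * Example (illustrative): with nb_jobs = 8, outread_index = 6
	 * and nb_jobs_done = 4, the completed entries are 6, 7, 0 and 1,
	 * so the whole ring is invalidated rather than two disjoint
	 * areas.
	 */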

	cache_operation(TEE_CACHEINVALIDATE, jr_out,
			sizeof(struct outring_entry) * nb_jobs_inv);

	for (; nb_jobs_done; nb_jobs_done--) {
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];

		/*
		 * Lock the caller information array because enqueue is
		 * also touching it
		 */
		cpu_spin_lock(&jr_privdata->callers_lock);
		for (idx_jr = 0, found = false; idx_jr < jr_privdata->nb_jobs;
		     idx_jr++) {
			/*
			 * Search for the caller information corresponding to
			 * the completed job.
			 * Don't use the outread_index or inwrite_index because
			 * completion can be out of order compared to the input
			 * buffer
			 */
			caller = &jr_privdata->callers[idx_jr];
			if (caam_desc_pop(&jr_out->desc) == caller->pdesc) {
				jobctx = caller->jobctx;
				jobctx->status =
					caam_read_jobstatus(&jr_out->status);

				/* Update the returned Job IDs mask */
				if (caller->job_id & wait_job_ids)
					ret_job_id |= caller->job_id;

				JR_TRACE("JR id=%" PRId32
					 ", context @0x%08" PRIxVA,
					 caller->job_id, (vaddr_t)jobctx);
				/* Clear the Entry Descriptor DMA */
				caller->pdesc = 0;
				caller->job_id = JR_JOB_FREE;
				found = true;
				JR_TRACE("Free space #%" PRId16
					 " in the callers array",
					 idx_jr);
				break;
			}
		}
		cpu_spin_unlock(&jr_privdata->callers_lock);

		/*
		 * Remove the job from the output list even if no
		 * JR caller is found
		 */
		caam_hal_jr_del_job(jr_privdata->baseaddr);

		/*
		 * Increment the index to the next JR output entry, taking
		 * care that it is a circular buffer of nb_jobs size.
		 */
		jr_privdata->outread_index++;
		jr_privdata->outread_index %= jr_privdata->nb_jobs;

		if (found && jobctx->callback) {
			/* Finally, execute the user's callback */
			jobctx->callback(jobctx);
		}
	}

	cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);

	return ret_job_id;
}
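
/*
 * Job ID semantics (illustrative): a job ID is a one-hot mask
 * (1 << caller slot). Waiting on the jobs stored in slots 0 and 2
 * thus means wait_job_ids = 0x5, and do_jr_dequeue() returns the
 * subset of those bits whose job actually completed.
 */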

/*
 * Enqueue a new job in the Job Ring input queue. Keep the caller's
 * job context in the private callers array.
 *
 * @jobctx   Caller's job context
 * @job_id   [out] Job ID enqueued
 */
static enum caam_status do_jr_enqueue(struct caam_jobctx *jobctx,
				      uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_BUSY;
	struct inring_entry *cur_inrings = NULL;
	struct caller_info *caller = NULL;
	uint32_t exceptions = 0;
	uint32_t job_mask = 0;
	uint8_t idx_jr = 0;
	bool found = false;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->inlock);

	/*
	 * Stay locked until a slot is available.
	 * Check if there is an available JR entry in the HW.
	 */
	while (caam_hal_jr_read_nbslot_available(jr_privdata->baseaddr) == 0) {
		/*
		 * WFE will return thanks to a SEV generated by the
		 * interrupt handler or by a spin_unlock
		 */
		wfe();
	}

	/*
	 * A slot is free in the input ring, but that does not mean the
	 * previously pushed job is completed: completion is out of order.
	 * Look for a free entry in the callers array to store the caller
	 * data and derive a job ID for the completion.
	 *
	 * Lock the caller information array because dequeue is
	 * also touching it.
	 */
	cpu_spin_lock(&jr_privdata->callers_lock);
	for (idx_jr = 0; idx_jr < jr_privdata->nb_jobs; idx_jr++) {
		if (jr_privdata->callers[idx_jr].job_id == JR_JOB_FREE) {
			JR_TRACE("Found a space #%" PRId8
				 " free in the callers array",
				 idx_jr);
			job_mask = 1 << idx_jr;

			/* Store the caller information for the JR completion */
			caller = &jr_privdata->callers[idx_jr];
			caller->job_id = job_mask;
			caller->jobctx = jobctx;
			caller->pdesc = virt_to_phys((void *)jobctx->desc);

			found = true;
			break;
		}
	}
	cpu_spin_unlock(&jr_privdata->callers_lock);

	if (!found) {
		JR_TRACE("Error didn't find a free space in the callers array");
		goto end_enqueue;
	}

	JR_TRACE("Push id=%" PRId16 ", job (0x%08" PRIx32
		 ") context @0x%08" PRIxVA,
		 jr_privdata->inwrite_index, job_mask, (vaddr_t)jobctx);

	cur_inrings = &jr_privdata->inrings[jr_privdata->inwrite_index];

	/* Push the descriptor into the JR HW list */
	caam_desc_push(&cur_inrings->desc, caller->pdesc);

	/* Ensure that physical memory is up to date */
	cache_operation(TEE_CACHECLEAN, cur_inrings,
			sizeof(struct inring_entry));

	/*
	 * Increment the index to the next JR input entry, taking care
	 * that it is a circular buffer of nb_jobs size.
	 */
	jr_privdata->inwrite_index++;
	jr_privdata->inwrite_index %= jr_privdata->nb_jobs;

	/* Ensure that the input descriptor is pushed to physical memory */
	cache_operation(TEE_CACHECLEAN, jobctx->desc,
			DESC_SZBYTES(caam_desc_get_len(jobctx->desc)));

	/* Inform the HW that a new job is available */
	caam_hal_jr_add_newjob(jr_privdata->baseaddr);

	*job_id = job_mask;
	retstatus = CAAM_NO_ERROR;

end_enqueue:
	cpu_spin_unlock_xrestore(&jr_privdata->inlock, exceptions);

	return retstatus;
}
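
/*
 * Design note: because job IDs are one-hot bits of a 32-bit mask
 * (1 << caller slot), at most 32 jobs can be tracked at once;
 * nb_jobs is therefore expected not to exceed 32.
 */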

/*
 * Synchronous job completion callback
 *
 * @jobctx   Job context
 */
static void job_done(struct caam_jobctx *jobctx)
{
	jobctx->completion = true;
}

void caam_jr_cancel(uint32_t job_id)
{
	unsigned int idx = 0;

	JR_TRACE("Job cancel 0x%" PRIx32, job_id);
	for (idx = 0; idx < jr_privdata->nb_jobs; idx++) {
		/*
		 * Search for the caller information corresponding to
		 * the job_id mask.
		 */
		if (jr_privdata->callers[idx].job_id == job_id) {
			/* Clear the Entry Descriptor */
			jr_privdata->callers[idx].pdesc = 0;
			jr_privdata->callers[idx].job_id = JR_JOB_FREE;
			return;
		}
	}
}
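
/*
 * Note: caam_jr_cancel() only releases the caller tracking entry; a
 * descriptor already pushed to the input ring may still be processed
 * by the CAAM, and its completion is then silently discarded by
 * do_jr_dequeue().
 */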

enum caam_status caam_jr_dequeue(uint32_t job_ids, unsigned int timeout_ms)
{
	uint32_t job_complete = 0;
	uint32_t nb_loop = 0;
	bool infinite = false;

	if (timeout_ms == UINT_MAX)
		infinite = true;
	else
		nb_loop = timeout_ms * 100;

	do {
		/* Call the do_jr_dequeue function to dequeue the jobs */
		job_complete = do_jr_dequeue(job_ids);

		if (job_complete & job_ids)
			return CAAM_NO_ERROR;

		/* Check for a JR interrupt, otherwise wait a bit */
		if (!caam_hal_jr_check_ack_itr(jr_privdata->baseaddr))
			caam_udelay(10);
	} while (infinite || nb_loop--);

	return CAAM_TIMEOUT;
}
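
/*
 * Timing note (approximate): with timeout_ms = 100, the loop above
 * performs up to 100 * 100 = 10000 iterations of roughly 10 us each,
 * i.e. about 100 ms of busy-wait, plus the time spent in
 * do_jr_dequeue() itself.
 */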

enum caam_status caam_jr_enqueue(struct caam_jobctx *jobctx, uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused int timeout = 10; /* Nb of loops to poll job completion */

	if (!jobctx)
		return CAAM_BAD_PARAM;

	JR_DUMPDESC(jobctx->desc);

	if (!jobctx->callback && job_id) {
		JR_TRACE("Job callback not defined for an asynchronous job");
		return CAAM_BAD_PARAM;
	}

	if (jobctx->callback && !job_id) {
		JR_TRACE("Job ID not defined for an asynchronous job");
		return CAAM_BAD_PARAM;
	}

	jobctx->completion = false;
	jobctx->status = 0;

	/*
	 * If the job_id parameter is NULL, the job is synchronous, hence use
	 * the local job_done callback function
	 */
	if (!jobctx->callback && !job_id) {
		jobctx->callback = job_done;
		jobctx->context = jobctx;
	}

	retstatus = do_jr_enqueue(jobctx, &jobctx->id);

	if (retstatus != CAAM_NO_ERROR) {
		JR_TRACE("enqueue job error 0x%08x", retstatus);
		return retstatus;
	}

	/*
	 * If the job_id parameter is defined, the job is asynchronous, so
	 * return after setting the job_id value
	 */
	if (job_id) {
		*job_id = jobctx->id;
		return CAAM_PENDING;
	}

#ifdef TIMEOUT_COMPLETION
	/*
	 * The job is synchronous: wait until job completion or timeout
	 */
	while (!jobctx->completion && timeout--)
		caam_jr_dequeue(jobctx->id, 100);

	if (timeout <= 0) {
		/* Job timeout: cancel it and return an error */
		caam_jr_cancel(jobctx->id);
		retstatus = CAAM_TIMEOUT;
	} else {
		if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
			retstatus = CAAM_JOB_STATUS;
		else
			retstatus = CAAM_NO_ERROR;
	}
#else
	/*
	 * The job is synchronous: wait until it completes.
	 * Don't use a timeout because there is no HW timer and
	 * so the timeout would not be precise.
	 */
	while (!jobctx->completion)
		caam_jr_dequeue(jobctx->id, 100);

	if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
		retstatus = CAAM_JOB_STATUS;
	else
		retstatus = CAAM_NO_ERROR;
#endif

	/* Erase the local callback function */
	jobctx->callback = NULL;

	return retstatus;
}
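
/*
 * Usage sketch (illustrative; the descriptor construction and the
 * job_complete() callback are hypothetical):
 *
 *   struct caam_jobctx jobctx = { };
 *   uint32_t job_id = 0;
 *
 *   jobctx.desc = desc;       // CAAM descriptor built by the caller
 *
 *   // Synchronous: no callback and no job_id, returns once completed
 *   if (caam_jr_enqueue(&jobctx, NULL) != CAAM_NO_ERROR)
 *           return CAAM_FAILURE;
 *
 *   // Asynchronous: callback and job_id set, returns CAAM_PENDING
 *   jobctx.callback = job_complete;
 *   if (caam_jr_enqueue(&jobctx, &job_id) == CAAM_PENDING)
 *           caam_jr_dequeue(job_id, 100); // later, wait up to ~100 ms
 */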

enum caam_status caam_jr_init(struct caam_jrcfg *jrcfg)
{
	enum caam_status retstatus = CAAM_FAILURE;

	JR_TRACE("Initialization");

	/* Allocate the Job Ring resources */
	retstatus = do_jr_alloc(&jr_privdata, jrcfg->nb_jobs);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->ctrladdr = jrcfg->base;
	jr_privdata->jroffset = jrcfg->offset;

	retstatus =
		caam_hal_jr_setowner(jrcfg->base, jrcfg->offset, JROWN_ARM_S);
	JR_TRACE("JR setowner returned 0x%x", retstatus);

	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->baseaddr = jrcfg->base + jrcfg->offset;
	retstatus = caam_hal_jr_reset(jr_privdata->baseaddr);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	/*
	 * Get the physical addresses of the Input/Output queues.
	 * The HW configuration registers are 64 bits regardless of
	 * the CAAM or CPU addressing mode.
	 */
	jr_privdata->paddr_inrings = virt_to_phys(jr_privdata->inrings);
	jr_privdata->paddr_outrings = virt_to_phys(jr_privdata->outrings);
	if (!jr_privdata->paddr_inrings || !jr_privdata->paddr_outrings) {
		JR_TRACE("JR bad queue pointers");
		retstatus = CAAM_FAILURE;
		goto end_init;
	}

	caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
			   jr_privdata->paddr_inrings,
			   jr_privdata->paddr_outrings);

	/*
	 * Prepare the interrupt handler to secure the interrupt even
	 * if the interrupt is not used
	 */
	jr_privdata->it_handler.it = jrcfg->it_num;
	jr_privdata->it_handler.flags = ITRF_TRIGGER_LEVEL;
	jr_privdata->it_handler.handler = caam_jr_irqhandler;
	jr_privdata->it_handler.data = jr_privdata;

#ifdef CFG_NXP_CAAM_RUNTIME_JR
	itr_add(&jr_privdata->it_handler);
#endif
	caam_hal_jr_enable_itr(jr_privdata->baseaddr);

	retstatus = CAAM_NO_ERROR;

end_init:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_privdata);

	return retstatus;
}
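
/*
 * Initialization sketch (illustrative; the base, offset and interrupt
 * number below are made-up values, not a platform reference):
 *
 *   struct caam_jrcfg jrcfg = {
 *           .base = ctrl_base_va, // hypothetical CAAM control base
 *           .offset = 0x30000,    // hypothetical Job Ring offset
 *           .it_num = 72,         // hypothetical JR interrupt number
 *           .nb_jobs = 10,
 *   };
 *
 *   if (caam_jr_init(&jrcfg) != CAAM_NO_ERROR)
 *           panic();
 */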

enum caam_status caam_jr_halt(void)
{
	return caam_hal_jr_halt(jr_privdata->baseaddr);
}

enum caam_status caam_jr_flush(void)
{
	return caam_hal_jr_flush(jr_privdata->baseaddr);
}

void caam_jr_resume(uint32_t pm_hint)
{
	if (pm_hint == PM_HINT_CONTEXT_STATE) {
#ifndef CFG_NXP_CAAM_RUNTIME_JR
		/*
		 * When the CAAM is not used at runtime, the JR used to
		 * instantiate the RNG has been released to the Non-Secure
		 * world. Hence, reconfigure the Secure JR here and release
		 * it again after the RNG instantiation.
		 */
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_S);

		caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
				   jr_privdata->paddr_inrings,
				   jr_privdata->paddr_outrings);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */

		/* Read the current input ring index */
		jr_privdata->inwrite_index =
			caam_hal_jr_input_index(jr_privdata->baseaddr);
		/* Read the current output ring index */
		jr_privdata->outread_index =
			caam_hal_jr_output_index(jr_privdata->baseaddr);

		if (caam_rng_instantiation() != CAAM_NO_ERROR)
			panic();

#ifndef CFG_NXP_CAAM_RUNTIME_JR
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_NS);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */
	} else {
		caam_hal_jr_resume(jr_privdata->baseaddr);
	}
}

enum caam_status caam_jr_complete(void)
{
	enum caam_status ret = CAAM_BUSY;

	ret = caam_hal_jr_flush(jr_privdata->baseaddr);
	if (ret == CAAM_NO_ERROR)
		caam_hal_jr_resume(jr_privdata->baseaddr);

	return ret;
}