// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2019 NXP
 *
 * Brief   CAAM Job Rings manager.
 *         Implementation of functions to enqueue/dequeue CAAM Job Descriptors.
 */
#include <caam_common.h>
#include <caam_desc_helper.h>
#include <caam_hal_clk.h>
#include <caam_hal_jr.h>
#include <caam_io.h>
#include <caam_jr.h>
#include <caam_rng.h>
#include <caam_utils_delay.h>
#include <caam_utils_mem.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/pm.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <tee/cache.h>

/*
 * Free job identifier value
 */
#define JR_JOB_FREE	0

/*
 * Caller information context object
 */
struct caller_info {
	struct caam_jobctx *jobctx; /* Caller job context object */
	uint32_t job_id;            /* Current Job ID */
	paddr_t pdesc;              /* Physical address of the descriptor */
};
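
/*
 * A slot in the callers array is free when its job_id equals JR_JOB_FREE.
 * When a job is enqueued, the slot index is turned into a one-hot job
 * identifier (job_id = 1 << index), which is also the bit reported in the
 * completion mask returned by do_jr_dequeue().
 */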

/*
 * Job Ring module private data
 */
struct jr_privdata {
	vaddr_t baseaddr;        /* Job Ring base address */

	vaddr_t ctrladdr;        /* CAAM virtual base address */
	paddr_t jroffset;        /* Job Ring address offset */
	uint64_t paddr_inrings;  /* CAAM physical addr of input queue */
	uint64_t paddr_outrings; /* CAAM physical addr of output queue */

	uint8_t nb_jobs;         /* Number of Job Ring entries managed */

	/* Input Job Ring Variables */
	struct caam_inring_entry *inrings; /* Input JR HW queue */
	unsigned int inlock;               /* Input JR spin lock */
	uint16_t inwrite_index;            /* SW Index - next free JR entry */

	/* Output Job Ring Variables */
	struct caam_outring_entry *outrings; /* Output JR HW queue */
	unsigned int outlock;                /* Output JR spin lock */
	uint16_t outread_index;              /* SW Index - next JR output done */

	/* Caller Information Variables */
	struct caller_info *callers; /* Job Ring Caller information */
	unsigned int callers_lock;   /* Job Ring Caller spin lock */

	struct itr_handler it_handler; /* Interrupt handler */
};
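
/*
 * The input and output rings are circular buffers of nb_jobs entries that
 * the CAAM reads and writes through DMA. They live in cached memory, hence
 * the explicit cache clean/invalidate operations before and after the
 * hardware accesses them.
 */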

/*
 * Job Ring module private data reference
 */
static struct jr_privdata *jr_privdata;

/*
 * Free module resources
 *
 * @jr_priv  Reference to the module private data
 */
static void do_jr_free(struct jr_privdata *jr_priv)
{
	if (jr_priv) {
		caam_free(jr_priv->inrings);
		caam_free(jr_priv->outrings);
		caam_free(jr_priv->callers);
		caam_free(jr_priv);
	}
}

/*
 * Allocate module resources
 *
 * @privdata  [out] Allocated Job Ring private data
 * @nb_jobs   Number of jobs to manage in the queue
 */
static enum caam_status do_jr_alloc(struct jr_privdata **privdata,
				    uint8_t nb_jobs)
{
	enum caam_status retstatus = CAAM_OUT_MEMORY;
	struct jr_privdata *jr_priv = NULL;

	/* Allocate the Job Ring private data */
	jr_priv = caam_calloc(sizeof(*jr_priv));

	if (!jr_priv) {
		JR_TRACE("Private Data allocation error");
		goto end_alloc;
	}

	/* Setup the number of jobs */
	jr_priv->nb_jobs = nb_jobs;

	/* Allocate the input and output job ring queues */
	jr_priv->inrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_inring_entry));
	jr_priv->outrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_outring_entry));

	/* Allocate the callers information */
	jr_priv->callers = caam_calloc(nb_jobs * sizeof(struct caller_info));

	if (!jr_priv->inrings || !jr_priv->outrings || !jr_priv->callers) {
		JR_TRACE("JR resources allocation error");
		goto end_alloc;
	}

	/* Initialize the spin locks */
	jr_priv->inlock = SPINLOCK_UNLOCK;
	jr_priv->outlock = SPINLOCK_UNLOCK;
	jr_priv->callers_lock = SPINLOCK_UNLOCK;

	/* Initialize the queue counters */
	jr_priv->inwrite_index = 0;
	jr_priv->outread_index = 0;

	/*
	 * Ensure that the allocated queue initialization is pushed to
	 * physical memory
	 */
	cache_operation(TEE_CACHEFLUSH, jr_priv->inrings,
			nb_jobs * sizeof(struct caam_inring_entry));
	cache_operation(TEE_CACHEFLUSH, jr_priv->outrings,
			nb_jobs * sizeof(struct caam_outring_entry));

	retstatus = CAAM_NO_ERROR;
end_alloc:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_priv);
	else
		*privdata = jr_priv;

	return retstatus;
}

/*
 * Job Ring Interrupt handler
 *
 * @handler  Interrupt Handler structure
 */
static enum itr_return caam_jr_irqhandler(struct itr_handler *handler)
{
	JR_TRACE("Disable the interrupt");
	interrupt_disable(handler->chip, handler->it);

	/* Send a signal to exit the WFE loop */
	sev();

	return ITRR_HANDLED;
}
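
/*
 * The handler above does not process job completion itself: it only masks
 * the Job Ring interrupt and issues a SEV so that a thread blocked in WFE
 * (see do_jr_enqueue()) resumes. Completed jobs are actually popped from
 * the output ring by do_jr_dequeue() in the caller's context.
 */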

/*
 * Dequeue all completed jobs and, for each of them, call the caller's job
 * context callback. Return the bit mask of the completed jobs that were
 * expected (@wait_job_ids parameter).
 *
 * @wait_job_ids  Expected Jobs to be completed
 */
static uint32_t do_jr_dequeue(uint32_t wait_job_ids)
{
	uint32_t ret_job_id = 0;
	struct caller_info *caller = NULL;
	struct caam_outring_entry *jr_out = NULL;
	struct caam_jobctx *jobctx = NULL;
	uint32_t exceptions = 0;
	bool found = false;
	uint16_t idx_jr = 0;
	uint32_t nb_jobs_done = 0;
	size_t nb_jobs_inv = 0;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->outlock);

	nb_jobs_done = caam_hal_jr_get_nbjob_done(jr_privdata->baseaddr);

	if (nb_jobs_done == 0) {
		cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);
		return ret_job_id;
	}

	/* Ensure that output ring descriptor entries are not in cache */
	if ((jr_privdata->outread_index + nb_jobs_done) >
	    jr_privdata->nb_jobs) {
		/*
		 * Invalidate the whole circular job buffer because some
		 * completed jobs are at the beginning of the buffer
		 */
		jr_out = jr_privdata->outrings;
		nb_jobs_inv = jr_privdata->nb_jobs;
	} else {
		/* Invalidate only the completed jobs */
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];
		nb_jobs_inv = nb_jobs_done;
	}

	cache_operation(TEE_CACHEINVALIDATE, jr_out,
			sizeof(struct caam_outring_entry) * nb_jobs_inv);

	for (; nb_jobs_done; nb_jobs_done--) {
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];

		/*
		 * Lock the caller information array because enqueue is
		 * also touching it
		 */
		cpu_spin_lock(&jr_privdata->callers_lock);
		for (idx_jr = 0, found = false; idx_jr < jr_privdata->nb_jobs;
		     idx_jr++) {
			/*
			 * Search for the caller information corresponding to
			 * the completed job.
			 * Don't use the outread_index or inwrite_index because
			 * completion can be out of order compared to the input
			 * buffer
			 */
			caller = &jr_privdata->callers[idx_jr];
			if (caam_desc_pop(jr_out) == caller->pdesc) {
				jobctx = caller->jobctx;
				jobctx->status = caam_read_jobstatus(jr_out);

				/* Update the returned Job IDs mask */
				if (caller->job_id & wait_job_ids)
					ret_job_id |= caller->job_id;

				JR_TRACE("JR id=%" PRId32
					 ", context @0x%08" PRIxVA,
					 caller->job_id, (vaddr_t)jobctx);
				/* Clear the Entry Descriptor DMA */
				caller->pdesc = 0;
				caller->jobctx = NULL;
				caller->job_id = JR_JOB_FREE;
				found = true;
				JR_TRACE("Free space #%" PRId16
					 " in the callers array",
					 idx_jr);
				break;
			}
		}
		cpu_spin_unlock(&jr_privdata->callers_lock);

		/*
		 * Remove the job from the output list even if no
		 * caller was found for it
		 */
		caam_hal_jr_del_job(jr_privdata->baseaddr);

		/*
		 * Increment index to the next JR output entry taking care
		 * that it is a circular buffer of nb_jobs size.
		 */
		jr_privdata->outread_index++;
		jr_privdata->outread_index %= jr_privdata->nb_jobs;

		if (found && jobctx->callback) {
			/* Finally, execute the user's callback */
			jobctx->callback(jobctx);
		}
	}

	cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);

	return ret_job_id;
}
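
/*
 * Note that do_jr_dequeue() only reports the jobs that are part of
 * @wait_job_ids in its return mask, but any other job found completed in
 * the output ring is still finalized and its callback invoked.
 */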

/*
 * Enqueue a new job in the Job Ring input queue and store the caller's
 * job context in the private callers array.
 *
 * @jobctx  Caller's job context
 * @job_id  [out] Job ID enqueued
 */
static enum caam_status do_jr_enqueue(struct caam_jobctx *jobctx,
				      uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_BUSY;
	struct caam_inring_entry *cur_inrings = NULL;
	struct caller_info *caller = NULL;
	uint32_t exceptions = 0;
	uint32_t job_mask = 0;
	uint8_t idx_jr = 0;
	bool found = false;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->inlock);

	caam_hal_clk_enable(true);

	/*
	 * Stay locked until an input slot is available.
	 * Check if there is an available JR index in the HW
	 */
	while (caam_hal_jr_read_nbslot_available(jr_privdata->baseaddr) == 0) {
		/*
		 * WFE will return thanks to a SEV generated by the
		 * interrupt handler or by a spin_unlock
		 */
		wfe();
	}

	/*
	 * There is a free slot in the input ring, but that does not mean
	 * the corresponding job is completed: completion is out of order.
	 * Look for a free entry in the caller data to store the context
	 * and get a job ID used to track the completion.
	 *
	 * Lock the caller information array because dequeue is
	 * also touching it
	 */
	cpu_spin_lock(&jr_privdata->callers_lock);
	for (idx_jr = 0; idx_jr < jr_privdata->nb_jobs; idx_jr++) {
		if (jr_privdata->callers[idx_jr].job_id == JR_JOB_FREE) {
			JR_TRACE("Found a space #%" PRId8
				 " free in the callers array",
				 idx_jr);
			job_mask = 1 << idx_jr;

			/* Store the caller information for the JR completion */
			caller = &jr_privdata->callers[idx_jr];
			caller->job_id = job_mask;
			caller->jobctx = jobctx;
			caller->pdesc = virt_to_phys((void *)jobctx->desc);

			found = true;
			break;
		}
	}
	cpu_spin_unlock(&jr_privdata->callers_lock);

	if (!found) {
		JR_TRACE("Error didn't find a free space in the callers array");
		goto end_enqueue;
	}

	JR_TRACE("Push id=%" PRId16 ", job (0x%08" PRIx32
		 ") context @0x%08" PRIxVA,
		 jr_privdata->inwrite_index, job_mask, (vaddr_t)jobctx);

	cur_inrings = &jr_privdata->inrings[jr_privdata->inwrite_index];

	/* Push the descriptor into the JR HW list */
	caam_desc_push(cur_inrings, caller->pdesc);

	/* Ensure that physical memory is up to date */
	cache_operation(TEE_CACHECLEAN, cur_inrings,
			sizeof(struct caam_inring_entry));

	/*
	 * Increment index to the next JR input entry taking care that
	 * it is a circular buffer of nb_jobs size.
	 */
	jr_privdata->inwrite_index++;
	jr_privdata->inwrite_index %= jr_privdata->nb_jobs;

	/* Ensure that the input descriptor is pushed to physical memory */
	cache_operation(TEE_CACHECLEAN, jobctx->desc,
			DESC_SZBYTES(caam_desc_get_len(jobctx->desc)));

	/* Inform the HW that a new job is available */
	caam_hal_jr_add_newjob(jr_privdata->baseaddr);

	*job_id = job_mask;
	retstatus = CAAM_NO_ERROR;

end_enqueue:
	cpu_spin_unlock_xrestore(&jr_privdata->inlock, exceptions);

	return retstatus;
}

/*
 * Synchronous job completion callback
 *
 * @jobctx  Job context
 */
static void job_done(struct caam_jobctx *jobctx)
{
	jobctx->completion = true;
}

void caam_jr_cancel(uint32_t job_id)
{
	unsigned int idx = 0;

	cpu_spin_lock(&jr_privdata->callers_lock);

	JR_TRACE("Job cancel 0x%" PRIx32, job_id);
	for (idx = 0; idx < jr_privdata->nb_jobs; idx++) {
		/*
		 * Search for the caller information corresponding to
		 * the job_id mask.
		 */
		if (jr_privdata->callers[idx].job_id == job_id) {
			/* Clear the Entry Descriptor */
			jr_privdata->callers[idx].pdesc = 0;
			jr_privdata->callers[idx].jobctx = NULL;
			jr_privdata->callers[idx].job_id = JR_JOB_FREE;
			break;
		}
	}

	cpu_spin_unlock(&jr_privdata->callers_lock);
}

enum caam_status caam_jr_dequeue(uint32_t job_ids, unsigned int timeout_ms)
{
	uint32_t job_complete = 0;
	uint32_t nb_loop = 0;
	bool infinite = false;
	bool it_active = false;

	if (timeout_ms == UINT_MAX)
		infinite = true;
	else
		nb_loop = timeout_ms * 100;
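
	/*
	 * Each polling iteration below waits about 10 microseconds when no
	 * interrupt is pending, so nb_loop = timeout_ms * 100 approximates
	 * the requested timeout.
	 */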

	do {
		/* Call the do_jr_dequeue function to dequeue the jobs */
		job_complete = do_jr_dequeue(job_ids);

		/* Check if the JR interrupt is pending and acknowledge it */
		it_active = caam_hal_jr_check_ack_itr(jr_privdata->baseaddr);

		if (job_complete & job_ids)
			return CAAM_NO_ERROR;

		/* If no JR interrupt was pending, wait a bit before retrying */
		if (!it_active)
			caam_udelay(10);
	} while (infinite || (nb_loop--));

	return CAAM_TIMEOUT;
}

enum caam_status caam_jr_enqueue(struct caam_jobctx *jobctx, uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused int timeout = 10; /* Nb loops to poll job completion */

	if (!jobctx)
		return CAAM_BAD_PARAM;

	JR_DUMPDESC(jobctx->desc);

	if (!jobctx->callback && job_id) {
		JR_TRACE("Job Callback not defined whereas asynchronous");
		return CAAM_BAD_PARAM;
	}

	if (jobctx->callback && !job_id) {
		JR_TRACE("Job Id not defined whereas asynchronous");
		return CAAM_BAD_PARAM;
	}

	jobctx->completion = false;
	jobctx->status = 0;

	/*
	 * If the job_id parameter is NULL, the job is synchronous, hence use
	 * the local job_done callback function
	 */
	if (!jobctx->callback && !job_id) {
		jobctx->callback = job_done;
		jobctx->context = jobctx;
	}

	retstatus = do_jr_enqueue(jobctx, &jobctx->id);

	if (retstatus != CAAM_NO_ERROR) {
		JR_TRACE("enqueue job error 0x%08x", retstatus);
		return retstatus;
	}

	/*
	 * If the job_id parameter is defined, the job is asynchronous, so
	 * return after setting the job_id value
	 */
	if (job_id) {
		*job_id = jobctx->id;
		return CAAM_PENDING;
	}

#ifdef TIMEOUT_COMPLETION
	/*
	 * Job is synchronous: wait until job completion or timeout
	 */
	while (!jobctx->completion && timeout--)
		caam_jr_dequeue(jobctx->id, 100);

	if (timeout <= 0) {
		/* Job timeout, cancel it and return in error */
		caam_jr_cancel(jobctx->id);
		retstatus = CAAM_TIMEOUT;
	} else {
		if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
			retstatus = CAAM_JOB_STATUS;
		else
			retstatus = CAAM_NO_ERROR;
	}
#else
	/*
	 * Job is synchronous: wait until the job completes.
	 * Don't use a timeout because there is no HW timer and
	 * so the timeout would not be precise
	 */
	while (!jobctx->completion)
		caam_jr_dequeue(jobctx->id, 100);

	if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
		retstatus = CAAM_JOB_STATUS;
	else
		retstatus = CAAM_NO_ERROR;
#endif

	/* Erase the local callback function */
	jobctx->callback = NULL;

	return retstatus;
}
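
/*
 * Minimal usage sketches for caam_jr_enqueue() (illustrative only: desc,
 * my_callback() and my_context are hypothetical placeholders for a
 * caller-built CAAM descriptor and its completion handling).
 *
 * Synchronous job: no callback and no job_id, the call blocks until the
 * job completes or fails:
 *
 *	struct caam_jobctx jobctx = { };
 *
 *	jobctx.desc = desc;
 *	if (caam_jr_enqueue(&jobctx, NULL) != CAAM_NO_ERROR)
 *		return CAAM_FAILURE;
 *
 * Asynchronous job: provide a callback and a job_id pointer, the call
 * returns CAAM_PENDING and completion is reported through the callback
 * once caam_jr_dequeue() processes the output ring:
 *
 *	uint32_t job_id = 0;
 *
 *	jobctx.desc = desc;
 *	jobctx.callback = my_callback;
 *	jobctx.context = my_context;
 *	if (caam_jr_enqueue(&jobctx, &job_id) != CAAM_PENDING)
 *		return CAAM_FAILURE;
 *	...
 *	caam_jr_dequeue(job_id, timeout_ms);
 */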

enum caam_status caam_jr_init(struct caam_jrcfg *jrcfg)
{
	enum caam_status retstatus = CAAM_FAILURE;

	JR_TRACE("Initialization");

	/* Allocate the Job Ring resources */
	retstatus = do_jr_alloc(&jr_privdata, jrcfg->nb_jobs);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->ctrladdr = jrcfg->base;
	jr_privdata->jroffset = jrcfg->offset;

	retstatus =
		caam_hal_jr_setowner(jrcfg->base, jrcfg->offset, JROWN_ARM_S);
	JR_TRACE("JR setowner returned 0x%x", retstatus);

	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->baseaddr = jrcfg->base + jrcfg->offset;
	retstatus = caam_hal_jr_reset(jr_privdata->baseaddr);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	/*
	 * Get the physical addresses of the Input/Output queues.
	 * The HW configuration registers are 64 bits wide regardless of
	 * the CAAM or CPU addressing mode.
	 */
	jr_privdata->paddr_inrings = virt_to_phys(jr_privdata->inrings);
	jr_privdata->paddr_outrings = virt_to_phys(jr_privdata->outrings);
	if (!jr_privdata->paddr_inrings || !jr_privdata->paddr_outrings) {
		JR_TRACE("JR bad queue pointers");
		retstatus = CAAM_FAILURE;
		goto end_init;
	}

	caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
			   jr_privdata->paddr_inrings,
			   jr_privdata->paddr_outrings);

	/*
	 * Prepare the interrupt handler to secure the interrupt even
	 * if the interrupt is not used
	 */
	jr_privdata->it_handler.chip = interrupt_get_main_chip();
	jr_privdata->it_handler.it = jrcfg->it_num;
	jr_privdata->it_handler.flags = ITRF_TRIGGER_LEVEL;
	jr_privdata->it_handler.handler = caam_jr_irqhandler;
	jr_privdata->it_handler.data = jr_privdata;

#if defined(CFG_NXP_CAAM_RUNTIME_JR) && defined(CFG_CAAM_ITR)
	if (interrupt_add_handler(&jr_privdata->it_handler)) {
		retstatus = CAAM_FAILURE;
		goto end_init;
	}
#endif
	caam_hal_jr_enable_itr(jr_privdata->baseaddr);

	retstatus = CAAM_NO_ERROR;

end_init:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_privdata);

	return retstatus;
}
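
/*
 * Illustrative caller-side sketch of the configuration consumed by
 * caam_jr_init(); only the fields used in this file are shown and the
 * values are hypothetical examples, the real ones come from the platform
 * CAAM HAL:
 *
 *	struct caam_jrcfg jrcfg = {
 *		.base = caam_ctrl_base,     (CAAM controller virtual base)
 *		.offset = jr_block_offset,  (selected Job Ring offset)
 *		.it_num = jr_interrupt_num, (Job Ring interrupt number)
 *		.nb_jobs = 4,               (entries in the input/output rings)
 *	};
 *
 *	if (caam_jr_init(&jrcfg) != CAAM_NO_ERROR)
 *		panic();
 */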

enum caam_status caam_jr_halt(void)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused uint32_t job_complete = 0;

	retstatus = caam_hal_jr_halt(jr_privdata->baseaddr);

	/*
	 * All jobs in the input queue have now been processed, call the
	 * dequeue function to complete them.
	 */
	job_complete = do_jr_dequeue(UINT32_MAX);
	JR_TRACE("Completion of jobs mask 0x%" PRIx32, job_complete);

	return retstatus;
}

enum caam_status caam_jr_flush(void)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused uint32_t job_complete = 0;

	retstatus = caam_hal_jr_flush(jr_privdata->baseaddr);

	/*
	 * All jobs in the input queue have now been processed, call the
	 * dequeue function to complete them.
	 */
	job_complete = do_jr_dequeue(UINT32_MAX);
	JR_TRACE("Completion of jobs mask 0x%" PRIx32, job_complete);

	return retstatus;
}
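
/*
 * Both caam_jr_halt() and caam_jr_flush() drain whatever completed jobs
 * remain in the output ring with do_jr_dequeue(UINT32_MAX); the difference
 * between them lies in the underlying HAL operation, caam_hal_jr_halt()
 * versus caam_hal_jr_flush().
 */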

void caam_jr_resume(uint32_t pm_hint)
{
	if (pm_hint == PM_HINT_CONTEXT_STATE) {
#ifndef CFG_NXP_CAAM_RUNTIME_JR
		/*
		 * When the CAAM runtime JR is not used, the JR used to
		 * instantiate the RNG has been released to the Non-Secure
		 * world. Hence, the Secure JR must be reconfigured here and
		 * released again after the RNG instantiation.
		 */
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_S);

		caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
				   jr_privdata->paddr_inrings,
				   jr_privdata->paddr_outrings);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */

		/* Read the current input ring index */
		jr_privdata->inwrite_index =
			caam_hal_jr_input_index(jr_privdata->baseaddr);
		/* Read the current output ring index */
		jr_privdata->outread_index =
			caam_hal_jr_output_index(jr_privdata->baseaddr);

		if (caam_rng_instantiation() != CAAM_NO_ERROR)
			panic();

#ifndef CFG_NXP_CAAM_RUNTIME_JR
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_NS);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */
	} else {
		caam_hal_jr_resume(jr_privdata->baseaddr);
	}
}

enum caam_status caam_jr_complete(void)
{
	enum caam_status ret = CAAM_BUSY;

	ret = caam_hal_jr_flush(jr_privdata->baseaddr);
	if (ret == CAAM_NO_ERROR)
		caam_hal_jr_resume(jr_privdata->baseaddr);

	return ret;
}