xref: /optee_os/core/drivers/crypto/caam/caam_jr.c (revision 316fd6e9c4852b82d626732b2eec740b1f52be67)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2019 NXP
 *
 * Brief   CAAM Job Rings manager.
 *         Implementation of functions to enqueue/dequeue CAAM Job Descriptors
 */
#include <caam_common.h>
#include <caam_desc_helper.h>
#include <caam_hal_clk.h>
#include <caam_hal_jr.h>
#include <caam_io.h>
#include <caam_jr.h>
#include <caam_rng.h>
#include <caam_utils_delay.h>
#include <caam_utils_mem.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/pm.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <tee/cache.h>

/*
 * Job Free define
 */
#define JR_JOB_FREE	0
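
/*
 * Note: a job ID is a one-hot bit mask (1 << index into the callers
 * array, see do_jr_enqueue()), so the value 0 can never match a live
 * job and safely marks a free caller entry.
 */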

/*
 * Caller information context object
 */
struct caller_info {
	struct caam_jobctx *jobctx; /* Caller job context object */
	uint32_t job_id;            /* Current Job ID */
	paddr_t pdesc;              /* Physical address of the descriptor */
};

/*
 * Job Ring module private data
 */
struct jr_privdata {
	vaddr_t baseaddr;        /* Job Ring base address */

	vaddr_t ctrladdr;        /* CAAM virtual base address */
	paddr_t jroffset;        /* Job Ring address offset */
	uint64_t paddr_inrings;  /* CAAM physical addr of input queue */
	uint64_t paddr_outrings; /* CAAM physical addr of output queue */

	uint8_t nb_jobs;         /* Number of Job ring entries managed */

	/* Input Job Ring Variables */
	struct caam_inring_entry *inrings; /* Input JR HW queue */
	unsigned int inlock;          /* Input JR spin lock */
	uint16_t inwrite_index;       /* SW Index - next JR entry free */

	/* Output Job Ring Variables */
	struct caam_outring_entry *outrings; /* Output JR HW queue */
	unsigned int outlock;           /* Output JR spin lock */
	uint16_t outread_index;         /* SW Index - next JR output done */

	/* Caller Information Variables */
	struct caller_info *callers;    /* Job Ring Caller information */
	unsigned int callers_lock;      /* Job Ring Caller spin lock */

	struct itr_handler it_handler;  /* Interrupt handler */
};
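
/*
 * Both inrings and outrings are read/written by the CAAM DMA engine,
 * not only by the CPU, which is why every producer/consumer access
 * below is paired with an explicit cache clean, flush or invalidate.
 */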

/*
 * Job Ring module private data reference
 */
static struct jr_privdata *jr_privdata;

/*
 * Free module resources
 *
 * @jr_priv   Reference to the module private data
 */
static void do_jr_free(struct jr_privdata *jr_priv)
{
	if (jr_priv) {
		caam_free(jr_priv->inrings);
		caam_free(jr_priv->outrings);
		caam_free(jr_priv->callers);
		caam_free(jr_priv);
	}
}

/*
 * Allocate module resources
 *
 * @privdata  [out] Allocated Job Ring private data
 * @nb_jobs   Number of jobs to manage in the queue
 */
static enum caam_status do_jr_alloc(struct jr_privdata **privdata,
				    uint8_t nb_jobs)
{
	enum caam_status retstatus = CAAM_OUT_MEMORY;
	struct jr_privdata *jr_priv = NULL;

	/* Allocate the Job Ring private data */
	jr_priv = caam_calloc(sizeof(*jr_priv));

	if (!jr_priv) {
		JR_TRACE("Private Data allocation error");
		goto end_alloc;
	}

	/* Setup the number of jobs */
	jr_priv->nb_jobs = nb_jobs;

	/* Allocate the input and output job ring queues */
	jr_priv->inrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_inring_entry));
	jr_priv->outrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_outring_entry));

	/* Allocate the callers information */
	jr_priv->callers = caam_calloc(nb_jobs * sizeof(struct caller_info));

	if (!jr_priv->inrings || !jr_priv->outrings || !jr_priv->callers) {
		JR_TRACE("JR resources allocation error");
		goto end_alloc;
	}

	/* Initialize the spin locks */
	jr_priv->inlock = SPINLOCK_UNLOCK;
	jr_priv->outlock = SPINLOCK_UNLOCK;
	jr_priv->callers_lock = SPINLOCK_UNLOCK;

	/* Initialize the queue indexes */
	jr_priv->inwrite_index = 0;
	jr_priv->outread_index = 0;

	/*
	 * Ensure that the initialized queues are pushed to the physical
	 * memory
	 */
	cache_operation(TEE_CACHEFLUSH, jr_priv->inrings,
			nb_jobs * sizeof(struct caam_inring_entry));
	cache_operation(TEE_CACHEFLUSH, jr_priv->outrings,
			nb_jobs * sizeof(struct caam_outring_entry));

	retstatus = CAAM_NO_ERROR;
end_alloc:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_priv);
	else
		*privdata = jr_priv;

	return retstatus;
}

/*
 * Job Ring Interrupt handler
 *
 * @handler  Interrupt Handler structure
 */
static enum itr_return caam_jr_irqhandler(struct itr_handler *handler)
{
	JR_TRACE("Disable the interrupt");
	itr_disable(handler->it);

	/* Send a signal to exit WFE loop */
	sev();

	return ITRR_HANDLED;
}
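
/*
 * The handler above pairs with the wfe() loop in do_jr_enqueue(): when
 * the input ring is full, the enqueue path waits in WFE until either
 * this handler or a spin_unlock on another core issues a SEV event.
 */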

/*
 * Dequeues all completed jobs and calls each job context's callback
 * function. Returns the bit mask of the completed jobs that were
 * expected (@wait_job_ids parameter).
 *
 * @wait_job_ids  Expected jobs to be completed
 */
static uint32_t do_jr_dequeue(uint32_t wait_job_ids)
{
	uint32_t ret_job_id = 0;
	struct caller_info *caller = NULL;
	struct caam_outring_entry *jr_out = NULL;
	struct caam_jobctx *jobctx = NULL;
	uint32_t exceptions = 0;
	bool found = false;
	uint16_t idx_jr = 0;
	uint32_t nb_jobs_done = 0;
	size_t nb_jobs_inv = 0;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->outlock);

	nb_jobs_done = caam_hal_jr_get_nbjob_done(jr_privdata->baseaddr);

	if (nb_jobs_done == 0) {
		cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);
		return ret_job_id;
	}

	/* Ensure that output ring descriptor entries are not in cache */
	if ((jr_privdata->outread_index + nb_jobs_done) >
	    jr_privdata->nb_jobs) {
		/*
		 * Invalidate the whole circular job buffer because some
		 * completed jobs are at the beginning of the buffer
		 */
		jr_out = jr_privdata->outrings;
		nb_jobs_inv = jr_privdata->nb_jobs;
	} else {
		/* Invalidate only the completed jobs */
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];
		nb_jobs_inv = nb_jobs_done;
	}

	cache_operation(TEE_CACHEINVALIDATE, jr_out,
			sizeof(struct caam_outring_entry) * nb_jobs_inv);

	for (; nb_jobs_done; nb_jobs_done--) {
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];

		/*
		 * Lock the caller information array because enqueue is
		 * also touching it
		 */
		cpu_spin_lock(&jr_privdata->callers_lock);
		for (idx_jr = 0, found = false; idx_jr < jr_privdata->nb_jobs;
		     idx_jr++) {
			/*
			 * Search for the caller information corresponding to
			 * the completed JR.
			 * Don't use the outread_index or inwrite_index because
			 * completion can be out of order compared to the
			 * input buffer
			 */
			caller = &jr_privdata->callers[idx_jr];
			if (caam_desc_pop(jr_out) == caller->pdesc) {
				jobctx = caller->jobctx;
				jobctx->status = caam_read_jobstatus(jr_out);

				/* Update return Job IDs mask */
				if (caller->job_id & wait_job_ids)
					ret_job_id |= caller->job_id;

				JR_TRACE("JR id=%" PRId32
					 ", context @0x%08" PRIxVA,
					 caller->job_id, (vaddr_t)jobctx);
				/* Clear the Entry Descriptor DMA */
				caller->pdesc = 0;
				caller->jobctx = NULL;
				caller->job_id = JR_JOB_FREE;
				found = true;
				JR_TRACE("Free space #%" PRId16
					 " in the callers array",
					 idx_jr);
				break;
			}
		}
		cpu_spin_unlock(&jr_privdata->callers_lock);

		/*
		 * Remove the JR from the output list even if no
		 * JR caller found
		 */
		caam_hal_jr_del_job(jr_privdata->baseaddr);

		/*
		 * Increment index to next JR output entry taking care that
		 * it is a circular buffer of nb_jobs size.
		 */
		jr_privdata->outread_index++;
		jr_privdata->outread_index %= jr_privdata->nb_jobs;

		if (found && jobctx->callback) {
			/* Finally, execute user's callback */
			jobctx->callback(jobctx);
		}
	}

	cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);

	return ret_job_id;
}
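
/*
 * Example of the invalidation choice above: with nb_jobs = 4,
 * outread_index = 3 and nb_jobs_done = 2, the completed entries are
 * outrings[3] and outrings[0], i.e. the read wraps around, so the
 * whole ring is invalidated. With outread_index = 1 instead, only
 * outrings[1] and outrings[2] are invalidated.
 */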

/*
 * Enqueues a new job in the Job Ring input queue. Keeps the caller's
 * job context in the private array.
 *
 * @jobctx   Caller's job context
 * @job_id   [out] Job ID enqueued
 */
static enum caam_status do_jr_enqueue(struct caam_jobctx *jobctx,
				      uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_BUSY;
	struct caam_inring_entry *cur_inrings = NULL;
	struct caller_info *caller = NULL;
	uint32_t exceptions = 0;
	uint32_t job_mask = 0;
	uint8_t idx_jr = 0;
	bool found = false;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->inlock);

	/*
	 * Stay locked until a slot is available
	 * Check if there is an available JR index in the HW
	 */
	while (caam_hal_jr_read_nbslot_available(jr_privdata->baseaddr) == 0) {
		/*
		 * WFE will return thanks to a SEV generated by the
		 * interrupt handler or by a spin_unlock
		 */
		wfe();
	}

	/*
	 * A free slot in the input ring does not mean that a previously
	 * pushed job is completed; completion is out of order. Look for
	 * a free entry in the caller data to store the context and get
	 * a job ID for the completion.
	 *
	 * Lock the caller information array because dequeue is
	 * also touching it
	 */
	cpu_spin_lock(&jr_privdata->callers_lock);
	for (idx_jr = 0; idx_jr < jr_privdata->nb_jobs; idx_jr++) {
		if (jr_privdata->callers[idx_jr].job_id == JR_JOB_FREE) {
			JR_TRACE("Found a space #%" PRId8
				 " free in the callers array",
				 idx_jr);
			job_mask = 1 << idx_jr;

			/* Store the caller information for the JR completion */
			caller = &jr_privdata->callers[idx_jr];
			caller->job_id = job_mask;
			caller->jobctx = jobctx;
			caller->pdesc = virt_to_phys((void *)jobctx->desc);

			found = true;
			break;
		}
	}
	cpu_spin_unlock(&jr_privdata->callers_lock);

	if (!found) {
		JR_TRACE("Error didn't find a free space in the callers array");
		goto end_enqueue;
	}

	JR_TRACE("Push id=%" PRId16 ", job (0x%08" PRIx32
		 ") context @0x%08" PRIxVA,
		 jr_privdata->inwrite_index, job_mask, (vaddr_t)jobctx);

	cur_inrings = &jr_privdata->inrings[jr_privdata->inwrite_index];

	/* Push the descriptor into the JR HW list */
	caam_desc_push(cur_inrings, caller->pdesc);

	/* Ensure that physical memory is up to date */
	cache_operation(TEE_CACHECLEAN, cur_inrings,
			sizeof(struct caam_inring_entry));

	/*
	 * Increment index to next JR input entry taking care that
	 * it is a circular buffer of nb_jobs size.
	 */
	jr_privdata->inwrite_index++;
	jr_privdata->inwrite_index %= jr_privdata->nb_jobs;

	/* Ensure that the input descriptor is pushed to physical memory */
	cache_operation(TEE_CACHECLEAN, jobctx->desc,
			DESC_SZBYTES(caam_desc_get_len(jobctx->desc)));

	/* Inform HW that a new job is available */
	caam_hal_jr_add_newjob(jr_privdata->baseaddr);

	*job_id = job_mask;
	retstatus = CAAM_NO_ERROR;

end_enqueue:
	cpu_spin_unlock_xrestore(&jr_privdata->inlock, exceptions);

	return retstatus;
}
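
/*
 * Ordering in do_jr_enqueue() matters: both the ring entry and the
 * job descriptor are cache-cleaned to physical memory before
 * caam_hal_jr_add_newjob() notifies the CAAM, since the CAAM DMA
 * engine fetches them directly from memory.
 */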

/*
 * Synchronous job completion callback
 *
 * @jobctx   Job context
 */
static void job_done(struct caam_jobctx *jobctx)
{
	jobctx->completion = true;
}

void caam_jr_cancel(uint32_t job_id)
{
	unsigned int idx = 0;

	cpu_spin_lock(&jr_privdata->callers_lock);

	JR_TRACE("Job cancel 0x%" PRIx32, job_id);
	for (idx = 0; idx < jr_privdata->nb_jobs; idx++) {
		/*
		 * Search for the caller information corresponding to
		 * the job_id mask.
		 */
		if (jr_privdata->callers[idx].job_id == job_id) {
			/* Clear the Entry Descriptor */
			jr_privdata->callers[idx].pdesc = 0;
			jr_privdata->callers[idx].jobctx = NULL;
			jr_privdata->callers[idx].job_id = JR_JOB_FREE;
			break;
		}
	}

	cpu_spin_unlock(&jr_privdata->callers_lock);
}

enum caam_status caam_jr_dequeue(uint32_t job_ids, unsigned int timeout_ms)
{
	uint32_t job_complete = 0;
	uint32_t nb_loop = 0;
	bool infinite = false;
	bool it_active = false;

	if (timeout_ms == UINT_MAX)
		infinite = true;
	else
		nb_loop = timeout_ms * 100;

	do {
		/* Call the do_jr_dequeue function to dequeue the jobs */
		job_complete = do_jr_dequeue(job_ids);

		/* Check if the JR interrupt is pending and acknowledge it */
		it_active = caam_hal_jr_check_ack_itr(jr_privdata->baseaddr);

		if (job_complete & job_ids)
			return CAAM_NO_ERROR;

		/* If no JR interrupt was pending, wait a bit */
		if (!it_active)
			caam_udelay(10);
	} while (infinite || nb_loop--);

	return CAAM_TIMEOUT;
}
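
/*
 * Timeout granularity above: each loop iteration without a pending
 * interrupt waits 10 us, so nb_loop = timeout_ms * 100 iterations
 * gives roughly timeout_ms milliseconds. This is a delay-loop
 * approximation, not a HW timer.
 */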

enum caam_status caam_jr_enqueue(struct caam_jobctx *jobctx, uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused int timeout = 10; /* Nb loops to poll job completion */

	if (!jobctx)
		return CAAM_BAD_PARAM;

	JR_DUMPDESC(jobctx->desc);

	if (!jobctx->callback && job_id) {
		JR_TRACE("Asynchronous job requested without a callback");
		return CAAM_BAD_PARAM;
	}

	if (jobctx->callback && !job_id) {
		JR_TRACE("Asynchronous job requested without a job_id");
		return CAAM_BAD_PARAM;
	}

	jobctx->completion = false;
	jobctx->status = 0;

	/*
	 * If parameter job_id is NULL, the job is synchronous, hence use
	 * the local job_done callback function
	 */
	if (!jobctx->callback && !job_id) {
		jobctx->callback = job_done;
		jobctx->context = jobctx;
	}

	retstatus = do_jr_enqueue(jobctx, &jobctx->id);

	if (retstatus != CAAM_NO_ERROR) {
		JR_TRACE("enqueue job error 0x%08x", retstatus);
		return retstatus;
	}

	/*
	 * If parameter job_id is defined, the job is asynchronous, so
	 * return after setting the job_id value
	 */
	if (job_id) {
		*job_id = jobctx->id;
		return CAAM_PENDING;
	}

#ifdef TIMEOUT_COMPLETION
	/*
	 * The job is synchronous: wait until job completion or timeout
	 */
	while (!jobctx->completion && timeout--)
		caam_jr_dequeue(jobctx->id, 100);

	if (timeout <= 0) {
		/* Job timeout, cancel it and return in error */
		caam_jr_cancel(jobctx->id);
		retstatus = CAAM_TIMEOUT;
	} else {
		if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
			retstatus = CAAM_JOB_STATUS;
		else
			retstatus = CAAM_NO_ERROR;
	}
#else
	/*
	 * The job is synchronous: wait until it completes.
	 * Don't use a timeout because there is no HW timer, so a
	 * timeout would not be precise.
	 */
	while (!jobctx->completion)
		caam_jr_dequeue(jobctx->id, 100);

	if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
		retstatus = CAAM_JOB_STATUS;
	else
		retstatus = CAAM_NO_ERROR;
#endif

	/* Erase the local callback function */
	jobctx->callback = NULL;

	return retstatus;
}
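
/*
 * Illustrative call patterns (a sketch, not part of this driver; it
 * assumes a hypothetical caller that has already built a descriptor
 * "desc" with the caam_desc_* helpers and, for the asynchronous case,
 * a callback "my_cb" with context "my_data"):
 *
 *	struct caam_jobctx jobctx = { };
 *	uint32_t job_id = 0;
 *
 *	jobctx.desc = desc;
 *
 *	// Synchronous: no callback, no job_id; returns CAAM_NO_ERROR,
 *	// CAAM_JOB_STATUS (then check jobctx.status) or an error code.
 *	ret = caam_jr_enqueue(&jobctx, NULL);
 *
 *	// Asynchronous: callback and job_id both required; returns
 *	// CAAM_PENDING, completion is reported through my_cb or can be
 *	// waited on with caam_jr_dequeue(job_id, timeout_ms).
 *	jobctx.callback = my_cb;
 *	jobctx.context = &my_data;
 *	ret = caam_jr_enqueue(&jobctx, &job_id);
 */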

enum caam_status caam_jr_init(struct caam_jrcfg *jrcfg)
{
	enum caam_status retstatus = CAAM_FAILURE;

	JR_TRACE("Initialization");

	/* Allocate the Job Ring resources */
	retstatus = do_jr_alloc(&jr_privdata, jrcfg->nb_jobs);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->ctrladdr = jrcfg->base;
	jr_privdata->jroffset = jrcfg->offset;

	retstatus = caam_hal_jr_setowner(jrcfg->base, jrcfg->offset,
					 JROWN_ARM_S);
	JR_TRACE("JR setowner returned 0x%x", retstatus);

	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->baseaddr = jrcfg->base + jrcfg->offset;
	retstatus = caam_hal_jr_reset(jr_privdata->baseaddr);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	/*
	 * Get the physical address of the Input/Output queues
	 * The HW configuration uses 64-bit registers regardless of
	 * the CAAM or CPU addressing mode.
	 */
	jr_privdata->paddr_inrings = virt_to_phys(jr_privdata->inrings);
	jr_privdata->paddr_outrings = virt_to_phys(jr_privdata->outrings);
	if (!jr_privdata->paddr_inrings || !jr_privdata->paddr_outrings) {
		JR_TRACE("JR bad queue pointers");
		retstatus = CAAM_FAILURE;
		goto end_init;
	}

	caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
			   jr_privdata->paddr_inrings,
			   jr_privdata->paddr_outrings);

	/*
	 * Prepare the interrupt handler to secure the interrupt even
	 * if the interrupt is not used
	 */
	jr_privdata->it_handler.it = jrcfg->it_num;
	jr_privdata->it_handler.flags = ITRF_TRIGGER_LEVEL;
	jr_privdata->it_handler.handler = caam_jr_irqhandler;
	jr_privdata->it_handler.data = jr_privdata;

#if defined(CFG_NXP_CAAM_RUNTIME_JR) && defined(CFG_CAAM_ITR)
	itr_add(&jr_privdata->it_handler);
#endif
	caam_hal_jr_enable_itr(jr_privdata->baseaddr);

	retstatus = CAAM_NO_ERROR;

end_init:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_privdata);

	return retstatus;
}

enum caam_status caam_jr_halt(void)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused uint32_t job_complete = 0;

	retstatus = caam_hal_jr_halt(jr_privdata->baseaddr);

	/*
	 * All jobs in the input queue have been processed; call the
	 * dequeue function to complete them.
	 */
	job_complete = do_jr_dequeue(UINT32_MAX);
	JR_TRACE("Completion of jobs mask 0x%" PRIx32, job_complete);

	return retstatus;
}

enum caam_status caam_jr_flush(void)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused uint32_t job_complete = 0;

	retstatus = caam_hal_jr_flush(jr_privdata->baseaddr);

	/*
	 * All jobs in the input queue have been processed; call the
	 * dequeue function to complete them.
	 */
	job_complete = do_jr_dequeue(UINT32_MAX);
	JR_TRACE("Completion of jobs mask 0x%" PRIx32, job_complete);

	return retstatus;
}

void caam_jr_resume(uint32_t pm_hint)
{
	if (pm_hint == PM_HINT_CONTEXT_STATE) {
#ifndef CFG_NXP_CAAM_RUNTIME_JR
		/*
		 * If the CAAM is not used at runtime, the JR used to
		 * instantiate the RNG has been released to the
		 * Non-Secure world; hence, reconfigure the JR as
		 * Secure here and release it again after the RNG
		 * instantiation
		 */
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_S);

		caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
				   jr_privdata->paddr_inrings,
				   jr_privdata->paddr_outrings);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */

		/* Read the current input ring index */
		jr_privdata->inwrite_index =
			caam_hal_jr_input_index(jr_privdata->baseaddr);
		/* Read the current output ring index */
		jr_privdata->outread_index =
			caam_hal_jr_output_index(jr_privdata->baseaddr);

		if (caam_rng_instantiation() != CAAM_NO_ERROR)
			panic();

#ifndef CFG_NXP_CAAM_RUNTIME_JR
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_NS);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */
	} else {
		caam_hal_jr_resume(jr_privdata->baseaddr);
	}
}

enum caam_status caam_jr_complete(void)
{
	enum caam_status ret = CAAM_BUSY;

	ret = caam_hal_jr_flush(jr_privdata->baseaddr);
	if (ret == CAAM_NO_ERROR)
		caam_hal_jr_resume(jr_privdata->baseaddr);

	return ret;
}