// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2019 NXP
 *
 * Brief   CAAM Job Rings manager.
 *         Implementation of functions to enqueue/dequeue CAAM Job Descriptors
 */
#include <caam_common.h>
#include <caam_desc_helper.h>
#include <caam_hal_jr.h>
#include <caam_io.h>
#include <caam_jr.h>
#include <caam_rng.h>
#include <caam_utils_delay.h>
#include <caam_utils_mem.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/pm.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>
#include <tee/cache.h>

/*
 * Job Free define
 */
#define JR_JOB_FREE	0

/*
 * Caller information context object
 */
struct caller_info {
	struct caam_jobctx *jobctx; /* Caller job context object */
	uint32_t job_id;            /* Current Job ID */
	paddr_t pdesc;              /* Physical address of the descriptor */
};

/*
 * Job Ring module private data
 */
struct jr_privdata {
	vaddr_t baseaddr;        /* Job Ring base address */

	vaddr_t ctrladdr;        /* CAAM virtual base address */
	paddr_t jroffset;        /* Job Ring address offset */
	uint64_t paddr_inrings;  /* CAAM physical addr of input queue */
	uint64_t paddr_outrings; /* CAAM physical addr of output queue */

	uint8_t nb_jobs;         /* Number of Job Ring entries managed */

	/* Input Job Ring variables */
	struct caam_inring_entry *inrings; /* Input JR HW queue */
	unsigned int inlock;               /* Input JR spin lock */
	uint16_t inwrite_index;            /* SW index - next free JR entry */

	/* Output Job Ring variables */
	struct caam_outring_entry *outrings; /* Output JR HW queue */
	unsigned int outlock;                /* Output JR spin lock */
	uint16_t outread_index;              /* SW index - next JR output done */

	/* Caller information variables */
	struct caller_info *callers; /* Job Ring caller information */
	unsigned int callers_lock;   /* Job Ring caller spin lock */

	struct itr_handler it_handler; /* Interrupt handler */
};

/*
 * Job Ring module private data reference
 */
static struct jr_privdata *jr_privdata;

/*
 * Free module resources
 *
 * @jr_priv  Reference to the module private data
 */
static void do_jr_free(struct jr_privdata *jr_priv)
{
	if (jr_priv) {
		caam_free(jr_priv->inrings);
		caam_free(jr_priv->outrings);
		caam_free(jr_priv->callers);
		caam_free(jr_priv);
	}
}

/*
 * Allocate module resources
 *
 * @privdata  [out] Allocated Job Ring private data
 * @nb_jobs   Number of jobs to manage in the queue
 */
static enum caam_status do_jr_alloc(struct jr_privdata **privdata,
				    uint8_t nb_jobs)
{
	enum caam_status retstatus = CAAM_OUT_MEMORY;
	struct jr_privdata *jr_priv = NULL;

	/* Allocate the Job Ring private data */
	jr_priv = caam_calloc(sizeof(*jr_priv));
	if (!jr_priv) {
		JR_TRACE("Private Data allocation error");
		goto end_alloc;
	}

	/* Setup the number of jobs */
	jr_priv->nb_jobs = nb_jobs;

	/* Allocate the input and output job ring queues */
	jr_priv->inrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_inring_entry));
	jr_priv->outrings =
		caam_calloc_align(nb_jobs * sizeof(struct caam_outring_entry));

	/* Allocate the callers information */
	jr_priv->callers = caam_calloc(nb_jobs * sizeof(struct caller_info));

	if (!jr_priv->inrings || !jr_priv->outrings || !jr_priv->callers) {
		JR_TRACE("JR resources allocation error");
		goto end_alloc;
	}

	/* Initialize the spin locks */
	jr_priv->inlock = SPINLOCK_UNLOCK;
	jr_priv->outlock = SPINLOCK_UNLOCK;
	jr_priv->callers_lock = SPINLOCK_UNLOCK;

	/* Initialize the queue indexes */
	jr_priv->inwrite_index = 0;
	jr_priv->outread_index = 0;

	/*
	 * Ensure that the initialized queues are pushed to physical
	 * memory
	 */
	cache_operation(TEE_CACHEFLUSH, jr_priv->inrings,
			nb_jobs * sizeof(struct caam_inring_entry));
	cache_operation(TEE_CACHEFLUSH, jr_priv->outrings,
			nb_jobs * sizeof(struct caam_outring_entry));

	retstatus = CAAM_NO_ERROR;
end_alloc:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_priv);
	else
		*privdata = jr_priv;

	return retstatus;
}

/*
 * Job Ring Interrupt handler
 *
 * @handler  Interrupt handler structure
 */
static enum itr_return caam_jr_irqhandler(struct itr_handler *handler)
{
	JR_TRACE("Disable the interrupt");
	itr_disable(handler->it);

	/* Send a signal to exit the WFE loop */
	sev();

	return ITRR_HANDLED;
}

/*
 * Dequeues all completed jobs and calls each job context's callback
 * function. Returns the bit mask of the completed jobs that were
 * expected (@wait_job_ids parameter).
 *
 * @wait_job_ids  Expected jobs to be completed
 */
static uint32_t do_jr_dequeue(uint32_t wait_job_ids)
{
	uint32_t ret_job_id = 0;
	struct caller_info *caller = NULL;
	struct caam_outring_entry *jr_out = NULL;
	struct caam_jobctx *jobctx = NULL;
	uint32_t exceptions = 0;
	bool found = false;
	uint16_t idx_jr = 0;
	uint32_t nb_jobs_done = 0;
	size_t nb_jobs_inv = 0;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->outlock);

	nb_jobs_done = caam_hal_jr_get_nbjob_done(jr_privdata->baseaddr);

	if (nb_jobs_done == 0) {
		cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);
		return ret_job_id;
	}

	/* Ensure that output ring descriptor entries are not in cache */
	if ((jr_privdata->outread_index + nb_jobs_done) >
	    jr_privdata->nb_jobs) {
		/*
		 * Invalidate the whole circular job buffer because some
		 * completed jobs wrap to the beginning of the buffer
		 */
		jr_out = jr_privdata->outrings;
		nb_jobs_inv = jr_privdata->nb_jobs;
	} else {
		/* Invalidate only the completed jobs */
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];
		nb_jobs_inv = nb_jobs_done;
	}

	cache_operation(TEE_CACHEINVALIDATE, jr_out,
			sizeof(struct caam_outring_entry) * nb_jobs_inv);

	for (; nb_jobs_done; nb_jobs_done--) {
		jr_out = &jr_privdata->outrings[jr_privdata->outread_index];

		/*
		 * Lock the caller information array because enqueue is
		 * also touching it
		 */
		cpu_spin_lock(&jr_privdata->callers_lock);
		for (idx_jr = 0, found = false; idx_jr < jr_privdata->nb_jobs;
		     idx_jr++) {
			/*
			 * Search for the caller information corresponding
			 * to the completed job.
			 * Don't use outread_index or inwrite_index because
			 * completion can be out of order compared to the
			 * input buffer.
			 */
			caller = &jr_privdata->callers[idx_jr];
			if (caam_desc_pop(jr_out) == caller->pdesc) {
				jobctx = caller->jobctx;
				jobctx->status = caam_read_jobstatus(jr_out);

				/* Update the returned job IDs mask */
				if (caller->job_id & wait_job_ids)
					ret_job_id |= caller->job_id;

				JR_TRACE("JR id=%" PRId32
					 ", context @0x%08" PRIxVA,
					 caller->job_id, (vaddr_t)jobctx);
				/* Clear the entry descriptor DMA address */
				caller->pdesc = 0;
				caller->job_id = JR_JOB_FREE;
				found = true;
				JR_TRACE("Free space #%" PRId16
					 " in the callers array",
					 idx_jr);
				break;
			}
		}
		cpu_spin_unlock(&jr_privdata->callers_lock);

		/*
		 * Remove the job from the output list even if no
		 * caller was found
		 */
		caam_hal_jr_del_job(jr_privdata->baseaddr);

		/*
		 * Increment the index to the next JR output entry, taking
		 * care that it is a circular buffer of nb_jobs size.
		 */
		jr_privdata->outread_index++;
		jr_privdata->outread_index %= jr_privdata->nb_jobs;

		if (found && jobctx->callback) {
			/* Finally, execute the user's callback */
			jobctx->callback(jobctx);
		}
	}

	cpu_spin_unlock_xrestore(&jr_privdata->outlock, exceptions);

	return ret_job_id;
}

/*
 * Enqueues a new job in the Job Ring input queue and keeps the caller's
 * job context in the private callers array.
 *
 * @jobctx  Caller's job context
 * @job_id  [out] Job ID enqueued
 */
static enum caam_status do_jr_enqueue(struct caam_jobctx *jobctx,
				      uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_BUSY;
	struct caam_inring_entry *cur_inrings = NULL;
	struct caller_info *caller = NULL;
	uint32_t exceptions = 0;
	uint32_t job_mask = 0;
	uint8_t idx_jr = 0;
	bool found = false;

	exceptions = cpu_spin_lock_xsave(&jr_privdata->inlock);

	/*
	 * Stay locked until a job slot is available.
	 * Check if there is an available JR index in the HW.
	 */
	while (caam_hal_jr_read_nbslot_available(jr_privdata->baseaddr) == 0) {
		/*
		 * WFE will return thanks to a SEV generated by the
		 * interrupt handler or by a spin_unlock
		 */
		wfe();
	}

	/*
	 * There is a free slot in the input ring, but that doesn't mean
	 * the jobs already pushed have completed: completion is out of
	 * order. Look for a free entry in the caller data to store the
	 * context and get a job ID for the completion.
	 *
	 * Lock the caller information array because dequeue is also
	 * touching it.
	 */
	cpu_spin_lock(&jr_privdata->callers_lock);
	for (idx_jr = 0; idx_jr < jr_privdata->nb_jobs; idx_jr++) {
		if (jr_privdata->callers[idx_jr].job_id == JR_JOB_FREE) {
			JR_TRACE("Found a space #%" PRId8
				 " free in the callers array",
				 idx_jr);
			job_mask = 1 << idx_jr;

			/* Store the caller information for the completion */
			caller = &jr_privdata->callers[idx_jr];
			caller->job_id = job_mask;
			caller->jobctx = jobctx;
			caller->pdesc = virt_to_phys((void *)jobctx->desc);

			found = true;
			break;
		}
	}
	cpu_spin_unlock(&jr_privdata->callers_lock);

	if (!found) {
		JR_TRACE("Error didn't find a free space in the callers array");
		goto end_enqueue;
	}

	JR_TRACE("Push id=%" PRId16 ", job (0x%08" PRIx32
		 ") context @0x%08" PRIxVA,
		 jr_privdata->inwrite_index, job_mask, (vaddr_t)jobctx);

	cur_inrings = &jr_privdata->inrings[jr_privdata->inwrite_index];

	/* Push the descriptor into the JR HW list */
	caam_desc_push(cur_inrings, caller->pdesc);

	/* Ensure that physical memory is up to date */
	cache_operation(TEE_CACHECLEAN, cur_inrings,
			sizeof(struct caam_inring_entry));

	/*
	 * Increment the index to the next JR input entry, taking care
	 * that it is a circular buffer of nb_jobs size.
	 */
	jr_privdata->inwrite_index++;
	jr_privdata->inwrite_index %= jr_privdata->nb_jobs;

	/* Ensure that the input descriptor is pushed to physical memory */
	cache_operation(TEE_CACHECLEAN, jobctx->desc,
			DESC_SZBYTES(caam_desc_get_len(jobctx->desc)));

	/* Inform the HW that a new job is available */
	caam_hal_jr_add_newjob(jr_privdata->baseaddr);

	*job_id = job_mask;
	retstatus = CAAM_NO_ERROR;

end_enqueue:
	cpu_spin_unlock_xrestore(&jr_privdata->inlock, exceptions);

	return retstatus;
}
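
/*
 * Note: the job ID handed back by do_jr_enqueue() is a one-hot bit mask
 * (1 << index of the entry in the callers array). Several job IDs can
 * therefore be OR'ed together and waited on in a single call to
 * caam_jr_dequeue().
 */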

/*
 * Synchronous job completion callback
 *
 * @jobctx  Job context
 */
static void job_done(struct caam_jobctx *jobctx)
{
	jobctx->completion = true;
}

void caam_jr_cancel(uint32_t job_id)
{
	unsigned int idx = 0;

	JR_TRACE("Job cancel 0x%" PRIx32, job_id);
	for (idx = 0; idx < jr_privdata->nb_jobs; idx++) {
		/*
		 * Search for the caller information corresponding to
		 * the job_id mask.
		 */
		if (jr_privdata->callers[idx].job_id == job_id) {
			/* Clear the entry descriptor */
			jr_privdata->callers[idx].pdesc = 0;
			jr_privdata->callers[idx].job_id = JR_JOB_FREE;
			return;
		}
	}
}

enum caam_status caam_jr_dequeue(uint32_t job_ids, unsigned int timeout_ms)
{
	uint32_t job_complete = 0;
	uint32_t nb_loop = 0;
	bool infinite = false;

	if (timeout_ms == UINT_MAX)
		infinite = true;
	else
		nb_loop = timeout_ms * 100; /* 10 us delay per loop below */

	do {
		/* Dequeue the completed jobs */
		job_complete = do_jr_dequeue(job_ids);

		if (job_complete & job_ids)
			return CAAM_NO_ERROR;

		/* If there is no pending JR interrupt, wait a bit */
		if (!caam_hal_jr_check_ack_itr(jr_privdata->baseaddr))
			caam_udelay(10);
	} while (infinite || nb_loop--);

	return CAAM_TIMEOUT;
}
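
/*
 * Usage sketch (illustrative only, id1/id2 are hypothetical job IDs
 * returned by caam_jr_enqueue()): the call returns CAAM_NO_ERROR as
 * soon as at least one job of the mask completes, or CAAM_TIMEOUT
 * after timeout_ms (pass UINT_MAX to wait forever):
 *
 *	if (caam_jr_dequeue(id1 | id2, 100) == CAAM_TIMEOUT) {
 *		caam_jr_cancel(id1);
 *		caam_jr_cancel(id2);
 *	}
 */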

enum caam_status caam_jr_enqueue(struct caam_jobctx *jobctx, uint32_t *job_id)
{
	enum caam_status retstatus = CAAM_FAILURE;
	__maybe_unused int timeout = 10; /* Nb of loops to poll job completion */

	if (!jobctx)
		return CAAM_BAD_PARAM;

	JR_DUMPDESC(jobctx->desc);

	if (!jobctx->callback && job_id) {
		JR_TRACE("Job callback not defined whereas job is asynchronous");
		return CAAM_BAD_PARAM;
	}

	if (jobctx->callback && !job_id) {
		JR_TRACE("Job ID not defined whereas job is asynchronous");
		return CAAM_BAD_PARAM;
	}

	jobctx->completion = false;
	jobctx->status = 0;

	/*
	 * If the job_id parameter is NULL, the job is synchronous, hence
	 * use the local job_done() callback function
	 */
	if (!jobctx->callback && !job_id) {
		jobctx->callback = job_done;
		jobctx->context = jobctx;
	}

	retstatus = do_jr_enqueue(jobctx, &jobctx->id);
	if (retstatus != CAAM_NO_ERROR) {
		JR_TRACE("enqueue job error 0x%08x", retstatus);
		return retstatus;
	}

	/*
	 * If the job_id parameter is defined, the job is asynchronous, so
	 * return after setting the job_id value
	 */
	if (job_id) {
		*job_id = jobctx->id;
		return CAAM_PENDING;
	}

#ifdef TIMEOUT_COMPLETION
	/* Job is synchronous: wait until job completion or timeout */
	while (!jobctx->completion && timeout--)
		caam_jr_dequeue(jobctx->id, 100);

	if (timeout <= 0) {
		/* Job timed out, cancel it and return an error */
		caam_jr_cancel(jobctx->id);
		retstatus = CAAM_TIMEOUT;
	} else {
		if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
			retstatus = CAAM_JOB_STATUS;
		else
			retstatus = CAAM_NO_ERROR;
	}
#else
	/*
	 * Job is synchronous: wait until the job completes.
	 * Don't use a timeout because there is no HW timer, so the
	 * timeout would not be precise.
	 */
	while (!jobctx->completion)
		caam_jr_dequeue(jobctx->id, 100);

	if (JRSTA_SRC_GET(jobctx->status) != JRSTA_SRC(NONE))
		retstatus = CAAM_JOB_STATUS;
	else
		retstatus = CAAM_NO_ERROR;
#endif

	/* Erase the local callback function */
	jobctx->callback = NULL;

	return retstatus;
}
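
/*
 * Usage sketch (illustrative only, desc/my_cb/my_ctx are hypothetical;
 * the descriptor is assumed built with the caam_desc_* helpers):
 *
 *	struct caam_jobctx jobctx = { };
 *	uint32_t job_id = 0;
 *
 *	jobctx.desc = desc;
 *
 *	// Synchronous: no callback and a NULL job_id, the call blocks
 *	// until the job completes
 *	if (caam_jr_enqueue(&jobctx, NULL) != CAAM_NO_ERROR)
 *		return CAAM_FAILURE;
 *
 *	// Asynchronous: callback and job_id set, the call returns
 *	// CAAM_PENDING and my_cb() is later called with the job context,
 *	// which carries my_ctx in its context field
 *	jobctx.callback = my_cb;
 *	jobctx.context = my_ctx;
 *	if (caam_jr_enqueue(&jobctx, &job_id) != CAAM_PENDING)
 *		return CAAM_FAILURE;
 */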

enum caam_status caam_jr_init(struct caam_jrcfg *jrcfg)
{
	enum caam_status retstatus = CAAM_FAILURE;

	JR_TRACE("Initialization");

	/* Allocate the Job Ring resources */
	retstatus = do_jr_alloc(&jr_privdata, jrcfg->nb_jobs);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->ctrladdr = jrcfg->base;
	jr_privdata->jroffset = jrcfg->offset;

	retstatus =
		caam_hal_jr_setowner(jrcfg->base, jrcfg->offset, JROWN_ARM_S);
	JR_TRACE("JR setowner returned 0x%x", retstatus);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	jr_privdata->baseaddr = jrcfg->base + jrcfg->offset;
	retstatus = caam_hal_jr_reset(jr_privdata->baseaddr);
	if (retstatus != CAAM_NO_ERROR)
		goto end_init;

	/*
	 * Get the physical addresses of the input/output queues.
	 * The HW configuration uses 64-bit registers regardless of
	 * the CAAM or CPU addressing mode.
	 */
	jr_privdata->paddr_inrings = virt_to_phys(jr_privdata->inrings);
	jr_privdata->paddr_outrings = virt_to_phys(jr_privdata->outrings);
	if (!jr_privdata->paddr_inrings || !jr_privdata->paddr_outrings) {
		JR_TRACE("JR bad queue pointers");
		retstatus = CAAM_FAILURE;
		goto end_init;
	}

	caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
			   jr_privdata->paddr_inrings,
			   jr_privdata->paddr_outrings);

	/*
	 * Prepare the interrupt handler to secure the interrupt even
	 * if the interrupt is not used
	 */
	jr_privdata->it_handler.it = jrcfg->it_num;
	jr_privdata->it_handler.flags = ITRF_TRIGGER_LEVEL;
	jr_privdata->it_handler.handler = caam_jr_irqhandler;
	jr_privdata->it_handler.data = jr_privdata;

#ifdef CFG_NXP_CAAM_RUNTIME_JR
	itr_add(&jr_privdata->it_handler);
#endif
	caam_hal_jr_enable_itr(jr_privdata->baseaddr);

	retstatus = CAAM_NO_ERROR;

end_init:
	if (retstatus != CAAM_NO_ERROR)
		do_jr_free(jr_privdata);

	return retstatus;
}

enum caam_status caam_jr_halt(void)
{
	return caam_hal_jr_halt(jr_privdata->baseaddr);
}

enum caam_status caam_jr_flush(void)
{
	return caam_hal_jr_flush(jr_privdata->baseaddr);
}

void caam_jr_resume(uint32_t pm_hint)
{
	if (pm_hint == PM_HINT_CONTEXT_STATE) {
#ifndef CFG_NXP_CAAM_RUNTIME_JR
		/*
		 * When the CAAM is not used at runtime, the JR used to
		 * instantiate the RNG has been released to the Non-Secure
		 * world; hence the Secure JR must be reconfigured here and
		 * released again after the RNG instantiation.
		 */
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_S);

		caam_hal_jr_config(jr_privdata->baseaddr, jr_privdata->nb_jobs,
				   jr_privdata->paddr_inrings,
				   jr_privdata->paddr_outrings);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */

		/* Read the current input ring index */
		jr_privdata->inwrite_index =
			caam_hal_jr_input_index(jr_privdata->baseaddr);
		/* Read the current output ring index */
		jr_privdata->outread_index =
			caam_hal_jr_output_index(jr_privdata->baseaddr);

		if (caam_rng_instantiation() != CAAM_NO_ERROR)
			panic();

#ifndef CFG_NXP_CAAM_RUNTIME_JR
		caam_hal_jr_setowner(jr_privdata->ctrladdr,
				     jr_privdata->jroffset, JROWN_ARM_NS);
#endif /* CFG_NXP_CAAM_RUNTIME_JR */
	} else {
		caam_hal_jr_resume(jr_privdata->baseaddr);
	}
}

enum caam_status caam_jr_complete(void)
{
	enum caam_status ret = CAAM_BUSY;

	ret = caam_hal_jr_flush(jr_privdata->baseaddr);
	if (ret == CAAM_NO_ERROR)
		caam_hal_jr_resume(jr_privdata->baseaddr);

	return ret;
}
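
/*
 * Initialization sketch (illustrative only, all values are hypothetical
 * and normally come from the platform HAL configuration):
 *
 *	struct caam_jrcfg jrcfg = {
 *		.base = caam_ctrl_vbase,  // CAAM controller virtual base
 *		.offset = jr_offset,      // Job Ring register offset
 *		.nb_jobs = 8,             // queue depth
 *		.it_num = jr_it_number,   // Job Ring interrupt number
 *	};
 *
 *	if (caam_jr_init(&jrcfg) != CAAM_NO_ERROR)
 *		panic();
 */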