Lines matching +full:imx23 +full:-dcp (excerpts from the Linux kernel Freescale DCP driver, drivers/crypto/mxs-dcp.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Freescale i.MX23/i.MX28 Data Co-Processor driver
8 #include <linux/dma-mapping.h>
45 /* DCP DMA descriptor. */
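For context, the descriptor filled in throughout the fragments below is eight 32-bit words that the engine fetches via DMA. A sketch matching the mainline layout (modulo version drift):

struct dcp_dma_desc {
	uint32_t	next_cmd_addr;	/* Next descriptor in the chain; 0 ends it. */
	uint32_t	control0;	/* Operation, IRQ and semaphore flags. */
	uint32_t	control1;	/* Cipher/hash select and mode bits. */
	uint32_t	source;		/* DMA address of the input buffer. */
	uint32_t	destination;	/* DMA address of the output buffer. */
	uint32_t	size;		/* Payload size in bytes. */
	uint32_t	payload;	/* Key+IV or digest DMA address. */
	uint32_t	status;		/* Completion status written back by the engine. */
};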
69 struct dcp {
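The fields the search elided hold the per-device and per-channel state used by every fragment below; a sketch of the body, assuming the mainline field names:

	struct device			*dev;
	void __iomem			*base;	/* MMIO register window. */
	uint32_t			caps;	/* Snapshot of MXS_DCP_CAPABILITY1. */
	struct dcp_coherent_block	*coh;	/* Bounce buffers and descriptors (sketched later). */
	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};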
94 /* SHA Hash-specific context */
99 /* Crypto-specific context */
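Both comments above sit inside struct dcp_async_ctx, the per-transform context that carries the common, hash and cipher state; a sketch matching the mainline definition:

struct dcp_async_ctx {
	/* Common context */
	struct mutex		mutex;
	uint32_t		chan;

	/* SHA Hash-specific context */
	unsigned int		hot:1;	/* Set once the first chunk has hit the hardware. */

	/* Crypto-specific context */
	struct crypto_skcipher	*fallback;
	unsigned int		key_len;
	uint8_t			key[AES_KEYSIZE_128];
};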
122  * There can even be only one instance of the MXS DCP due to the design of the Linux Crypto API.
125 static struct dcp *global_sdcp;
127 /* DCP register layout. */
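An abbreviated sketch of the offsets used below; I believe these match the mainline header block (per-channel registers sit 0x40 apart starting at 0x100), but treat them as illustrative:

#define MXS_DCP_CTRL			0x00
#define MXS_DCP_STAT			0x10
#define MXS_DCP_STAT_CLR		0x18
#define MXS_DCP_CHANNELCTRL		0x20
#define MXS_DCP_CAPABILITY1		0x40
#define MXS_DCP_CONTEXT			0x50
#define MXS_DCP_CH_N_CMDPTR(n)		(0x100 + ((n) * 0x40))
#define MXS_DCP_CH_N_SEMA(n)		(0x110 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT(n)		(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)	(0x128 + ((n) * 0x40))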
/* in mxs_dcp_start_dma() */
173 struct dcp *sdcp = global_sdcp;
174 const int chan = actx->chan;
177 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
178 dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
181 dma_err = dma_mapping_error(sdcp->dev, desc_phys);
185 reinit_completion(&sdcp->completion[chan]);
188 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
191 writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
194 writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
196 ret = wait_for_completion_timeout(&sdcp->completion[chan],
199 dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
200 chan, readl(sdcp->base + MXS_DCP_STAT));
201 return -ETIMEDOUT;
204 stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
206 dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
208 return -EINVAL;
211 dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
/* in mxs_dcp_run_aes() */
223 struct dcp *sdcp = global_sdcp;
224 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
228 key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
230 ret = dma_mapping_error(sdcp->dev, key_phys);
234 src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
236 ret = dma_mapping_error(sdcp->dev, src_phys);
240 dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
242 ret = dma_mapping_error(sdcp->dev, dst_phys);
246 if (actx->fill % AES_BLOCK_SIZE) {
247 dev_err(sdcp->dev, "Invalid block size!\n");
248 ret = -EINVAL;
253 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
258 desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
260 if (rctx->enc)
261 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
263 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
265 desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
267 if (rctx->ecb)
268 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
270 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
272 desc->next_cmd_addr = 0;
273 desc->source = src_phys;
274 desc->destination = dst_phys;
275 desc->size = actx->fill;
276 desc->payload = key_phys;
277 desc->status = 0;
282 dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
284 dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
286 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
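The aes_key, aes_in_buf and aes_out_buf pointers mapped above (and the sha buffers used later) all live in one DMA bounce structure allocated once at probe time; a sketch matching the mainline layout:

struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	/* AES key with the CBC IV right behind it, handed over as one payload. */
	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};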
/* in mxs_dcp_aes_block_crypt() */
294 struct dcp *sdcp = global_sdcp;
297 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
300 struct scatterlist *dst = req->dst;
301 struct scatterlist *src = req->src;
305 uint8_t *in_buf = sdcp->coh->aes_in_buf;
306 uint8_t *out_buf = sdcp->coh->aes_out_buf;
312 uint8_t *key = sdcp->coh->aes_key;
319 actx->fill = 0;
322 memcpy(key, actx->key, actx->key_len);
324 if (!rctx->ecb) {
326 memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
333 for_each_sg(req->src, src, sg_nents(req->src), i) {
337 limit_hit = tlen > req->cryptlen;
340 len = req->cryptlen - (tlen - len);
343 if (actx->fill + len > out_off)
344 clen = out_off - actx->fill;
348 memcpy(in_buf + actx->fill, src_buf, clen);
349 len -= clen;
351 actx->fill += clen;
357 if (actx->fill == out_off || sg_is_last(src) ||
365 actx->fill, dst_off);
366 dst_off += actx->fill;
367 last_out_len = actx->fill;
368 actx->fill = 0;
377 if (!rctx->ecb) {
378 if (rctx->enc)
379 memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
382 memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
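A gloss on the IV handling above: CBC chains consecutive requests through req->iv, and the next IV is always the last ciphertext block of the current request. Hence the asymmetry:

	/*
	 * Encrypt: the last ciphertext block was just produced, so it is at
	 * the tail of out_buf.  Decrypt: the last ciphertext block was the
	 * *input*, so it is still at the tail of in_buf.
	 */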
/* in dcp_chan_thread_aes() */
391 struct dcp *sdcp = global_sdcp;
402 spin_lock(&sdcp->lock[chan]);
403 backlog = crypto_get_backlog(&sdcp->queue[chan]);
404 arq = crypto_dequeue_request(&sdcp->queue[chan]);
405 spin_unlock(&sdcp->lock[chan]);
415 backlog->complete(backlog, -EINPROGRESS);
419 arq->complete(arq, ret);
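These lines come from the crypto channel's worker loop; a condensed sketch of the surrounding kthread skeleton, assuming the mainline structure (dcp_chan_thread_sha() has the same shape, calling dcp_sha_req_to_buf() instead):

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;
	struct crypto_async_request *backlog, *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Pop one request (and any backlogged one) under the channel lock. */
		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();	/* Queue empty; sleep until an enqueue wakes us. */
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}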
/* in mxs_dcp_block_fallback() */
433 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
434 skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
435 req->base.complete, req->base.data);
436 skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
437 req->cryptlen, req->iv);
440 ret = crypto_skcipher_encrypt(&rctx->fallback_req);
442 ret = crypto_skcipher_decrypt(&rctx->fallback_req);
/* in mxs_dcp_aes_enqueue() */
449 struct dcp *sdcp = global_sdcp;
450 struct crypto_async_request *arq = &req->base;
451 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
455 if (unlikely(actx->key_len != AES_KEYSIZE_128))
458 rctx->enc = enc;
459 rctx->ecb = ecb;
460 actx->chan = DCP_CHAN_CRYPTO;
462 spin_lock(&sdcp->lock[actx->chan]);
463 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
464 spin_unlock(&sdcp->lock[actx->chan]);
466 wake_up_process(sdcp->thread[actx->chan]);
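DCP_CHAN_CRYPTO here (and DCP_CHAN_HASH_SHA later) name two of the four hardware channels the engine exposes; a sketch matching the mainline definitions:

#define DCP_MAX_CHANS	4

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 1,
};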
/* in mxs_dcp_aes_setkey() */
501 actx->key_len = len;
503 memcpy(actx->key, key, len);
509  * If the requested key size is not supported by the hardware but is supported by the in-kernel software implementation, we use the software fallback.
512 crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
513 crypto_skcipher_set_flags(actx->fallback,
514 tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
515 return crypto_skcipher_setkey(actx->fallback, key, len);
/* in mxs_dcp_aes_fallback_init_tfm() */
528 actx->fallback = blk;
/* in mxs_dcp_aes_fallback_exit_tfm() */
538 crypto_free_skcipher(actx->fallback);
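The fallback freed here is allocated in the matching init_tfm hook; a condensed sketch of that allocation, assuming the mainline pattern:

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	/* Ask the crypto API for a software implementation of the same algorithm. */
	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	/* Reserve request space for our context plus whatever the fallback needs. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}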
/* in mxs_dcp_run_sha() */
546 struct dcp *sdcp = global_sdcp;
552 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
555 dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
558 ret = dma_mapping_error(sdcp->dev, buf_phys);
563 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
566 if (rctx->init)
567 desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
569 desc->control1 = actx->alg;
570 desc->next_cmd_addr = 0;
571 desc->source = buf_phys;
572 desc->destination = 0;
573 desc->size = actx->fill;
574 desc->payload = 0;
575 desc->status = 0;
580 if (rctx->init && rctx->fini && desc->size == 0) {
583 (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
585 memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
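sha_buf above points at a canned digest: the DCP cannot hash a zero-length message, so for an init+fini request with no data the driver copies out a precomputed SHA1("") or SHA256(""). The constants are stored byte-reversed to match the engine's output order (they get flipped back in dcp_sha_req_to_buf()); as found in mainline:

static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xef\x8a\x7a\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";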
/* in mxs_dcp_run_sha(), continued */
591 if (rctx->fini) {
592 digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
594 ret = dma_mapping_error(sdcp->dev, digest_phys);
598 desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
599 desc->payload = digest_phys;
604 if (rctx->fini)
605 dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
609 dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
/* in dcp_sha_req_to_buf() */
616 struct dcp *sdcp = global_sdcp;
624 uint8_t *in_buf = sdcp->coh->sha_in_buf;
625 uint8_t *out_buf = sdcp->coh->sha_out_buf;
632 int fin = rctx->fini;
634 rctx->fini = 0;
636 src = req->src;
637 len = req->nbytes;
640 if (actx->fill + len > DCP_BUF_SZ)
641 clen = DCP_BUF_SZ - actx->fill;
645 scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
648 len -= clen;
650 actx->fill += clen;
656 if (len && actx->fill == DCP_BUF_SZ) {
660 actx->fill = 0;
661 rctx->init = 0;
666 rctx->fini = 1;
669 if (!req->result)
670 return -EINVAL;
676 actx->fill = 0;
679 for (i = 0; i < halg->digestsize; i++)
680 req->result[i] = out_buf[halg->digestsize - i - 1];
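Why the reversed copy above:

	/*
	 * The engine writes the digest least-significant byte first, so the
	 * loop flips sha_out_buf into the byte order the crypto API expects
	 * (and why the null-hash constants are stored pre-reversed).
	 */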
/* in dcp_chan_thread_sha() */
688 struct dcp *sdcp = global_sdcp;
698 spin_lock(&sdcp->lock[chan]);
699 backlog = crypto_get_backlog(&sdcp->queue[chan]);
700 arq = crypto_dequeue_request(&sdcp->queue[chan]);
701 spin_unlock(&sdcp->lock[chan]);
711 backlog->complete(backlog, -EINPROGRESS);
715 arq->complete(arq, ret);
/* in dcp_sha_init() */
735 if (strcmp(halg->base.cra_name, "sha1") == 0)
736 actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
738 actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
740 actx->fill = 0;
741 actx->hot = 0;
742 actx->chan = DCP_CHAN_HASH_SHA;
744 mutex_init(&actx->mutex);
/* in dcp_sha_update_fx() */
751 struct dcp *sdcp = global_sdcp;
763 if (!req->nbytes && !fini)
766 mutex_lock(&actx->mutex);
768 rctx->fini = fini;
770 if (!actx->hot) {
771 actx->hot = 1;
772 rctx->init = 1;
775 spin_lock(&sdcp->lock[actx->chan]);
776 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
777 spin_unlock(&sdcp->lock[actx->chan]);
779 wake_up_process(sdcp->thread[actx->chan]);
780 mutex_unlock(&actx->mutex);
/* in dcp_sha_final() */
792 ahash_request_set_crypt(req, NULL, req->result, 0);
793 req->nbytes = 0;
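dcp_sha_final() is a thin wrapper over the shared worker: zero the byte count and run with fini set; update is the same call with fini clear. A sketch of the pairing, assuming the mainline shape:

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}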
/* in dcp_sha_import() */
822 memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
823 memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
/* in dcp_sha_export() */
835 memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
836 memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
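The import/export pair above just snapshots both contexts into one opaque blob so a partial hash state can survive across requests; the container, as defined in mainline:

struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};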
856 .base.cra_driver_name = "ecb-aes-dcp",
874 .base.cra_driver_name = "cbc-aes-dcp",
908 .cra_driver_name = "sha1-dcp",
935 .cra_driver_name = "sha256-dcp",
/* in mxs_dcp_irq() */
950 struct dcp *sdcp = context;
954 stat = readl(sdcp->base + MXS_DCP_STAT);
960 writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
965 complete(&sdcp->completion[i]);
/* in mxs_dcp_probe() */
972 struct device *dev = &pdev->dev;
973 struct dcp *sdcp = NULL;
978 dev_err(dev, "Only one DCP instance allowed!\n");
979 return -ENODEV;
992 return -ENOMEM;
994 sdcp->dev = dev;
995 sdcp->base = devm_platform_ioremap_resource(pdev, 0);
996 if (IS_ERR(sdcp->base))
997 return PTR_ERR(sdcp->base);
1001 "dcp-vmi-irq", sdcp);
1003 dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
1008 "dcp-irq", sdcp);
1010 dev_err(dev, "Failed to claim DCP IRQ!\n");
1015 sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
1017 if (!sdcp->coh)
1018 return -ENOMEM;
1020 /* Re-align the structure so it fits the DCP constraints. */
1021 sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
1023 /* DCP clock is optional, only used on some SoCs */
1024 sdcp->dcp_clk = devm_clk_get(dev, "dcp");
1025 if (IS_ERR(sdcp->dcp_clk)) {
1026 if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
1027 return PTR_ERR(sdcp->dcp_clk);
1028 sdcp->dcp_clk = NULL;
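An editorial aside: on newer kernels the open-coded -ENOENT dance above collapses into the optional-clock helper; a minimal sketch:

	sdcp->dcp_clk = devm_clk_get_optional(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);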
/* in mxs_dcp_probe(), continued */
1030 ret = clk_prepare_enable(sdcp->dcp_clk);
1034 /* Restart the DCP block. */
1035 ret = stmp_reset_block(sdcp->base);
1044 sdcp->base + MXS_DCP_CTRL);
1046 /* Enable all DCP DMA channels. */
1048 sdcp->base + MXS_DCP_CHANNELCTRL);
1053 * inadvertently enabled, the DCP will return an error instead of
1054 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
1057 writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
1059 writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
1060 writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
1067 spin_lock_init(&sdcp->lock[i]);
1068 init_completion(&sdcp->completion[i]);
1069 crypto_init_queue(&sdcp->queue[i], 50);
1073 sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
1075 if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
1077 ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1081 sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1083 if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
1085 ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1090 sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1092 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1102 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1111 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1123 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1127 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1131 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1134 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1137 clk_disable_unprepare(sdcp->dcp_clk);
/* in mxs_dcp_remove() */
1144 struct dcp *sdcp = platform_get_drvdata(pdev);
1146 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1149 if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1152 if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1155 kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1156 kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1158 clk_disable_unprepare(sdcp->dcp_clk);
1168 { .compatible = "fsl,imx23-dcp", .data = NULL, },
1169 { .compatible = "fsl,imx28-dcp", .data = NULL, },
1179 .name = "mxs-dcp",
1187 MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1189 MODULE_ALIAS("platform:mxs-dcp");