Lines matching refs: edesc — identifier cross-reference over the CAAM RSA driver (drivers/crypto/caam/caampkc.c). Each entry gives the file line number, the matching source line, and the enclosing function, with edesc flagged as an argument or a local.

43 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,  in rsa_io_unmap()  argument
48 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); in rsa_io_unmap()
49 dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); in rsa_io_unmap()
51 if (edesc->sec4_sg_bytes) in rsa_io_unmap()
52 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, in rsa_io_unmap()
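
rsa_io_unmap() tears down the request's DMA state in the usual order: the destination scatterlist first (the accelerator wrote into it), then the source (line 49 unmaps req_ctx->fixup_src, the driver's possibly adjusted view of req->src), and finally the sec4 hardware S/G table, which is unmapped only if one was mapped at all. A minimal sketch of that pattern, with a simplified stand-in struct since the full rsa_edesc layout is not part of this listing:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Simplified stand-in for the rsa_edesc fields referenced above. */
    struct edesc_sketch {
            int src_nents;            /* entries in the source S/G list */
            int dst_nents;            /* entries in the destination S/G list */
            dma_addr_t sec4_sg_dma;   /* bus address of the HW S/G table */
            int sec4_sg_bytes;        /* 0 when no HW S/G table was built */
    };

    static void io_unmap_sketch(struct device *dev, struct edesc_sketch *edesc,
                                struct scatterlist *src, struct scatterlist *dst)
    {
            /* the device wrote the result into dst */
            dma_unmap_sg(dev, dst, edesc->dst_nents, DMA_FROM_DEVICE);
            /* the device only read from src */
            dma_unmap_sg(dev, src, edesc->src_nents, DMA_TO_DEVICE);

            /* the table exists only for multi-segment I/O, hence the test */
            if (edesc->sec4_sg_bytes)
                    dma_unmap_single(dev, edesc->sec4_sg_dma,
                                     edesc->sec4_sg_bytes, DMA_TO_DEVICE);
    }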
56 static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc, in rsa_pub_unmap() argument
62 struct rsa_pub_pdb *pdb = &edesc->pdb.pub; in rsa_pub_unmap()
68 static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc, in rsa_priv_f1_unmap() argument
74 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; in rsa_priv_f1_unmap()
80 static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, in rsa_priv_f2_unmap() argument
86 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; in rsa_priv_f2_unmap()
97 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, in rsa_priv_f3_unmap() argument
103 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; in rsa_priv_f3_unmap()
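
The four protocol-specific unmap helpers above each pull a different protocol data block out of the same edesc->pdb member, which implies a union over the PDB types. A hedged reconstruction of that slice of the struct (the PDB types themselves live in the driver's pdb.h; the key-form comments reflect the CAAM RSA private-key forms):

    /* Inferred from the pdb.pub / pdb.priv_f1 / pdb.priv_f2 / pdb.priv_f3
     * accesses above; not the full struct rsa_edesc definition. */
    struct rsa_edesc_pdb_slice {
            union {
                    struct rsa_pub_pdb pub;          /* public op: n, e */
                    struct rsa_priv_f1_pdb priv_f1;  /* form 1: n, d */
                    struct rsa_priv_f2_pdb priv_f2;  /* form 2: d, p, q */
                    struct rsa_priv_f3_pdb priv_f3;  /* form 3 (CRT):
                                                        p, q, dp, dq, qinv */
            } pdb;
    };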
122 struct rsa_edesc *edesc; in rsa_pub_done() local
129 edesc = req_ctx->edesc; in rsa_pub_done()
130 has_bklog = edesc->bklog; in rsa_pub_done()
132 rsa_pub_unmap(dev, edesc, req); in rsa_pub_done()
133 rsa_io_unmap(dev, edesc, req); in rsa_pub_done()
134 kfree(edesc); in rsa_pub_done()
155 struct rsa_edesc *edesc; in rsa_priv_f_done() local
162 edesc = req_ctx->edesc; in rsa_priv_f_done()
163 has_bklog = edesc->bklog; in rsa_priv_f_done()
167 rsa_priv_f1_unmap(dev, edesc, req); in rsa_priv_f_done()
170 rsa_priv_f2_unmap(dev, edesc, req); in rsa_priv_f_done()
173 rsa_priv_f3_unmap(dev, edesc, req); in rsa_priv_f_done()
176 rsa_io_unmap(dev, edesc, req); in rsa_priv_f_done()
177 kfree(edesc); in rsa_priv_f_done()
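
rsa_pub_done() and rsa_priv_f_done() share one shape: read the bklog flag before freeing the edesc, unmap, free, then complete the request on the matching path (rsa_priv_f_done() additionally picks the right priv_fN unmap, lines 167-173). A hedged sketch of the public-key variant, assuming the error code has already been derived from the job status; driver-internal types and the unmap helpers are assumed in scope:

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <crypto/engine.h>
    #include <crypto/internal/akcipher.h>

    static void pub_done_sketch(struct device *dev,
                                struct akcipher_request *req, int ecode)
    {
            struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
            struct rsa_edesc *edesc = req_ctx->edesc;
            bool has_bklog = edesc->bklog;  /* must be read before kfree() */

            rsa_pub_unmap(dev, edesc, req);
            rsa_io_unmap(dev, edesc, req);
            kfree(edesc);

            /*
             * A request that was queued through the crypto engine must be
             * finalized through the engine so its queue accounting stays
             * consistent; a directly enqueued request completes via its
             * own callback.
             */
            if (has_bklog)
                    crypto_finalize_akcipher_request(jrp->engine, req, ecode);
            else
                    akcipher_request_complete(req, ecode);
    }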
248 struct rsa_edesc *edesc; in rsa_edesc_alloc() local
312 edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, in rsa_edesc_alloc()
314 if (!edesc) in rsa_edesc_alloc()
317 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; in rsa_edesc_alloc()
319 dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, in rsa_edesc_alloc()
324 edesc->sec4_sg + !!diff_size, 0); in rsa_edesc_alloc()
328 edesc->sec4_sg + sec4_sg_index, 0); in rsa_edesc_alloc()
331 edesc->src_nents = src_nents; in rsa_edesc_alloc()
332 edesc->dst_nents = dst_nents; in rsa_edesc_alloc()
334 req_ctx->edesc = edesc; in rsa_edesc_alloc()
337 return edesc; in rsa_edesc_alloc()
339 edesc->mapped_src_nents = mapped_src_nents; in rsa_edesc_alloc()
340 edesc->mapped_dst_nents = mapped_dst_nents; in rsa_edesc_alloc()
342 edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg, in rsa_edesc_alloc()
344 if (dma_mapping_error(dev, edesc->sec4_sg_dma)) { in rsa_edesc_alloc()
349 edesc->sec4_sg_bytes = sec4_sg_bytes; in rsa_edesc_alloc()
352 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, in rsa_edesc_alloc()
353 edesc->sec4_sg_bytes, 1); in rsa_edesc_alloc()
355 return edesc; in rsa_edesc_alloc()
358 kfree(edesc); in rsa_edesc_alloc()
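
rsa_edesc_alloc() makes a single kzalloc() cover the edesc header, the hardware job descriptor (desclen bytes), and the sec4 S/G table, then points edesc->sec4_sg just past the descriptor (line 317); that is why every error and completion path can release everything with one kfree(). Lines 319-324 also show the padding trick: when the input is shorter than the modulus, the first table entry maps a shared zero-padding buffer (ctx->padding_dma), and the "+ !!diff_size" skips that slot when no padding is needed. A sketch of the layout arithmetic:

    #include <linux/err.h>
    #include <linux/slab.h>

    /* Layout: [struct rsa_edesc][hw descriptor: desclen][sec4 S/G table] */
    static struct rsa_edesc *alloc_sketch(size_t desclen, size_t sec4_sg_bytes,
                                          gfp_t flags)
    {
            struct rsa_edesc *edesc;

            edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
            if (!edesc)
                    return ERR_PTR(-ENOMEM);

            /* the S/G table starts right after the hardware descriptor */
            edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
            return edesc;
    }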
375 u32 *desc = req_ctx->edesc->hw_desc; in akcipher_do_one_req()
378 req_ctx->edesc->bklog = true; in akcipher_do_one_req()
383 rsa_pub_unmap(jrdev, req_ctx->edesc, req); in akcipher_do_one_req()
384 rsa_io_unmap(jrdev, req_ctx->edesc, req); in akcipher_do_one_req()
385 kfree(req_ctx->edesc); in akcipher_do_one_req()
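
akcipher_do_one_req() is the crypto-engine worker callback: it flags the edesc as backlogged before handing the prebuilt descriptor to the job ring, and unwinds fully when the enqueue does not return -EINPROGRESS. A hedged sketch (caam_jr_enqueue() and the completion callback stored in the request context are the driver's own; engine retry details are elided):

    static int do_one_req_sketch(struct crypto_engine *engine, void *areq)
    {
            struct akcipher_request *req =
                    container_of(areq, struct akcipher_request, base);
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
            struct caam_rsa_ctx *ctx =
                    akcipher_tfm_ctx(crypto_akcipher_reqtfm(req));
            struct device *jrdev = ctx->dev;
            u32 *desc = req_ctx->edesc->hw_desc;
            int ret;

            /* make the completion path finalize via the engine */
            req_ctx->edesc->bklog = true;

            ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
            if (ret != -EINPROGRESS) {
                    /* enqueue failed: undo the mappings, drop the edesc */
                    rsa_pub_unmap(jrdev, req_ctx->edesc, req);
                    rsa_io_unmap(jrdev, req_ctx->edesc, req);
                    kfree(req_ctx->edesc);
                    return ret;
            }
            return 0;
    }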
394 struct rsa_edesc *edesc) in set_rsa_pub_pdb() argument
401 struct rsa_pub_pdb *pdb = &edesc->pdb.pub; in set_rsa_pub_pdb()
417 if (edesc->mapped_src_nents > 1) { in set_rsa_pub_pdb()
419 pdb->f_dma = edesc->sec4_sg_dma; in set_rsa_pub_pdb()
420 sec4_sg_index += edesc->mapped_src_nents; in set_rsa_pub_pdb()
425 if (edesc->mapped_dst_nents > 1) { in set_rsa_pub_pdb()
427 pdb->g_dma = edesc->sec4_sg_dma + in set_rsa_pub_pdb()
440 struct rsa_edesc *edesc) in set_rsa_priv_f1_pdb() argument
446 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; in set_rsa_priv_f1_pdb()
462 if (edesc->mapped_src_nents > 1) { in set_rsa_priv_f1_pdb()
464 pdb->g_dma = edesc->sec4_sg_dma; in set_rsa_priv_f1_pdb()
465 sec4_sg_index += edesc->mapped_src_nents; in set_rsa_priv_f1_pdb()
473 if (edesc->mapped_dst_nents > 1) { in set_rsa_priv_f1_pdb()
475 pdb->f_dma = edesc->sec4_sg_dma + in set_rsa_priv_f1_pdb()
487 struct rsa_edesc *edesc) in set_rsa_priv_f2_pdb() argument
493 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; in set_rsa_priv_f2_pdb()
528 if (edesc->mapped_src_nents > 1) { in set_rsa_priv_f2_pdb()
530 pdb->g_dma = edesc->sec4_sg_dma; in set_rsa_priv_f2_pdb()
531 sec4_sg_index += edesc->mapped_src_nents; in set_rsa_priv_f2_pdb()
538 if (edesc->mapped_dst_nents > 1) { in set_rsa_priv_f2_pdb()
540 pdb->f_dma = edesc->sec4_sg_dma + in set_rsa_priv_f2_pdb()
564 struct rsa_edesc *edesc) in set_rsa_priv_f3_pdb() argument
570 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; in set_rsa_priv_f3_pdb()
617 if (edesc->mapped_src_nents > 1) { in set_rsa_priv_f3_pdb()
619 pdb->g_dma = edesc->sec4_sg_dma; in set_rsa_priv_f3_pdb()
620 sec4_sg_index += edesc->mapped_src_nents; in set_rsa_priv_f3_pdb()
627 if (edesc->mapped_dst_nents > 1) { in set_rsa_priv_f3_pdb()
629 pdb->f_dma = edesc->sec4_sg_dma + in set_rsa_priv_f3_pdb()
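
All four PDB setters make the same choice twice over, visible at lines 417-427, 462-475, 528-540 and 617-629: when the mapped source or destination spans more than one segment, point the PDB at the DMA-mapped sec4 S/G table (destination entries start sec4_sg_index entries in, after the source entries); otherwise point it at the lone segment directly. Note the naming swap: for the public op f_dma is the input and g_dma the output, while the private forms read from g_dma and write to f_dma. A condensed sketch in the public-op naming; the SGF flag macro names are assumed from the driver's pdb.h:

    #include <linux/scatterlist.h>

    static void pub_pdb_ptrs_sketch(struct akcipher_request *req,
                                    struct rsa_edesc *edesc,
                                    struct rsa_pub_pdb *pdb)
    {
            struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
            size_t sec4_sg_index = 0;

            if (edesc->mapped_src_nents > 1) {
                    pdb->sgf |= RSA_PDB_SGF_F;      /* input via S/G table */
                    pdb->f_dma = edesc->sec4_sg_dma;
                    sec4_sg_index += edesc->mapped_src_nents;
            } else {
                    pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
            }

            if (edesc->mapped_dst_nents > 1) {
                    pdb->sgf |= RSA_PDB_SGF_G;      /* output via S/G table */
                    /* destination entries follow the source entries */
                    pdb->g_dma = edesc->sec4_sg_dma +
                                 sec4_sg_index * sizeof(struct sec4_sg_entry);
            } else {
                    pdb->g_dma = sg_dma_address(req->dst);
            }
    }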
666 struct rsa_edesc *edesc = req_ctx->edesc; in akcipher_enqueue_req() local
667 u32 *desc = edesc->hw_desc; in akcipher_enqueue_req()
685 rsa_priv_f1_unmap(jrdev, edesc, req); in akcipher_enqueue_req()
688 rsa_priv_f2_unmap(jrdev, edesc, req); in akcipher_enqueue_req()
691 rsa_priv_f3_unmap(jrdev, edesc, req); in akcipher_enqueue_req()
694 rsa_pub_unmap(jrdev, edesc, req); in akcipher_enqueue_req()
696 rsa_io_unmap(jrdev, edesc, req); in akcipher_enqueue_req()
697 kfree(edesc); in akcipher_enqueue_req()
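
akcipher_enqueue_req() is the single submission point: if the enqueue (direct to the job ring, or transferred to the crypto engine for backlog-capable requests) returns neither -EINPROGRESS nor -EBUSY, it unwinds by key form, which is exactly the switch visible at lines 685-694. A hedged sketch of that error unwind; FORM1/FORM2/FORM3 are assumed from the driver's private-key-form enum:

    /* Error unwind after a failed submission; mirrors lines 685-697. */
    static void enqueue_unwind_sketch(struct device *jrdev,
                                      struct caam_rsa_key *key,
                                      struct rsa_edesc *edesc,
                                      struct akcipher_request *req)
    {
            switch (key->priv_form) {
            case FORM1:
                    rsa_priv_f1_unmap(jrdev, edesc, req);
                    break;
            case FORM2:
                    rsa_priv_f2_unmap(jrdev, edesc, req);
                    break;
            case FORM3:
                    rsa_priv_f3_unmap(jrdev, edesc, req);
                    break;
            default:        /* public operation */
                    rsa_pub_unmap(jrdev, edesc, req);
            }
            rsa_io_unmap(jrdev, edesc, req);
            kfree(edesc);
    }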
709 struct rsa_edesc *edesc; in caam_rsa_enc() local
722 edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN); in caam_rsa_enc()
723 if (IS_ERR(edesc)) in caam_rsa_enc()
724 return PTR_ERR(edesc); in caam_rsa_enc()
727 ret = set_rsa_pub_pdb(req, edesc); in caam_rsa_enc()
732 init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub); in caam_rsa_enc()
737 rsa_io_unmap(jrdev, edesc, req); in caam_rsa_enc()
738 kfree(edesc); in caam_rsa_enc()
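
caam_rsa_enc() shows the full request lifecycle in four steps: allocate the edesc sized for the public-key descriptor, fill in the PDB, build the job descriptor over it, and enqueue with rsa_pub_done() as the completion; if PDB setup fails, only the I/O mappings and the edesc itself need undoing. A sketch of that flow, with the driver helpers assumed in scope:

    static int rsa_enc_sketch(struct akcipher_request *req,
                              struct device *jrdev)
    {
            struct rsa_edesc *edesc;
            int ret;

            edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);  /* line 722 */
            if (IS_ERR(edesc))
                    return PTR_ERR(edesc);

            ret = set_rsa_pub_pdb(req, edesc);               /* line 727 */
            if (ret)
                    goto fail;

            /* build the job descriptor around the now-complete PDB */
            init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

            return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

    fail:
            rsa_io_unmap(jrdev, edesc, req);
            kfree(edesc);
            return ret;
    }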
747 struct rsa_edesc *edesc; in caam_rsa_dec_priv_f1() local
751 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN); in caam_rsa_dec_priv_f1()
752 if (IS_ERR(edesc)) in caam_rsa_dec_priv_f1()
753 return PTR_ERR(edesc); in caam_rsa_dec_priv_f1()
756 ret = set_rsa_priv_f1_pdb(req, edesc); in caam_rsa_dec_priv_f1()
761 init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1); in caam_rsa_dec_priv_f1()
766 rsa_io_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f1()
767 kfree(edesc); in caam_rsa_dec_priv_f1()
776 struct rsa_edesc *edesc; in caam_rsa_dec_priv_f2() local
780 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN); in caam_rsa_dec_priv_f2()
781 if (IS_ERR(edesc)) in caam_rsa_dec_priv_f2()
782 return PTR_ERR(edesc); in caam_rsa_dec_priv_f2()
785 ret = set_rsa_priv_f2_pdb(req, edesc); in caam_rsa_dec_priv_f2()
790 init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2); in caam_rsa_dec_priv_f2()
795 rsa_io_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f2()
796 kfree(edesc); in caam_rsa_dec_priv_f2()
805 struct rsa_edesc *edesc; in caam_rsa_dec_priv_f3() local
809 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN); in caam_rsa_dec_priv_f3()
810 if (IS_ERR(edesc)) in caam_rsa_dec_priv_f3()
811 return PTR_ERR(edesc); in caam_rsa_dec_priv_f3()
814 ret = set_rsa_priv_f3_pdb(req, edesc); in caam_rsa_dec_priv_f3()
819 init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3); in caam_rsa_dec_priv_f3()
824 rsa_io_unmap(jrdev, edesc, req); in caam_rsa_dec_priv_f3()
825 kfree(edesc); in caam_rsa_dec_priv_f3()
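
The three decrypt variants differ only in descriptor length, PDB setter, and descriptor initializer; the dispatcher that selects among them does not touch edesc, so it is absent from this listing. A hedged sketch of how the selection presumably keys off the stored private-key form (more key components enable the faster CRT path):

    /* Hedged: the dispatcher itself is not in the edesc listing above. */
    static int rsa_dec_sketch(struct akcipher_request *req,
                              struct caam_rsa_key *key)
    {
            if (key->priv_form == FORM3)        /* CRT: p, q, dp, dq, qinv */
                    return caam_rsa_dec_priv_f3(req);
            if (key->priv_form == FORM2)        /* d plus p and q */
                    return caam_rsa_dec_priv_f2(req);
            return caam_rsa_dec_priv_f1(req);   /* n and d only */
    }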