Lines Matching refs:edesc — cross-reference hits for the identifier "edesc". Judging by the function names and the sec4_sg helpers, these come from the CAAM ahash driver (drivers/crypto/caam/caamhash.c); each hit shows the source line number, the matching line, and the enclosing function.
119 struct ahash_edesc *edesc; member
543 struct ahash_edesc *edesc, in ahash_unmap() argument
548 if (edesc->src_nents) in ahash_unmap()
549 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); in ahash_unmap()
551 if (edesc->sec4_sg_bytes) in ahash_unmap()
552 dma_unmap_single(dev, edesc->sec4_sg_dma, in ahash_unmap()
553 edesc->sec4_sg_bytes, DMA_TO_DEVICE); in ahash_unmap()
563 struct ahash_edesc *edesc, in ahash_unmap_ctx() argument
572 ahash_unmap(dev, edesc, req, dst_len); in ahash_unmap_ctx()
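The hits at lines 548-553 show the teardown convention: src_nents doubles as a "source scatterlist was DMA-mapped" flag and sec4_sg_bytes as a "hardware S/G table was DMA-mapped" flag, so the unmap helper only undoes what a given request actually set up, and ahash_unmap_ctx() (line 572) layers the hash-context unmap on top before delegating here. A minimal sketch of that pattern, assuming the field layout the listing implies (not the verbatim driver code):

    #include <linux/dma-mapping.h>

    /* Undo only the DMA state this request actually created. */
    static void ahash_unmap_sketch(struct device *dev,
                                   struct ahash_edesc *edesc,
                                   struct ahash_request *req)
    {
            if (edesc->src_nents)           /* req->src was dma_map_sg()'d */
                    dma_unmap_sg(dev, req->src, edesc->src_nents,
                                 DMA_TO_DEVICE);

            if (edesc->sec4_sg_bytes)       /* table was dma_map_single()'d */
                    dma_unmap_single(dev, edesc->sec4_sg_dma,
                                     edesc->sec4_sg_bytes, DMA_TO_DEVICE);
    }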
580 struct ahash_edesc *edesc; in ahash_done_cpy() local
590 edesc = state->edesc; in ahash_done_cpy()
591 has_bklog = edesc->bklog; in ahash_done_cpy()
596 ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir); in ahash_done_cpy()
598 kfree(edesc); in ahash_done_cpy()
631 struct ahash_edesc *edesc; in ahash_done_switch() local
641 edesc = state->edesc; in ahash_done_switch()
642 has_bklog = edesc->bklog; in ahash_done_switch()
646 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir); in ahash_done_switch()
647 kfree(edesc); in ahash_done_switch()
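Both completion callbacks follow the same ordering, and the ordering matters: edesc->bklog is copied into a local (lines 591, 642) before the unmap and kfree (lines 596-598, 646-647), because the flag is needed after the descriptor is gone to decide how to finalize the request. A hedged sketch of that shape; the surrounding locals (jrdev, req, state, digestsize, dir, ecode, jrpriv) and the two completion calls at the end are assumptions taken from mainline, not visible in the hits above:

    struct ahash_edesc *edesc = state->edesc;
    bool has_bklog = edesc->bklog;          /* must be read before kfree() */

    ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
    kfree(edesc);

    if (!has_bklog)
            ahash_request_complete(req, ecode);     /* direct completion */
    else
            crypto_finalize_hash_request(jrpriv->engine, req, ecode);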
702 struct ahash_edesc *edesc; in ahash_edesc_alloc() local
705 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); in ahash_edesc_alloc()
706 if (!edesc) { in ahash_edesc_alloc()
711 state->edesc = edesc; in ahash_edesc_alloc()
713 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), in ahash_edesc_alloc()
716 return edesc; in ahash_edesc_alloc()
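ahash_edesc_alloc() (lines 702-716) makes a single allocation cover both the job descriptor and a variable-length trailing array of sec4_sg_entry slots, stashes the result in the per-request state so the completion callback can find it, and seeds the job descriptor to execute the pre-built shared descriptor. A sketch of that allocator, assuming the parameter list and the HDR_* flags mainline uses at this spot (the flags are not visible in the truncated hit at line 713); note the hit at line 705 still passes GFP_DMA, a detail recent mainline removed, so it is tree-dependent:

    static struct ahash_edesc *
    ahash_edesc_alloc_sketch(struct ahash_request *req, int sg_num,
                             u32 *sh_desc, dma_addr_t sh_desc_dma,
                             gfp_t flags)
    {
            struct caam_hash_state *state = ahash_request_ctx(req);
            struct ahash_edesc *edesc;

            /* One kzalloc: header plus trailing sec4 S/G table. */
            edesc = kzalloc(sizeof(*edesc) +
                            sg_num * sizeof(struct sec4_sg_entry),
                            GFP_DMA | flags);
            if (!edesc)
                    return NULL;

            state->edesc = edesc;   /* completion callback finds it here */

            /* Job descriptor that defers to the per-tfm shared descriptor. */
            init_job_desc_shared(edesc->hw_desc, sh_desc_dma,
                                 desc_len(sh_desc),
                                 HDR_SHARE_DEFER | HDR_REVERSE);
            return edesc;
    }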
720 struct ahash_edesc *edesc, in ahash_edesc_add_src() argument
729 struct sec4_sg_entry *sg = edesc->sec4_sg; in ahash_edesc_add_src()
741 edesc->sec4_sg_bytes = sgsize; in ahash_edesc_add_src()
742 edesc->sec4_sg_dma = src_dma; in ahash_edesc_add_src()
749 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, in ahash_edesc_add_src()
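ahash_edesc_add_src() (lines 720-749) is the single place that decides between pointing the hardware at the source buffer directly and pointing it at an S/G table: with more than one mapped segment (or a reserved leading slot), the request's scatterlist is converted into the trailing sec4 table, the table itself is DMA-mapped, and LDST_SGF tells the CESA-style sequence pointer it refers to a table rather than data. A sketch reconstructed around the hits above; the parameter names (nents, first_sg, first_bytes, to_hash) follow the mainline helper and are assumptions here:

    static int ahash_edesc_add_src_sketch(struct caam_hash_ctx *ctx,
                                          struct ahash_edesc *edesc,
                                          struct ahash_request *req,
                                          int nents, unsigned int first_sg,
                                          unsigned int first_bytes,
                                          size_t to_hash)
    {
            dma_addr_t src_dma;
            u32 options;

            if (nents > 1 || first_sg) {
                    struct sec4_sg_entry *sg = edesc->sec4_sg;
                    unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

                    /* Convert req->src into hardware S/G entries. */
                    sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

                    src_dma = dma_map_single(ctx->jrdev, sg, sgsize,
                                             DMA_TO_DEVICE);
                    if (dma_mapping_error(ctx->jrdev, src_dma))
                            return -ENOMEM;

                    edesc->sec4_sg_bytes = sgsize;  /* lines 741-742 */
                    edesc->sec4_sg_dma = src_dma;
                    options = LDST_SGF;     /* "pointer is an S/G table" */
            } else {
                    src_dma = sg_dma_address(req->src);
                    options = 0;
            }

            append_seq_in_ptr(edesc->hw_desc, src_dma,
                              first_bytes + to_hash, options);
            return 0;
    }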
761 u32 *desc = state->edesc->hw_desc; in ahash_do_one_req()
764 state->edesc->bklog = true; in ahash_do_one_req()
769 ahash_unmap(jrdev, state->edesc, req, 0); in ahash_do_one_req()
770 kfree(state->edesc); in ahash_do_one_req()
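ahash_do_one_req() is the crypto-engine worker: by the time it runs the descriptor is fully built, so it only flips bklog (telling the completion callback to finalize through the engine), enqueues on the job ring, and on a synchronous failure unwinds the mapping and frees the descriptor itself (lines 769-770). Hedged sketch; the engine and ring call names match mainline, but the glue around them (ctx->jrdev, the omitted -ENOSPC retry case) is assumed:

    static int ahash_do_one_req_sketch(struct crypto_engine *engine,
                                       void *areq)
    {
            struct ahash_request *req = ahash_request_cast(areq);
            struct caam_hash_state *state = ahash_request_ctx(req);
            struct caam_hash_ctx *ctx =
                    crypto_ahash_ctx(crypto_ahash_reqtfm(req));
            struct device *jrdev = ctx->jrdev;      /* assumed field */
            u32 *desc = state->edesc->hw_desc;
            int ret;

            state->edesc->bklog = true;     /* finalize via the engine */

            ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
            if (ret != -EINPROGRESS) {
                    /* Never reached the ring: unwind right here. */
                    ahash_unmap(jrdev, state->edesc, req, 0);
                    kfree(state->edesc);
                    return ret;
            }
            return 0;
    }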
786 struct ahash_edesc *edesc = state->edesc; in ahash_enqueue_req() local
787 u32 *desc = edesc->hw_desc; in ahash_enqueue_req()
804 ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir); in ahash_enqueue_req()
805 kfree(edesc); in ahash_enqueue_req()
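ahash_enqueue_req() (lines 786-805) is the submit-side counterpart: requests that may backlog are routed through crypto-engine, everything else goes straight to the job ring, and any return code other than "in progress"/"busy" means the request never reached the hardware, so the mapping is undone and the edesc freed on the spot (lines 804-805). Sketch, assuming mainline's routing test on CRYPTO_TFM_REQ_MAY_BACKLOG and the surrounding locals:

    state->ahash_op_done = cbk;     /* JR completion callback */

    /* Backloggable requests go via crypto-engine; the rest hit the ring. */
    if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
            ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
                                                         req);
    else
            ret = caam_jr_enqueue(jrdev, desc, cbk, req);

    if (ret != -EINPROGRESS && ret != -EBUSY) {
            /* Never queued: unwind DMA state and free. */
            ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
            kfree(edesc);
    }
    return ret;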
825 struct ahash_edesc *edesc; in ahash_update_ctx() local
871 edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update, in ahash_update_ctx()
873 if (!edesc) { in ahash_update_ctx()
878 edesc->src_nents = src_nents; in ahash_update_ctx()
879 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_update_ctx()
882 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_update_ctx()
886 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_update_ctx()
892 edesc->sec4_sg + sec4_sg_src_index, in ahash_update_ctx()
895 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - in ahash_update_ctx()
898 desc = edesc->hw_desc; in ahash_update_ctx()
900 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_ctx()
903 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_ctx()
909 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + in ahash_update_ctx()
932 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); in ahash_update_ctx()
933 kfree(edesc); in ahash_update_ctx()
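ahash_update_ctx() (lines 825-933) shows the full per-request lifecycle the later functions repeat with small variations: allocate the edesc sized for the worst-case table, record src_nents/sec4_sg_bytes for the eventual unmap, build the table as [running context | buffered bytes | new source data], DMA-map the table, point the descriptor's sequence-in at it, and enqueue. Every failure after allocation funnels into one unwind that unmaps with the same direction the context was mapped with (DMA_BIDIRECTIONAL here at line 932; the _no_ctx variant uses DMA_TO_DEVICE at line 1303). A compressed sketch of that spine; ctx_map_to_sec4_sg() and ahash_done_bi are mainline names not visible in the truncated hits, so treat them as assumptions:

    edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
                              ctx->sh_desc_update_dma);
    if (!edesc)
            return -ENOMEM;

    edesc->src_nents = src_nents;           /* recorded for the unmap path */
    edesc->sec4_sg_bytes = sec4_sg_bytes;

    /* Table layout: [0] running ctx, [1] partial buffer, [2..] req->src. */
    ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                             edesc->sec4_sg, DMA_BIDIRECTIONAL);
    if (ret)
            goto unmap_ctx;

    ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
    if (ret)
            goto unmap_ctx;

    edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
            ret = -ENOMEM;
            goto unmap_ctx;
    }

    append_seq_in_ptr(desc, edesc->sec4_sg_dma,
                      ctx->ctx_len + to_hash, LDST_SGF);

    return ahash_enqueue_req(jrdev, ahash_done_bi, req,
                             ctx->ctx_len, DMA_BIDIRECTIONAL);

    unmap_ctx:
            ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
                            DMA_BIDIRECTIONAL);
            kfree(edesc);
            return ret;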
947 struct ahash_edesc *edesc; in ahash_final_ctx() local
954 edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin, in ahash_final_ctx()
956 if (!edesc) in ahash_final_ctx()
959 desc = edesc->hw_desc; in ahash_final_ctx()
961 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_final_ctx()
964 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_final_ctx()
968 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_final_ctx()
972 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); in ahash_final_ctx()
974 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_final_ctx()
976 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_final_ctx()
982 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, in ahash_final_ctx()
993 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); in ahash_final_ctx()
994 kfree(edesc); in ahash_final_ctx()
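ahash_final_ctx() needs no new source data, so its table is tiny and fixed: the running context, then optionally the partial buffer, with the last valid entry flagged for the hardware by sg_to_sec4_set_last() at line 972, whose ternary accounts for an empty buffer. A sketch of just that table construction, under the same naming assumptions as above:

    /* Table: [0] running ctx, [1] buffered bytes (only if buflen != 0). */
    ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                             edesc->sec4_sg, DMA_BIDIRECTIONAL);
    if (ret)
            goto unmap_ctx;

    ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
    if (ret)
            goto unmap_ctx;

    /* Flag the final entry: index 1 with a buffer, index 0 without. */
    sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));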
1009 struct ahash_edesc *edesc; in ahash_finup_ctx() local
1032 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, in ahash_finup_ctx()
1034 if (!edesc) { in ahash_finup_ctx()
1039 desc = edesc->hw_desc; in ahash_finup_ctx()
1041 edesc->src_nents = src_nents; in ahash_finup_ctx()
1044 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
1048 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_finup_ctx()
1052 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, in ahash_finup_ctx()
1067 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
1068 kfree(edesc); in ahash_finup_ctx()
1081 struct ahash_edesc *edesc; in ahash_digest() local
1104 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0, in ahash_digest()
1106 if (!edesc) { in ahash_digest()
1111 edesc->src_nents = src_nents; in ahash_digest()
1113 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, in ahash_digest()
1116 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_digest()
1117 kfree(edesc); in ahash_digest()
1121 desc = edesc->hw_desc; in ahash_digest()
1125 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_digest()
1126 kfree(edesc); in ahash_digest()
1149 struct ahash_edesc *edesc; in ahash_final_no_ctx() local
1153 edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest, in ahash_final_no_ctx()
1155 if (!edesc) in ahash_final_no_ctx()
1158 desc = edesc->hw_desc; in ahash_final_no_ctx()
1182 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_final_no_ctx()
1183 kfree(edesc); in ahash_final_no_ctx()
1200 struct ahash_edesc *edesc; in ahash_update_no_ctx() local
1246 edesc = ahash_edesc_alloc(req, pad_nents, in ahash_update_no_ctx()
1249 if (!edesc) { in ahash_update_no_ctx()
1254 edesc->src_nents = src_nents; in ahash_update_no_ctx()
1255 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_update_no_ctx()
1257 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); in ahash_update_no_ctx()
1261 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); in ahash_update_no_ctx()
1263 desc = edesc->hw_desc; in ahash_update_no_ctx()
1265 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_no_ctx()
1268 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_no_ctx()
1274 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); in ahash_update_no_ctx()
1303 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_no_ctx()
1304 kfree(edesc); in ahash_update_no_ctx()
1319 struct ahash_edesc *edesc; in ahash_finup_no_ctx() local
1344 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, in ahash_finup_no_ctx()
1346 if (!edesc) { in ahash_finup_no_ctx()
1351 desc = edesc->hw_desc; in ahash_finup_no_ctx()
1353 edesc->src_nents = src_nents; in ahash_finup_no_ctx()
1354 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_finup_no_ctx()
1356 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); in ahash_finup_no_ctx()
1360 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, in ahash_finup_no_ctx()
1378 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_finup_no_ctx()
1379 kfree(edesc); in ahash_finup_no_ctx()
1398 struct ahash_edesc *edesc; in ahash_update_first() local
1438 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? in ahash_update_first()
1442 if (!edesc) { in ahash_update_first()
1447 edesc->src_nents = src_nents; in ahash_update_first()
1449 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, in ahash_update_first()
1454 desc = edesc->hw_desc; in ahash_update_first()
1486 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_first()
1487 kfree(edesc); in ahash_update_first()
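Taken together, the hits trace the edesc lifecycle across the ahash state machine: ahash_update_first() and ahash_update_ctx() feed data in, ahash_final_ctx()/ahash_finup_ctx() and their _no_ctx twins finish a stream, and ahash_digest() is the one-shot path. Every one of them allocates an edesc, and exactly one of the completion callback or the error path frees it. From a caller's point of view all of this hides behind the standard kernel ahash API; a hedged, driver-independent usage example (the function name is made up for illustration, and the input buffer must be DMA-able, i.e. not on the stack):

    #include <crypto/hash.h>
    #include <linux/scatterlist.h>

    /* Exercises the one-shot digest path (e.g. ahash_digest() above when
     * the CAAM driver backs "sha256"). Error handling abbreviated. */
    static int sha256_one_buffer(const u8 *data, unsigned int len, u8 *out)
    {
            struct crypto_ahash *tfm;
            struct ahash_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int ret;

            tfm = crypto_alloc_ahash("sha256", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = ahash_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    crypto_free_ahash(tfm);
                    return -ENOMEM;
            }

            sg_init_one(&sg, data, len);
            ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                       crypto_req_done, &wait);
            ahash_request_set_crypt(req, &sg, out, len);

            ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

            ahash_request_free(req);
            crypto_free_ahash(tfm);
            return ret;
    }

Setting CRYPTO_TFM_REQ_MAY_BACKLOG here is what steers a request onto the crypto-engine path seen in ahash_enqueue_req() and the bklog handling in the completion callbacks above.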