// SPDX-License-Identifier: GPL-2.0-only
/*
 * Provide TDMA helper functions used by cipher and hash algorithm
 * implementations.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include "cesa.h"

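/*
 * Advance the scatterlist DMA iterator by @len bytes, moving on to the
 * next scatterlist entry when the current one is exhausted. Returns false
 * once the whole operation length has been covered or the scatterlist is
 * done, true if another transfer remains.
 */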
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
					struct mv_cesa_sg_dma_iter *sgiter,
					unsigned int len)
{
	if (!sgiter->sg)
		return false;

	sgiter->op_offset += len;
	sgiter->offset += len;
	if (sgiter->offset == sg_dma_len(sgiter->sg)) {
		if (sg_is_last(sgiter->sg))
			return false;
		sgiter->offset = 0;
		sgiter->sg = sg_next(sgiter->sg);
	}

	if (sgiter->op_offset == iter->op_len)
		return false;

	return true;
}

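/*
 * Kick off DMA processing of a request chain: program the TDMA burst and
 * byte-swap settings, point the engine at the first descriptor of the
 * chain and enable the accelerator. The WARN_ON checks that the engine is
 * not already running when we start it.
 */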
void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
	struct mv_cesa_engine *engine = dreq->engine;

	writel_relaxed(0, engine->regs + CESA_SA_CFG);

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
	writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
		       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
		       engine->regs + CESA_TDMA_CONTROL);

	writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
		       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
		       engine->regs + CESA_SA_CFG);
	writel_relaxed(dreq->chain.first->cur_dma,
		       engine->regs + CESA_TDMA_NEXT_ADDR);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

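/*
 * Walk the request's TDMA descriptor chain and return every descriptor,
 * plus any attached operation context, to its dma_pool, then reset the
 * chain pointers.
 */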
void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma;) {
		struct mv_cesa_tdma_desc *old_tdma = tdma;
		u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
				      le32_to_cpu(tdma->src));

		tdma = tdma->next;
		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
			      old_tdma->cur_dma);
	}

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;
}

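/*
 * Turn the SRAM-relative offsets stored in each descriptor into absolute
 * DMA addresses for @engine, and let mv_cesa_adjust_op() fix up operation
 * contexts for that engine.
 */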
void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
			 struct mv_cesa_engine *engine)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
		if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
			tdma->dst = cpu_to_le32(tdma->dst_dma + engine->sram_dma);

		if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
			tdma->src = cpu_to_le32(tdma->src_dma + engine->sram_dma);

		if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
			mv_cesa_adjust_op(engine, tdma->op);
	}
}

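/*
 * Append the request's descriptor chain to the engine's chain, linking the
 * hardware descriptors together (via next_dma) unless a chain break is
 * required.
 */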
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
			struct mv_cesa_req *dreq)
{
	if (engine->chain.first == NULL && engine->chain.last == NULL) {
		engine->chain.first = dreq->chain.first;
		engine->chain.last = dreq->chain.last;
	} else {
		struct mv_cesa_tdma_desc *last;

		last = engine->chain.last;
		last->next = dreq->chain.first;
		engine->chain.last = dreq->chain.last;

		/*
		 * Break the DMA chain if CESA_TDMA_BREAK_CHAIN is set on
		 * the last element of the current chain, or if the request
		 * being queued needs the IV regs to be set before launching
		 * the request.
		 */
		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
			last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
	}
}

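/*
 * Walk the engine's descriptor chain and complete every request whose
 * descriptors have been fully processed, i.e. every request up to the
 * descriptor currently pointed to by the CESA_TDMA_CUR register.
 */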
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req = NULL;
	struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
	dma_addr_t tdma_cur;
	int res = 0;

	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);

	for (tdma = engine->chain.first; tdma; tdma = next) {
		spin_lock_bh(&engine->lock);
		next = tdma->next;
		spin_unlock_bh(&engine->lock);

		if (tdma->flags & CESA_TDMA_END_OF_REQ) {
			struct crypto_async_request *backlog = NULL;
			struct mv_cesa_ctx *ctx;
			u32 current_status;

			spin_lock_bh(&engine->lock);
			/*
			 * If req is NULL, this means we're processing the
			 * request in engine->req.
			 */
			if (!req)
				req = engine->req;
			else
				req = mv_cesa_dequeue_req_locked(engine,
								 &backlog);

			/* Re-chain to the next request */
			engine->chain.first = tdma->next;
			tdma->next = NULL;

			/* If this is the last request, clear the chain */
			if (engine->chain.first == NULL)
				engine->chain.last = NULL;
			spin_unlock_bh(&engine->lock);

			ctx = crypto_tfm_ctx(req->tfm);
			current_status = (tdma->cur_dma == tdma_cur) ?
					  status : CESA_SA_INT_ACC0_IDMA_DONE;
			res = ctx->ops->process(req, current_status);
			ctx->ops->complete(req);

			if (res == 0)
				mv_cesa_engine_enqueue_complete_request(engine,
									req);

			if (backlog)
				backlog->complete(backlog, -EINPROGRESS);
		}

		if (res || tdma->cur_dma == tdma_cur)
			break;
	}

	/*
	 * Save the last request in error to engine->req, so that the core
	 * knows which request was faulty.
	 */
	if (res) {
		spin_lock_bh(&engine->lock);
		engine->req = req;
		spin_unlock_bh(&engine->lock);
	}

	return res;
}

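/*
 * Allocate a zeroed TDMA descriptor from the descriptor pool and append
 * it to @chain, linking it to the previous descriptor both in the CPU
 * list (->next) and in the hardware list (->next_dma).
 */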
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *new_tdma = NULL;
	dma_addr_t dma_handle;

	new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
				   &dma_handle);
	if (!new_tdma)
		return ERR_PTR(-ENOMEM);

	new_tdma->cur_dma = dma_handle;
	if (chain->last) {
		chain->last->next_dma = cpu_to_le32(dma_handle);
		chain->last->next = new_tdma;
	} else {
		chain->first = new_tdma;
	}

	chain->last = new_tdma;

	return new_tdma;
}

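/*
 * Add a "result" descriptor that copies @size bytes of result data from
 * @src back to the location described by the request's operation
 * descriptor.
 */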
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
			      u32 size, u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma, *op_desc;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	/*
	 * We re-use an existing op_desc object to retrieve the context
	 * and result instead of allocating a new one.
	 * There is at least one object of this type in a CESA crypto
	 * req, just pick the first one in the chain.
	 */
	for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
		u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			break;
	}

	if (!op_desc)
		return -EIO;

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src_dma = src;
	tdma->dst_dma = op_desc->src_dma;
	tdma->op = op_desc->op;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_RESULT;
	return 0;
}

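/*
 * Allocate an operation context initialized from @op_templ and add a
 * descriptor that transfers it to the engine's SRAM. When @skip_ctx is
 * true, only the operation descriptor (not the full context) is copied.
 */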
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
					  const struct mv_cesa_op_ctx *op_templ,
					  bool skip_ctx,
					  gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;
	struct mv_cesa_op_ctx *op;
	dma_addr_t dma_handle;
	unsigned int size;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return ERR_CAST(tdma);

	op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
	if (!op)
		return ERR_PTR(-ENOMEM);

	*op = *op_templ;

	size = skip_ctx ? sizeof(op->desc) : sizeof(*op);

	tdma = chain->last;
	tdma->op = op;
	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = cpu_to_le32(dma_handle);
	tdma->dst_dma = CESA_SA_CFG_SRAM_OFFSET;
	tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;

	return op;
}

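/*
 * Add a plain data-transfer descriptor copying @size bytes from @src to
 * @dst. Only the SRAM placement bits of @flags are kept.
 */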
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
				  dma_addr_t dst, dma_addr_t src, u32 size,
				  u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src_dma = src;
	tdma->dst_dma = dst;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_DATA;

	return 0;
}

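/*
 * Add an all-zero placeholder descriptor; no transfer is programmed here,
 * it merely reserves a slot used when launching the chain.
 */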
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	return PTR_ERR_OR_ZERO(tdma);
}

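/*
 * Add a zero-length descriptor (only BIT(31) set in byte_cnt) marking the
 * end of the request's descriptor chain.
 */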
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(BIT(31));

	return 0;
}

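/*
 * Add one data-transfer descriptor per scatterlist segment covered by the
 * current operation, moving data between the scatterlist and the engine's
 * data SRAM in the direction given by sgiter->dir.
 */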
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
				 struct mv_cesa_dma_iter *dma_iter,
				 struct mv_cesa_sg_dma_iter *sgiter,
				 gfp_t gfp_flags)
{
	u32 flags = sgiter->dir == DMA_TO_DEVICE ?
		    CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
	unsigned int len;

	do {
		dma_addr_t dst, src;
		int ret;

		len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
		if (sgiter->dir == DMA_TO_DEVICE) {
			dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
			src = sg_dma_address(sgiter->sg) + sgiter->offset;
		} else {
			dst = sg_dma_address(sgiter->sg) + sgiter->offset;
			src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
		}

		ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
						    flags, gfp_flags);
		if (ret)
			return ret;

	} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));

	return 0;
}