// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

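/*
 * For example, with disks == 6 the four data sources live in
 * blocks[0..3] while P(blocks, 6) is blocks[4] and Q(blocks, 6) is
 * blocks[5].  Because P() and Q() expand to lvalues, the synchronous
 * fallback below can substitute a scratch page, e.g.
 * "P(blocks, disks) = pq_scribble_page;".
 */
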
#define MAX_DISKS 255

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Drivers force forward progress in case they can not provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void *)raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offsets[i];

			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

static inline bool
is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
		       int src_cnt, size_t len)
{
	int i;

	for (i = 0; i < src_cnt; i++) {
		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
			return false;
	}
	return true;
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q is only implemented in software */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[MAX_DISKS];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i],
						      offsets[i], len,
						      DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							P(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							Q(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		P(offsets, disks) = 0;
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		Q(offsets, disks) = 0;
	}
	do_sync_gen_syndrome(blocks, offsets, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
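
/*
 * Usage sketch (illustrative only): generate P and Q for a stripe of
 * four data pages plus P and Q.  The helper, its parameters and the
 * completion callback are hypothetical; a real caller such as md/raid5
 * manages stripe pages and scribble space itself.
 */
static void __maybe_unused example_gen_pq(struct page *blocks[6],
					  unsigned int offsets[6],
					  dma_async_tx_callback done_fn,
					  void *done_arg)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	/* scribble space: one slot per disk for the synchronous fallback */
	addr_conv_t addr_conv[6];

	/* blocks[0..3] = data, blocks[4] = P, blocks[5] = Q */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done_fn, done_arg,
			  addr_conv);
	tx = async_gen_syndrome(blocks, offsets, 6, PAGE_SIZE, &submit);

	/* a NULL return means the syndrome was computed synchronously */
	if (tx)
		async_tx_issue_pending(tx);
}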

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @s_off: spare buffer page offset
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   unsigned int s_off, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[MAX_DISKS];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4 || disks > MAX_DISKS);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks - 2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offsets[i], len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     P(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     Q(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		unsigned int p_off = P(offsets, disks);
		struct page *q_src = Q(blocks, disks);
		unsigned int q_off = Q(offsets, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor_offs(spare, s_off,
					    blocks, offsets, disks - 2, len,
					    submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + p_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			Q(offsets, disks) = s_off;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offsets, disks,
						len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + q_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		P(offsets, disks) = p_off;
		Q(blocks, disks) = q_src;
		Q(offsets, disks) = q_off;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
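
/*
 * Usage sketch (illustrative only): check P and Q of a six-block
 * stripe.  The helper and its parameters are hypothetical; 'spare'
 * must be a caller-allocated page backing the synchronous fallback,
 * and md/raid5 is the canonical in-tree caller of this API.
 */
static int __maybe_unused example_check_pq(struct page *blocks[6],
					   unsigned int offsets[6],
					   struct page *spare)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	enum sum_check_flags pqres = 0;
	/* scribble space: one slot per disk for the synchronous fallback */
	addr_conv_t addr_conv[6];

	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
	tx = async_syndrome_val(blocks, offsets, 6, PAGE_SIZE, &pqres,
				spare, 0, &submit);

	/* wait for completion before inspecting pqres */
	async_tx_quiesce(&tx);

	if (pqres & SUM_CHECK_P_RESULT)
		pr_debug("%s: P mismatch\n", __func__);
	if (pqres & SUM_CHECK_Q_RESULT)
		pr_debug("%s: Q mismatch\n", __func__);

	return pqres ? -EILSEQ : 0;
}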

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");