xref: /OK3568_Linux_fs/kernel/drivers/crypto/nx/nx-842-pseries.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for IBM Power 842 compression accelerator
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#include <asm/vio.h>

#include "nx-842.h"
#include "nx_csbcpb.h" /* struct nx_csbcpb */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");

static struct nx842_constraints nx842_pseries_constraints = {
	.alignment =	DDE_BUFFER_ALIGN,
	.multiple =	DDE_BUFFER_LAST_MULT,
	.minimum =	DDE_BUFFER_LAST_MULT,
	.maximum =	PAGE_SIZE, /* dynamic, max_sync_size */
};

static int check_constraints(unsigned long buf, unsigned int *len, bool in)
{
	if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
		pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
			 in ? "input" : "output", buf,
			 nx842_pseries_constraints.alignment);
		return -EINVAL;
	}
	if (*len % nx842_pseries_constraints.multiple) {
		pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.multiple);
		if (in)
			return -EINVAL;
		*len = round_down(*len, nx842_pseries_constraints.multiple);
	}
	if (*len < nx842_pseries_constraints.minimum) {
		pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.minimum);
		return -EINVAL;
	}
	if (*len > nx842_pseries_constraints.maximum) {
		pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.maximum);
		if (in)
			return -EINVAL;
		*len = nx842_pseries_constraints.maximum;
	}
	return 0;
}

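/*
 * Illustrative sketch only (not part of the driver's call path): a caller
 * that cannot guarantee suitably sized buffers could apply the same rules
 * check_constraints() uses for output buffers before submitting work:
 *
 *	unsigned int outlen = round_down(avail, nx842_pseries_constraints.multiple);
 *
 *	if (outlen < nx842_pseries_constraints.minimum ||
 *	    !IS_ALIGNED((unsigned long)out, nx842_pseries_constraints.alignment))
 *		return -EINVAL;	// e.g. fall back to the software 842 path
 *
 * In this driver the generic nx-842 crypto glue is the consumer of these
 * constraints, via nx842_pseries_driver further down in this file.
 */
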
/* I assume we need to align the CSB? */
#define WORKMEM_ALIGN	(256)

struct nx842_workmem {
	/* scatterlist */
	char slin[4096];
	char slout[4096];
	/* coprocessor status/parameter block */
	struct nx_csbcpb csbcpb;

	char padding[WORKMEM_ALIGN];
} __aligned(WORKMEM_ALIGN);

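/*
 * Note: the wmem pointer handed to nx842_pseries_compress()/decompress()
 * only needs to be workmem_size == sizeof(struct nx842_workmem) bytes; the
 * functions realign it themselves.  A hypothetical caller sketch:
 *
 *	void *wmem = kmalloc(nx842_pseries_driver.workmem_size, GFP_KERNEL);
 *	struct nx842_workmem *wm = PTR_ALIGN(wmem, WORKMEM_ALIGN);
 *
 * The trailing padding[] member is what leaves room for that PTR_ALIGN()
 * adjustment when the allocation is not already WORKMEM_ALIGN aligned.
 */
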
/* Macros for fields within nx_csbcpb */
/* Check the valid bit within the csbcpb valid field */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/* CE macros operate on the completion_extension field bits in the csbcpb.
 * CE0 0=full completion, 1=partial completion
 * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
 * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))

/* The NX unit accepts data only on 4K page boundaries */
#define NX842_HW_PAGE_SIZE	(4096)
#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))

struct ibm_nx842_counters {
	atomic64_t comp_complete;
	atomic64_t comp_failed;
	atomic64_t decomp_complete;
	atomic64_t decomp_failed;
	atomic64_t swdecomp;
	atomic64_t comp_times[32];
	atomic64_t decomp_times[32];
};

static struct nx842_devdata {
	struct vio_dev *vdev;
	struct device *dev;
	struct ibm_nx842_counters *counters;
	unsigned int max_sg_len;
	unsigned int max_sync_size;
	unsigned int max_sync_sg;
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);

#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);

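/*
 * For reference, each NX842_COUNTER_INC(x) instance above expands to a
 * helper of the form
 *
 *	static inline void nx842_inc_comp_complete(const struct nx842_devdata *dev)
 *	{
 *		if (dev)
 *			atomic64_inc(&dev->counters->comp_complete);
 *	}
 *
 * so the fast paths below can bump a counter without first checking that
 * the device data is present.
 */
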
#define NX842_HIST_SLOTS 16

static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
{
	int bucket = fls(time);

	if (bucket)
		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);

	atomic64_inc(&times[bucket]);
}

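/*
 * fls() makes the histogram buckets power-of-two sized: a duration of
 * 0-1us lands in bucket 0, 2-3us in bucket 1, 4-7us in bucket 2, and so
 * on; anything at or above 32768us (2^(NX842_HIST_SLOTS - 1)) is clamped
 * into the last of the NX842_HIST_SLOTS buckets, matching the "last
 * bucket holds everything over ..." note in nx842_timehist_show() below.
 */
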
/* NX unit operation flags */
#define NX842_OP_COMPRESS	0x0
#define NX842_OP_CRC		0x1
#define NX842_OP_DECOMPRESS	0x2
#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC		(1<<23)
#define NX842_OP_NOTIFY		(1<<22)
#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)

static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	/* No use of DMA mappings within the driver. */
	return 0;
}

struct nx842_slentry {
	__be64 ptr; /* Real address (use __pa()) */
	__be64 len;
};

/* pHyp scatterlist entry */
struct nx842_scatterlist {
	int entry_nr; /* number of slentries */
	struct nx842_slentry *entries; /* ptr to array of slentries */
};

/* Does not include sizeof(entry_nr) in the size */
static inline unsigned long nx842_get_scatterlist_size(
				struct nx842_scatterlist *sl)
{
	return sl->entry_nr * sizeof(struct nx842_slentry);
}

static int nx842_build_scatterlist(unsigned long buf, int len,
			struct nx842_scatterlist *sl)
{
	unsigned long entrylen;
	struct nx842_slentry *entry;

	sl->entry_nr = 0;

	entry = sl->entries;
	while (len) {
		entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
		entrylen = min_t(int, len,
				 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
		entry->len = cpu_to_be64(entrylen);

		len -= entrylen;
		buf += entrylen;

		sl->entry_nr++;
		entry++;
	}

	return 0;
}

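/*
 * Worked example (assuming LEN_ON_SIZE() from nx-842.h yields the bytes
 * remaining before the next boundary of the given size): a 0x300 byte
 * buffer starting at offset 0x1f00 crosses a 4K page boundary, so it
 * becomes two scatterlist entries rather than one direct DDE:
 *
 *	entries[0]: 0x100 bytes starting at buf        (up to the page end)
 *	entries[1]: 0x200 bytes starting at buf + 0x100 (the next page)
 *
 * nx842_pseries_compress()/decompress() below use the direct-DDE form only
 * when the whole buffer fits inside one NX842_HW_PAGE_SIZE page.
 */
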
static int nx842_validate_result(struct device *dev,
	struct cop_status_block *csb)
{
	/* The csb must be valid after returning from vio_h_cop_sync */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: csbcpb not valid upon completion.\n",
				__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
				csb->valid,
				csb->crb_seq_number,
				csb->completion_code,
				csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
				be32_to_cpu(csb->processed_byte_count),
				(unsigned long)be64_to_cpu(csb->address));
		return -EIO;
	}

	/* Check return values from the hardware in the CSB */
	switch (csb->completion_code) {
	case 0:	/* Completed without error */
		break;
	case 64: /* Compression ok, but output larger than input */
		dev_dbg(dev, "%s: output size larger than input size\n",
					__func__);
		break;
	case 13: /* Output buffer too small */
		dev_dbg(dev, "%s: Out of space in output buffer\n",
					__func__);
		return -ENOSPC;
	case 65: /* Calculated CRC doesn't match the passed value */
		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
					__func__);
		return -EINVAL;
	case 66: /* Input data contains an illegal template field */
	case 67: /* Template indicates data past the end of the input stream */
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
					__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
					__func__, csb->completion_code);
		return -EIO;
	}

	/* Hardware sanity check */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
				"data returned is unusable, contact support.\n"
				"(Additional info: csbcpb->processed bytes "
				"does not specify processed bytes for the "
				"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}

/**
 * nx842_pseries_compress - Compress data using the 842 algorithm
 *
 * Compression is provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is compressed and the result is stored in the
 * provided output buffer.
 *
 * Upon return from this function @outlen contains the length of the
 * compressed data.  If there is an error then @outlen will be 0 and an
 * error will be specified by the return code from this function.
 *
 * @in: Pointer to input buffer
 * @inlen: Length of input buffer
 * @out: Pointer to output buffer
 * @outlen: Length of output buffer
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_pseries_driver.workmem_size
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENOMEM	Unable to allocate internal buffers
 *   -ENOSPC	Output buffer is too small
 *   -EIO	Internal error
 *   -ENODEV	Hardware unavailable
 */
static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
				  unsigned char *out, unsigned int *outlen,
				  void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	/* Init scatterlist */
	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_COMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		ret = -EIO;
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
	dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);

unlock:
	if (ret)
		nx842_inc_comp_failed(local_devdata);
	else {
		nx842_inc_comp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}
	rcu_read_unlock();
	return ret;
}

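/*
 * Usage sketch (illustrative only; assumes buffers already satisfy
 * nx842_pseries_constraints): a caller holding the driver descriptor would
 * typically drive compression like this:
 *
 *	unsigned int outlen = PAGE_SIZE;
 *	void *wmem = kmalloc(nx842_pseries_driver.workmem_size, GFP_KERNEL);
 *
 *	if (!wmem)
 *		return -ENOMEM;
 *	ret = nx842_pseries_compress(in, inlen, out, &outlen, wmem);
 *	// on success, outlen now holds the compressed length
 *	kfree(wmem);
 *
 * In this driver that role is played by the generic nx-842 crypto glue,
 * which reaches these callbacks through nx842_pseries_driver below.
 */
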
/**
 * nx842_pseries_decompress - Decompress data using the 842 algorithm
 *
 * Decompression is provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is decompressed and the result is stored in the
 * provided output buffer.  The size allocated to the output buffer is
 * provided by the caller of this function in @outlen.  Upon return from
 * this function @outlen contains the length of the decompressed data.
 * If there is an error then @outlen will be 0 and an error will be
 * specified by the return code from this function.
 *
 * @in: Pointer to input buffer
 * @inlen: Length of input buffer
 * @out: Pointer to output buffer
 * @outlen: Length of output buffer
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_pseries_driver.workmem_size
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENODEV	Hardware decompression device is unavailable
 *   -ENOMEM	Unable to allocate internal buffers
 *   -ENOSPC	Output buffer is too small
 *   -EINVAL	Bad input data encountered when attempting decompress
 *   -EIO	Internal error
 */
static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
				    unsigned char *out, unsigned int *outlen,
				    void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	/* Ensure page alignment and size */
	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);

	/* Init scatterlist */
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_DECOMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);

unlock:
	if (ret)
		/* decompress fail */
		nx842_inc_decomp_failed(local_devdata);
	else {
		nx842_inc_decomp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}

	rcu_read_unlock();
	return ret;
}

/**
 * nx842_OF_set_defaults -- Set default (disabled) values for devdata
 *
 * @devdata: struct nx842_devdata to update
 *
 * Returns:
 *  0 on success
 *  -ENOENT if @devdata ptr is NULL
 */
static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
{
	if (devdata) {
		devdata->max_sync_size = 0;
		devdata->max_sync_sg = 0;
		devdata->max_sg_len = 0;
		return 0;
	} else
		return -ENOENT;
}

/**
 * nx842_OF_upd_status -- Check the device info from the OF status prop
 *
 * The status property indicates whether the accelerator is enabled.  If
 * the device is in the OF tree the hardware is present, but it may only
 * be used when the status is 'okay'.  Otherwise the device driver will
 * be disabled.
 *
 * @devdata: struct nx842_devdata to use for dev_info
 * @prop: struct property pointer containing the status for the update
 *
 * Returns:
 *  0 - Device is available
 *  -ENODEV - Device is not available
 *  -EINVAL - Status value is not recognized
 */
static int nx842_OF_upd_status(struct nx842_devdata *devdata,
			       struct property *prop)
{
	const char *status = (const char *)prop->value;

	if (!strncmp(status, "okay", (size_t)prop->length))
		return 0;
	if (!strncmp(status, "disabled", (size_t)prop->length))
		return -ENODEV;
	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);

	return -EINVAL;
}

/**
 * nx842_OF_upd_maxsglen -- Update the device info from the OF maxsglen prop
 *
 * Definition of the 'ibm,max-sg-len' OF property:
 *  This field indicates the maximum byte length of a scatter list
 *  for the platform facility. It is a single cell encoded as with encode-int.
 *
 * Example:
 *  # od -x ibm,max-sg-len
 *  0000000 0000 0ff0
 *
 *  In this example, the maximum byte length of a scatter list is
 *  0x0ff0 (4,080).
 *
 * @devdata: struct nx842_devdata to update
 * @prop: struct property pointer containing the maxsglen for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
					struct property *prop)
{
	int ret = 0;
	const unsigned int maxsglen = of_read_number(prop->value, 1);

	if (prop->length != sizeof(maxsglen)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
				prop->length, sizeof(maxsglen));
		ret = -EINVAL;
	} else {
		devdata->max_sg_len = min_t(unsigned int,
					    maxsglen, NX842_HW_PAGE_SIZE);
	}

	return ret;
}

/**
 * nx842_OF_upd_maxsyncop -- Update the device info from the OF maxsyncop prop
 *
 * Definition of the 'ibm,max-sync-cop' OF property:
 *  Two series of cells.  The first series of cells represents the maximums
 *  that can be synchronously compressed. The second series of cells
 *  represents the maximums that can be synchronously decompressed.
 *  1. The first cell in each series contains the count of the number of
 *     data length, scatter list elements pairs that follow – each being
 *     of the form
 *    a. One cell data byte length
 *    b. One cell total number of scatter list elements
 *
 * Example:
 *  # od -x ibm,max-sync-cop
 *  0000000 0000 0001 0000 1000 0000 01fe 0000 0001
 *  0000020 0000 1000 0000 01fe
 *
 *  In this example, compression supports 0x1000 (4,096) data byte length
 *  and 0x1fe (510) total scatter list elements.  Decompression supports
 *  0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
 *  elements.
 *
 * @devdata: struct nx842_devdata to update
 * @prop: struct property pointer containing the maxsyncop for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
					struct property *prop)
{
	int ret = 0;
	unsigned int comp_data_limit, decomp_data_limit;
	unsigned int comp_sg_limit, decomp_sg_limit;
	const struct maxsynccop_t {
		__be32 comp_elements;
		__be32 comp_data_limit;
		__be32 comp_sg_limit;
		__be32 decomp_elements;
		__be32 decomp_data_limit;
		__be32 decomp_sg_limit;
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
				sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;
	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);

	/* Use one limit rather than separate limits for compression and
	 * decompression. Set a maximum for this so as not to exceed the
	 * size that the header can support and round the value down to
	 * the hardware page size (4K) */
	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);

	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
					65536);

	if (devdata->max_sync_size < 4096) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	nx842_pseries_constraints.maximum = devdata->max_sync_size;

	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}

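/*
 * With the example property dump from the kernel-doc comment above, the
 * fields parse as comp_elements = 1, comp_data_limit = 0x1000,
 * comp_sg_limit = 0x1fe, decomp_elements = 1, decomp_data_limit = 0x1000
 * and decomp_sg_limit = 0x1fe, so the min()/clamp logic above yields
 * max_sync_size = 0x1000 and max_sync_sg = 0x1fe.
 */
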
/**
 * nx842_OF_upd -- Handle OF properties updates for the device.
 *
 * Set all properties from the OF tree.  Optionally, a new property
 * can be provided by the @new_prop pointer to overwrite an existing value.
 * The device will remain disabled until all values are valid, this function
 * will return an error for updates unless all values are valid.
 *
 * @new_prop: If not NULL, this property is being updated.  If NULL, update
 *  all properties from the current values in the OF tree.
 *
 * Returns:
 *  0 - Success
 *  -ENOMEM - Could not allocate memory for new devdata structure
 *  -EINVAL - property value not found, new_prop is not a recognized
 *	property for the device or property value is not valid.
 *  -ENODEV - Device is not available
 */
static int nx842_OF_upd(struct property *new_prop)
{
	struct nx842_devdata *old_devdata = NULL;
	struct nx842_devdata *new_devdata = NULL;
	struct device_node *of_node = NULL;
	struct property *status = NULL;
	struct property *maxsglen = NULL;
	struct property *maxsyncop = NULL;
	int ret = 0;
	unsigned long flags;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	if (old_devdata)
		of_node = old_devdata->dev->of_node;

	if (!old_devdata || !of_node) {
		pr_err("%s: device is not available\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		kfree(new_devdata);
		return -ENODEV;
	}

	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
	new_devdata->counters = old_devdata->counters;

	/* Set ptrs for existing properties */
	status = of_find_property(of_node, "status", NULL);
	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
	if (!status || !maxsglen || !maxsyncop) {
		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
		ret = -EINVAL;
		goto error_out;
	}

	/*
	 * If this is a property update, there are only certain properties that
	 * we care about. Bail if it isn't in the below list
	 */
	if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) ||
		         strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) ||
		         strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
		goto out;

	/* Perform property updates */
	ret = nx842_OF_upd_status(new_devdata, status);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
	if (ret)
		goto error_out;

out:
	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
			__func__, new_devdata->max_sync_size,
			old_devdata->max_sync_size);
	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
			__func__, new_devdata->max_sync_sg,
			old_devdata->max_sync_sg);
	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
			__func__, new_devdata->max_sg_len,
			old_devdata->max_sg_len);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(new_devdata->dev, new_devdata);
	kfree(old_devdata);
	return 0;

error_out:
	if (new_devdata) {
		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
		nx842_OF_set_defaults(new_devdata);
		rcu_assign_pointer(devdata, new_devdata);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		synchronize_rcu();
		dev_set_drvdata(new_devdata->dev, new_devdata);
		kfree(old_devdata);
	} else {
		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
	}

	if (!ret)
		ret = -EINVAL;
	return ret;
}

/**
 * nx842_OF_notifier - Process updates to OF properties for the device
 *
 * @np: notifier block
 * @action: notifier action
 * @data: struct of_reconfig_data pointer if action is
 *	OF_RECONFIG_UPDATE_PROPERTY
 *
 * Returns:
 *	NOTIFY_OK on success
 *	NOTIFY_BAD encoded with error number on failure, use
 *		notifier_to_errno() to decode this value
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
			     void *data)
{
	struct of_reconfig_data *upd = data;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	if (local_devdata &&
			action == OF_RECONFIG_UPDATE_PROPERTY &&
			!strcmp(upd->dn->name, node->name)) {
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};

#define nx842_counter_read(_name)					\
static ssize_t nx842_##_name##_show(struct device *dev,		\
		struct device_attribute *attr,				\
		char *buf) {						\
	struct nx842_devdata *local_devdata;			\
	int p = 0;							\
	rcu_read_lock();						\
	local_devdata = rcu_dereference(devdata);			\
	if (local_devdata)						\
		p = snprintf(buf, PAGE_SIZE, "%lld\n",			\
		       atomic64_read(&local_devdata->counters->_name));	\
	rcu_read_unlock();						\
	return p;							\
}

#define NX842DEV_COUNTER_ATTR_RO(_name)					\
	nx842_counter_read(_name);					\
	static struct device_attribute dev_attr_##_name = __ATTR(_name,	\
						0444,			\
						nx842_##_name##_show,\
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

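/*
 * For reference, each NX842DEV_COUNTER_ATTR_RO(name) instance above expands
 * to an nx842_<name>_show() helper plus a read-only dev_attr_<name>, e.g.
 * for comp_complete:
 *
 *	static ssize_t nx842_comp_complete_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		... prints devdata->counters->comp_complete under rcu_read_lock() ...
 *	}
 *	static struct device_attribute dev_attr_comp_complete =
 *		__ATTR(comp_complete, 0444, nx842_comp_complete_show, NULL);
 *
 * These land directly in the vio device's sysfs directory because
 * nx842_attribute_group.name below is NULL.
 */
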
static ssize_t nx842_timehist_show(struct device *,
		struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
		nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
		0444, nx842_timehist_show, NULL);

static ssize_t nx842_timehist_show(struct device *dev,
		struct device_attribute *attr, char *buf) {
	char *p = buf;
	struct nx842_devdata *local_devdata;
	atomic64_t *times;
	int bytes_remain = PAGE_SIZE;
	int bytes;
	int i;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata) {
		rcu_read_unlock();
		return 0;
	}

	if (attr == &dev_attr_comp_times)
		times = local_devdata->counters->comp_times;
	else if (attr == &dev_attr_decomp_times)
		times = local_devdata->counters->decomp_times;
	else {
		rcu_read_unlock();
		return 0;
	}

	for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
			       i ? (2<<(i-1)) : 0, (2<<i)-1,
			       atomic64_read(&times[i]));
		bytes_remain -= bytes;
		p += bytes;
	}
	/* The last bucket holds everything over
	 * 2<<(NX842_HIST_SLOTS - 2) us */
	bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
			2<<(NX842_HIST_SLOTS - 2),
			atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
	p += bytes;

	rcu_read_unlock();
	return p - buf;
}

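/*
 * The comp_times/decomp_times attributes therefore read back as one line
 * per bucket, for example:
 *
 *	0-1us:	<count>
 *	2-3us:	<count>
 *	...
 *	32768us - :	<count>
 *
 * Buckets 0 through (NX842_HIST_SLOTS - 3) are printed individually; note
 * that bucket (NX842_HIST_SLOTS - 2), covering 16384-32767us, is filled by
 * ibm_nx842_incr_hist() but is not printed by this show routine.
 */
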
static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};

static struct attribute_group nx842_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = nx842_sysfs_entries,
};

static struct nx842_driver nx842_pseries_driver = {
	.name =		KBUILD_MODNAME,
	.owner =	THIS_MODULE,
	.workmem_size =	sizeof(struct nx842_workmem),
	.constraints =	&nx842_pseries_constraints,
	.compress =	nx842_pseries_compress,
	.decompress =	nx842_pseries_decompress,
};

static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
{
	return nx842_crypto_init(tfm, &nx842_pseries_driver);
}

static struct crypto_alg nx842_pseries_alg = {
	.cra_name		= "842",
	.cra_driver_name	= "842-nx",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= nx842_pseries_crypto_init,
	.cra_exit		= nx842_crypto_exit,
	.cra_u			= { .compress = {
	.coa_compress		= nx842_crypto_compress,
	.coa_decompress		= nx842_crypto_decompress } }
};

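/*
 * Usage sketch (assuming the legacy synchronous compression API in
 * <linux/crypto.h>): once this alg is registered in nx842_probe(), other
 * kernel code can reach the accelerator by algorithm name without knowing
 * about the VIO device, e.g.:
 *
 *	struct crypto_comp *tfm = crypto_alloc_comp("842", 0, 0);
 *	unsigned int dlen = PAGE_SIZE;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);
 *	crypto_free_comp(tfm);
 *
 * The driver-specific name "842-nx" can also be requested directly, and the
 * cra_priority of 300 is intended to make this implementation preferred
 * over the generic software "842" one when both are present.
 */
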
static int nx842_probe(struct vio_dev *viodev,
		       const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
			GFP_NOFS);
	if (!new_devdata->counters) {
		kfree(new_devdata);
		return -ENOMEM;
	}

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		ret = -1;
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	ret = nx842_OF_upd(NULL);
	if (ret)
		goto error;

	ret = crypto_register_alg(&nx842_pseries_alg);
	if (ret) {
		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	return ret;
}

static int nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);

	return 0;
}

static const struct vio_device_id nx842_vio_driver_ids[] = {
	{"ibm,compression-v1", "ibm,compression"},
	{"", ""},
};
MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);

static struct vio_driver nx842_vio_driver = {
	.name = KBUILD_MODNAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_vio_driver_ids,
};

static int __init nx842_pseries_init(void)
{
	struct nx842_devdata *new_devdata;
	int ret;

	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
		return -ENODEV;

	RCU_INIT_POINTER(devdata, NULL);
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
	if (!new_devdata)
		return -ENOMEM;

	RCU_INIT_POINTER(devdata, new_devdata);

	ret = vio_register_driver(&nx842_vio_driver);
	if (ret) {
		pr_err("Could not register VIO driver %d\n", ret);

		kfree(new_devdata);
		return ret;
	}

	return 0;
}

module_init(nx842_pseries_init);

static void __exit nx842_pseries_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	if (old_devdata && old_devdata->dev)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);
	vio_unregister_driver(&nx842_vio_driver);
}

module_exit(nx842_pseries_exit);