// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for IBM PowerNV compression accelerator
 *
 * Copyright (C) 2015 Dan Streetman, IBM Corp
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "nx-842.h"

#include <linux/timer.h>

#include <asm/prom.h>
#include <asm/icswx.h>
#include <asm/vas.h>
#include <asm/reg.h>
#include <asm/opal-api.h>
#include <asm/opal.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("H/W Compression driver for IBM PowerNV processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");

#define WORKMEM_ALIGN	(CRB_ALIGN)
#define CSB_WAIT_MAX	(5000) /* ms */
#define VAS_RETRIES	(10)
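
/*
 * Note: callers hand in an unaligned buffer of
 * nx842_powernv_driver.workmem_size bytes; the exec paths below align it
 * with PTR_ALIGN(workmem, WORKMEM_ALIGN), and the trailing padding in
 * struct nx842_workmem ensures the aligned structure still fits within
 * that buffer.
 */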
struct nx842_workmem {
	/* Below fields must be properly aligned */
	struct coprocessor_request_block crb; /* CRB_ALIGN align */
	struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */
	struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */
	/* Above fields must be properly aligned */

	ktime_t start;

	char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
} __packed __aligned(WORKMEM_ALIGN);

struct nx_coproc {
	unsigned int chip_id;
	unsigned int ct;	/* Can be 842 or GZIP high/normal */
	unsigned int ci;	/* Coprocessor instance, used with icswx */
	struct {
		struct vas_window *rxwin;
		int id;
	} vas;
	struct list_head list;
};

/*
 * Per-CPU send window used to send requests to the NX engine on the chip
 * where the requesting CPU resides.  Used by the VAS execution path.
 */
static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);

/* no cpu hotplug on powernv, so this list never changes after init */
static LIST_HEAD(nx_coprocs);
static unsigned int nx842_ct;	/* used in icswx function */

/*
 * Use the same coprocessor-type values as skiboot; they match the
 * coprocessor types described in the NX workbook.
 */
#define NX_CT_GZIP	(2)	/* on P9 and later */
#define NX_CT_842	(3)

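/*
 * Execution routine selected at init time: nx842_exec_icswx() on
 * icswx-based systems (e.g. Power8) or nx842_exec_vas() on VAS-based
 * systems (Power9 and later); see nx_compress_powernv_init().
 */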
static int (*nx842_powernv_exec)(const unsigned char *in,
				unsigned int inlen, unsigned char *out,
				unsigned int *outlenp, void *workmem, int fc);

/**
 * setup_indirect_dde - Setup an indirect DDE
 *
 * The indirect DDE is set up with the DDE count, byte count, and address
 * of the first direct DDE in the list.
 */
static void setup_indirect_dde(struct data_descriptor_entry *dde,
			       struct data_descriptor_entry *ddl,
			       unsigned int dde_count, unsigned int byte_count)
{
	dde->flags = 0;
	dde->count = dde_count;
	dde->index = 0;
	dde->length = cpu_to_be32(byte_count);
	dde->address = cpu_to_be64(nx842_get_pa(ddl));
}

/**
 * setup_direct_dde - Setup single DDE from buffer
 *
 * The DDE is set up with the buffer and length.  The buffer must be
 * properly aligned.  The used length is returned.
 * Returns:
 * N	Successfully set up DDE with N bytes
 */
static unsigned int setup_direct_dde(struct data_descriptor_entry *dde,
				     unsigned long pa, unsigned int len)
{
	unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa));

	dde->flags = 0;
	dde->count = 0;
	dde->index = 0;
	dde->length = cpu_to_be32(l);
	dde->address = cpu_to_be64(pa);

	return l;
}

/**
 * setup_ddl - Setup DDL from buffer
 *
 * Returns:
 * 0	Successfully set up DDL
 */
static int setup_ddl(struct data_descriptor_entry *dde,
		     struct data_descriptor_entry *ddl,
		     unsigned char *buf, unsigned int len,
		     bool in)
{
	unsigned long pa = nx842_get_pa(buf);
	int i, ret, total_len = len;

	if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) {
		pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n",
			 in ? "input" : "output", pa, DDE_BUFFER_ALIGN);
		return -EINVAL;
	}

	/* Only the last buffer length needs checking: the buffer is
	 * DDE_BUFFER_ALIGN aligned, which is a multiple of
	 * DDE_BUFFER_SIZE_MULT, so all but the last per-page DDE buffer
	 * are guaranteed to be a multiple of DDE_BUFFER_SIZE_MULT.
	 */
	if (len % DDE_BUFFER_LAST_MULT) {
		pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n",
			 in ? "input" : "output", len, DDE_BUFFER_LAST_MULT);
		if (in)
			return -EINVAL;
		len = round_down(len, DDE_BUFFER_LAST_MULT);
	}

	/* use a single direct DDE */
	if (len <= LEN_ON_PAGE(pa)) {
		ret = setup_direct_dde(dde, pa, len);
		WARN_ON(ret < len);
		return 0;
	}

	/* use the DDL */
	for (i = 0; i < DDL_LEN_MAX && len > 0; i++) {
		ret = setup_direct_dde(&ddl[i], pa, len);
		buf += ret;
		len -= ret;
		pa = nx842_get_pa(buf);
	}

	if (len > 0) {
		pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n",
			 total_len, in ? "input" : "output", len);
		if (in)
			return -EMSGSIZE;
		total_len -= len;
	}
	setup_indirect_dde(dde, ddl, i, total_len);

	return 0;
}

#define CSB_ERR(csb, msg, ...)					\
	pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n",	\
	       ##__VA_ARGS__, (csb)->flags,			\
	       (csb)->cs, (csb)->cc, (csb)->ce,			\
	       be32_to_cpu((csb)->count))

#define CSB_ERR_ADDR(csb, msg, ...)				\
	CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__,		\
		(unsigned long)be64_to_cpu((csb)->address))

/**
 * wait_for_csb - Poll until the NX engine marks the CSB valid, then
 * translate its completion status into an errno.
 */
static int wait_for_csb(struct nx842_workmem *wmem,
			struct coprocessor_status_block *csb)
{
	ktime_t start = wmem->start, now = ktime_get();
	ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
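
	/*
	 * Note: wmem->start was recorded just before the request was
	 * submitted, so time spent queued at the coprocessor also counts
	 * against the CSB_WAIT_MAX budget.
	 */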
	while (!(READ_ONCE(csb->flags) & CSB_V)) {
		cpu_relax();
		now = ktime_get();
		if (ktime_after(now, timeout))
			break;
	}

	/* hw has updated csb and output buffer */
	barrier();

	/* check CSB flags */
	if (!(csb->flags & CSB_V)) {
		CSB_ERR(csb, "CSB still not valid after %ld us, giving up",
			(long)ktime_us_delta(now, start));
		return -ETIMEDOUT;
	}
	if (csb->flags & CSB_F) {
		CSB_ERR(csb, "Invalid CSB format");
		return -EPROTO;
	}
	if (csb->flags & CSB_CH) {
		CSB_ERR(csb, "Invalid CSB chaining state");
		return -EPROTO;
	}

	/* verify CSB completion sequence is 0 */
	if (csb->cs) {
		CSB_ERR(csb, "Invalid CSB completion sequence");
		return -EPROTO;
	}

	/* check CSB Completion Code */
	switch (csb->cc) {
	/* no error */
	case CSB_CC_SUCCESS:
		break;
	case CSB_CC_TPBC_GT_SPBC:
		/* not an error, but the compressed data is
		 * larger than the uncompressed data :(
		 */
		break;

	/* input data errors */
	case CSB_CC_OPERAND_OVERLAP:
		/* input and output buffers overlap */
		CSB_ERR(csb, "Operand Overlap error");
		return -EINVAL;
	case CSB_CC_INVALID_OPERAND:
		CSB_ERR(csb, "Invalid operand");
		return -EINVAL;
	case CSB_CC_NOSPC:
		/* output buffer too small */
		return -ENOSPC;
	case CSB_CC_ABORT:
		CSB_ERR(csb, "Function aborted");
		return -EINTR;
	case CSB_CC_CRC_MISMATCH:
		CSB_ERR(csb, "CRC mismatch");
		return -EINVAL;
	case CSB_CC_TEMPL_INVALID:
		CSB_ERR(csb, "Compressed data template invalid");
		return -EINVAL;
	case CSB_CC_TEMPL_OVERFLOW:
		CSB_ERR(csb, "Compressed data template shows data past end");
		return -EINVAL;
	case CSB_CC_EXCEED_BYTE_COUNT:	/* P9 or later */
		/*
		 * DDE byte count exceeds the limit specified in Maximum
		 * byte count register.
		 */
		CSB_ERR(csb, "DDE byte count exceeds the limit");
		return -EINVAL;

	/* these should not happen */
	case CSB_CC_INVALID_ALIGN:
		/* setup_ddl should have detected this */
		CSB_ERR_ADDR(csb, "Invalid alignment");
		return -EINVAL;
	case CSB_CC_DATA_LENGTH:
		/* setup_ddl should have detected this */
		CSB_ERR(csb, "Invalid data length");
		return -EINVAL;
	case CSB_CC_WR_TRANSLATION:
	case CSB_CC_TRANSLATION:
	case CSB_CC_TRANSLATION_DUP1:
	case CSB_CC_TRANSLATION_DUP2:
	case CSB_CC_TRANSLATION_DUP3:
	case CSB_CC_TRANSLATION_DUP4:
	case CSB_CC_TRANSLATION_DUP5:
	case CSB_CC_TRANSLATION_DUP6:
		/* should not happen, we use physical addrs */
		CSB_ERR_ADDR(csb, "Translation error");
		return -EPROTO;
	case CSB_CC_WR_PROTECTION:
	case CSB_CC_PROTECTION:
	case CSB_CC_PROTECTION_DUP1:
	case CSB_CC_PROTECTION_DUP2:
	case CSB_CC_PROTECTION_DUP3:
	case CSB_CC_PROTECTION_DUP4:
	case CSB_CC_PROTECTION_DUP5:
	case CSB_CC_PROTECTION_DUP6:
		/* should not happen, we use physical addrs */
		CSB_ERR_ADDR(csb, "Protection error");
		return -EPROTO;
	case CSB_CC_PRIVILEGE:
		/* shouldn't happen, we're in HYP mode */
		CSB_ERR(csb, "Insufficient Privilege error");
		return -EPROTO;
	case CSB_CC_EXCESSIVE_DDE:
		/* shouldn't happen, setup_ddl doesn't use many dde's */
		CSB_ERR(csb, "Too many DDEs in DDL");
		return -EINVAL;
	case CSB_CC_TRANSPORT:
	case CSB_CC_INVALID_CRB:	/* P9 or later */
		/* shouldn't happen, we setup CRB correctly */
		CSB_ERR(csb, "Invalid CRB");
		return -EINVAL;
	case CSB_CC_INVALID_DDE:	/* P9 or later */
		/*
		 * shouldn't happen, setup_direct/indirect_dde creates
		 * DDE right
		 */
		CSB_ERR(csb, "Invalid DDE");
		return -EINVAL;
	case CSB_CC_SEGMENTED_DDL:
		/* shouldn't happen, setup_ddl creates DDL right */
		CSB_ERR(csb, "Segmented DDL error");
		return -EINVAL;
	case CSB_CC_DDE_OVERFLOW:
		/* shouldn't happen, setup_ddl creates DDL right */
		CSB_ERR(csb, "DDE overflow error");
		return -EINVAL;
	case CSB_CC_SESSION:
		/* should not happen with ICSWX */
		CSB_ERR(csb, "Session violation error");
		return -EPROTO;
	case CSB_CC_CHAIN:
		/* should not happen, we don't use chained CRBs */
		CSB_ERR(csb, "Chained CRB error");
		return -EPROTO;
	case CSB_CC_SEQUENCE:
		/* should not happen, we don't use chained CRBs */
		CSB_ERR(csb, "CRB sequence number error");
		return -EPROTO;
	case CSB_CC_UNKNOWN_CODE:
		CSB_ERR(csb, "Unknown subfunction code");
		return -EPROTO;

	/* hardware errors */
	case CSB_CC_RD_EXTERNAL:
	case CSB_CC_RD_EXTERNAL_DUP1:
	case CSB_CC_RD_EXTERNAL_DUP2:
	case CSB_CC_RD_EXTERNAL_DUP3:
		CSB_ERR_ADDR(csb, "Read error outside coprocessor");
		return -EPROTO;
	case CSB_CC_WR_EXTERNAL:
		CSB_ERR_ADDR(csb, "Write error outside coprocessor");
		return -EPROTO;
	case CSB_CC_INTERNAL:
		CSB_ERR(csb, "Internal error in coprocessor");
		return -EPROTO;
	case CSB_CC_PROVISION:
		CSB_ERR(csb, "Storage provision error");
		return -EPROTO;
	case CSB_CC_HW:
		CSB_ERR(csb, "Correctable hardware error");
		return -EPROTO;
	case CSB_CC_HW_EXPIRED_TIMER:	/* P9 or later */
		CSB_ERR(csb, "Job did not finish within allowed time");
		return -EPROTO;

	default:
		CSB_ERR(csb, "Invalid CC %d", csb->cc);
		return -EPROTO;
	}

	/* check Completion Extension state */
	if (csb->ce & CSB_CE_TERMINATION) {
		CSB_ERR(csb, "CSB request was terminated");
		return -EPROTO;
	}
	if (csb->ce & CSB_CE_INCOMPLETE) {
		CSB_ERR(csb, "CSB request not complete");
		return -EPROTO;
	}
	if (!(csb->ce & CSB_CE_TPBC)) {
		CSB_ERR(csb, "TPBC not provided, unknown target length");
		return -EPROTO;
	}

	/* successful completion */
	pr_debug_ratelimited("Processed %u bytes in %lu us\n",
			     be32_to_cpu(csb->count),
			     (unsigned long)ktime_us_delta(now, start));

	return 0;
}

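/*
 * Build the coprocessor request block for one (de)compression request:
 * describe the input and output buffers with DDLs and point the CRB at
 * the CSB that the engine will update on completion.
 */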
static int nx842_config_crb(const unsigned char *in, unsigned int inlen,
			    unsigned char *out, unsigned int outlen,
			    struct nx842_workmem *wmem)
{
	struct coprocessor_request_block *crb;
	struct coprocessor_status_block *csb;
	u64 csb_addr;
	int ret;

	crb = &wmem->crb;
	csb = &crb->csb;

	/* Clear any previous values */
	memset(crb, 0, sizeof(*crb));

	/* set up DDLs */
	ret = setup_ddl(&crb->source, wmem->ddl_in,
			(unsigned char *)in, inlen, true);
	if (ret)
		return ret;

	ret = setup_ddl(&crb->target, wmem->ddl_out,
			out, outlen, false);
	if (ret)
		return ret;

	/* set up CRB's CSB addr */
	csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
	csb_addr |= CRB_CSB_AT;	/* Addrs are phys */
	crb->csb_addr = cpu_to_be64(csb_addr);

	return 0;
}

/**
 * nx842_exec_icswx - compress/decompress data using the 842 algorithm
 *
 * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
 * This compresses or decompresses the provided input buffer into the provided
 * output buffer.
 *
 * Upon return from this function *@outlenp contains the length of the
 * output data.  If there is an error then *@outlenp will be 0 and an
 * error will be specified by the return code from this function.
 *
 * The @workmem buffer should only be used by one function call at a time.
 *
 * @in: input buffer pointer
 * @inlen: input buffer size
 * @out: output buffer pointer
 * @outlenp: output buffer size pointer
 * @workmem: working memory buffer pointer, size determined by
 *           nx842_powernv_driver.workmem_size
 * @fc: function code, see CCW Function Codes in nx-842.h
 *
 * Returns:
 *   0		Success, output of length *@outlenp stored in the buffer at @out
 *   -ENODEV	Hardware unavailable
 *   -ENOSPC	Output buffer is too small
 *   -EMSGSIZE	Input buffer too large
 *   -EINVAL	buffer constraints do not fit nx842_constraints
 *   -EPROTO	hardware error during operation
 *   -ETIMEDOUT	hardware did not complete operation in reasonable time
 *   -EINTR	operation was aborted
 */
static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen,
			    unsigned char *out, unsigned int *outlenp,
			    void *workmem, int fc)
{
	struct coprocessor_request_block *crb;
	struct coprocessor_status_block *csb;
	struct nx842_workmem *wmem;
	int ret;
	u32 ccw;
	unsigned int outlen = *outlenp;

	wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);

	*outlenp = 0;

	/* shouldn't happen, we don't load without a coproc */
	if (!nx842_ct) {
		pr_err_ratelimited("coprocessor CT is 0");
		return -ENODEV;
	}

	ret = nx842_config_crb(in, inlen, out, outlen, wmem);
	if (ret)
		return ret;

	crb = &wmem->crb;
	csb = &crb->csb;

	/* set up CCW */
	ccw = 0;
	ccw = SET_FIELD(CCW_CT, ccw, nx842_ct);
	ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */
	ccw = SET_FIELD(CCW_FC_842, ccw, fc);

	wmem->start = ktime_get();

	/* do ICSWX */
	ret = icswx(cpu_to_be32(ccw), crb);

	pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret,
			     (unsigned int)ccw,
			     (unsigned int)be32_to_cpu(crb->ccw));

	/*
	 * The NX842 coprocessor sets the 3rd bit in the CR register with
	 * XER[S0].  XER[S0] is the integer summary overflow bit, which has
	 * nothing to do with NX.  Since this bit can be set alongside other
	 * return values, mask it out.
	 */
	ret &= ~ICSWX_XERS0;

	switch (ret) {
	case ICSWX_INITIATED:
		ret = wait_for_csb(wmem, csb);
		break;
	case ICSWX_BUSY:
		pr_debug_ratelimited("842 Coprocessor busy\n");
		ret = -EBUSY;
		break;
	case ICSWX_REJECTED:
		pr_err_ratelimited("ICSWX rejected\n");
		ret = -EPROTO;
		break;
	}

	if (!ret)
		*outlenp = be32_to_cpu(csb->count);

	return ret;
}

/**
 * nx842_exec_vas - compress/decompress data using the 842 algorithm
 *
 * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
 * This compresses or decompresses the provided input buffer into the provided
 * output buffer.
 *
 * Upon return from this function *@outlenp contains the length of the
 * output data.  If there is an error then *@outlenp will be 0 and an
 * error will be specified by the return code from this function.
 *
 * The @workmem buffer should only be used by one function call at a time.
 *
 * @in: input buffer pointer
 * @inlen: input buffer size
 * @out: output buffer pointer
 * @outlenp: output buffer size pointer
 * @workmem: working memory buffer pointer, size determined by
 *           nx842_powernv_driver.workmem_size
 * @fc: function code, see CCW Function Codes in nx-842.h
 *
 * Returns:
 *   0		Success, output of length *@outlenp stored in the buffer at @out
 *   -ENODEV	Hardware unavailable
 *   -ENOSPC	Output buffer is too small
 *   -EMSGSIZE	Input buffer too large
 *   -EINVAL	buffer constraints do not fit nx842_constraints
 *   -EPROTO	hardware error during operation
 *   -ETIMEDOUT	hardware did not complete operation in reasonable time
 *   -EINTR	operation was aborted
 */
static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
			  unsigned char *out, unsigned int *outlenp,
			  void *workmem, int fc)
{
	struct coprocessor_request_block *crb;
	struct coprocessor_status_block *csb;
	struct nx842_workmem *wmem;
	struct vas_window *txwin;
	int ret, i = 0;
	u32 ccw;
	unsigned int outlen = *outlenp;

	wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);

	*outlenp = 0;

	crb = &wmem->crb;
	csb = &crb->csb;

	ret = nx842_config_crb(in, inlen, out, outlen, wmem);
	if (ret)
		return ret;

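	/*
	 * Note: unlike the icswx path, only the function code is set in the
	 * CCW here; the target engine is selected by the per-CPU VAS send
	 * window opened for this chip's high-priority 842 RxFIFO.
	 */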
	ccw = 0;
	ccw = SET_FIELD(CCW_FC_842, ccw, fc);
	crb->ccw = cpu_to_be32(ccw);

	do {
		wmem->start = ktime_get();
		preempt_disable();
		txwin = this_cpu_read(cpu_txwin);

		/*
		 * VAS copies the CRB into the local L2 cache; see
		 * <asm/vas.h> for the @crb and @offset arguments.
		 */
		vas_copy_crb(crb, 0);

		/*
		 * VAS pastes the previously copied CRB to NX; see
		 * <asm/vas.h> for the @txwin, @offset and @last (must be
		 * true) arguments.
		 */
		ret = vas_paste_crb(txwin, 0, 1);
		preempt_enable();
		/*
		 * Retry copy/paste function for VAS failures.
		 */
	} while (ret && (i++ < VAS_RETRIES));

	if (ret) {
		pr_err_ratelimited("VAS copy/paste failed\n");
		return ret;
	}

	ret = wait_for_csb(wmem, csb);
	if (!ret)
		*outlenp = be32_to_cpu(csb->count);

	return ret;
}

/**
 * nx842_powernv_compress - Compress data using the 842 algorithm
 *
 * Compression provided by the NX842 coprocessor on IBM PowerNV systems.
 * The input buffer is compressed and the result is stored in the
 * provided output buffer.
 *
 * Upon return from this function *@outlenp contains the length of the
 * compressed data.  If there is an error then *@outlenp will be 0 and an
 * error will be specified by the return code from this function.
 *
 * @in: input buffer pointer
 * @inlen: input buffer size
 * @out: output buffer pointer
 * @outlenp: output buffer size pointer
 * @wmem: working memory buffer pointer, size determined by
 *        nx842_powernv_driver.workmem_size
 *
 * Returns: see @nx842_powernv_exec()
 */
static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
				  unsigned char *out, unsigned int *outlenp,
				  void *wmem)
{
	return nx842_powernv_exec(in, inlen, out, outlenp,
				  wmem, CCW_FC_842_COMP_CRC);
}

/**
 * nx842_powernv_decompress - Decompress data using the 842 algorithm
 *
 * Decompression provided by the NX842 coprocessor on IBM PowerNV systems.
 * The input buffer is decompressed and the result is stored in the
 * provided output buffer.
 *
 * Upon return from this function *@outlenp contains the length of the
 * decompressed data.  If there is an error then *@outlenp will be 0 and an
 * error will be specified by the return code from this function.
 *
 * @in: input buffer pointer
 * @inlen: input buffer size
 * @out: output buffer pointer
 * @outlenp: output buffer size pointer
 * @wmem: working memory buffer pointer, size determined by
 *        nx842_powernv_driver.workmem_size
 *
 * Returns: see @nx842_powernv_exec()
 */
static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
				    unsigned char *out, unsigned int *outlenp,
				    void *wmem)
{
	return nx842_powernv_exec(in, inlen, out, outlenp,
				  wmem, CCW_FC_842_DECOMP_CRC);
}

static inline void nx_add_coprocs_list(struct nx_coproc *coproc,
				       int chipid)
{
	coproc->chip_id = chipid;
	INIT_LIST_HEAD(&coproc->list);
	list_add(&coproc->list, &nx_coprocs);
}

static struct vas_window *nx_alloc_txwin(struct nx_coproc *coproc)
{
	struct vas_window *txwin = NULL;
	struct vas_tx_win_attr txattr;

	/*
	 * Kernel requests will be high priority, so open send
	 * windows only for high priority RxFIFO entries.
	 */
	vas_init_tx_win_attr(&txattr, coproc->ct);
	txattr.lpid = 0;	/* lpid is 0 for kernel requests */

	/*
	 * Open a VAS send window which is used to send requests to NX.
	 */
	txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
	if (IS_ERR(txwin))
		pr_err("ibm,nx-842: Can not open TX window: %ld\n",
		       PTR_ERR(txwin));

	return txwin;
}

/*
 * Identify the chip ID for each CPU, open a send window for the
 * corresponding NX engine and save the txwin in the per-CPU cpu_txwin.
 * cpu_txwin is used in the copy/paste operation for each compression /
 * decompression request.
 */
static int nx_open_percpu_txwins(void)
{
	struct nx_coproc *coproc, *n;
	unsigned int i, chip_id;

	for_each_possible_cpu(i) {
		struct vas_window *txwin = NULL;

		chip_id = cpu_to_chip_id(i);

		list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
			/*
			 * Kernel requests use only high priority FIFOs, so
			 * open send windows for these FIFOs.
			 * GZIP is not supported in the kernel right now.
			 */

			if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
				continue;

			if (coproc->chip_id == chip_id) {
				txwin = nx_alloc_txwin(coproc);
				if (IS_ERR(txwin))
					return PTR_ERR(txwin);

				per_cpu(cpu_txwin, i) = txwin;
				break;
			}
		}

		if (!per_cpu(cpu_txwin, i)) {
			/* shouldn't happen, each chip will have an NX engine */
			pr_err("NX engine is not available for CPU %d\n", i);
			return -EINVAL;
		}
	}

	return 0;
}

static int __init nx_set_ct(struct nx_coproc *coproc, const char *priority,
			    int high, int normal)
{
	if (!strcmp(priority, "High"))
		coproc->ct = high;
	else if (!strcmp(priority, "Normal"))
		coproc->ct = normal;
	else {
		pr_err("Invalid RxFIFO priority value\n");
		return -EINVAL;
	}

	return 0;
}

static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
				      int vasid, int type, int *ct)
{
	struct vas_window *rxwin = NULL;
	struct vas_rx_win_attr rxattr;
	u32 lpid, pid, tid, fifo_size;
	struct nx_coproc *coproc;
	u64 rx_fifo;
	const char *priority;
	int ret;

	ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo);
	if (ret) {
		pr_err("Missing rx-fifo-address property\n");
		return ret;
	}

	ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size);
	if (ret) {
		pr_err("Missing rx-fifo-size property\n");
		return ret;
	}

	ret = of_property_read_u32(dn, "lpid", &lpid);
	if (ret) {
		pr_err("Missing lpid property\n");
		return ret;
	}

	ret = of_property_read_u32(dn, "pid", &pid);
	if (ret) {
		pr_err("Missing pid property\n");
		return ret;
	}

	ret = of_property_read_u32(dn, "tid", &tid);
	if (ret) {
		pr_err("Missing tid property\n");
		return ret;
	}

	ret = of_property_read_string(dn, "priority", &priority);
	if (ret) {
		pr_err("Missing priority property\n");
		return ret;
	}

	coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
	if (!coproc)
		return -ENOMEM;

	if (type == NX_CT_842)
		ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_842_HIPRI,
				VAS_COP_TYPE_842);
	else if (type == NX_CT_GZIP)
		ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_GZIP_HIPRI,
				VAS_COP_TYPE_GZIP);

	if (ret)
		goto err_out;

	vas_init_rx_win_attr(&rxattr, coproc->ct);
	rxattr.rx_fifo = rx_fifo;
	rxattr.rx_fifo_size = fifo_size;
	rxattr.lnotify_lpid = lpid;
	rxattr.lnotify_pid = pid;
	rxattr.lnotify_tid = tid;
	/*
	 * The maximum number of RX window credits can not exceed the number
	 * of CRBs the RxFIFO can hold, otherwise an RxFIFO overrun can cause
	 * a checkstop.
	 */
	rxattr.wcreds_max = fifo_size / CRB_SIZE;

	/*
	 * Open a VAS receive window which is used to configure the RxFIFO
	 * for NX.
	 */
	rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr);
	if (IS_ERR(rxwin)) {
		ret = PTR_ERR(rxwin);
		pr_err("setting RxFIFO with VAS failed: %d\n",
		       ret);
		goto err_out;
	}

	coproc->vas.rxwin = rxwin;
	coproc->vas.id = vasid;
	nx_add_coprocs_list(coproc, chip_id);

	/*
	 * The (lpid, pid, tid) combination has to be unique for each
	 * coprocessor instance in the system.  To make it unique, skiboot
	 * uses the coprocessor type (842 or GZIP) for pid and provides
	 * this value to the kernel in the pid device-tree property.
	 */
	*ct = pid;

	return 0;

err_out:
	kfree(coproc);
	return ret;
}

static int __init nx_coproc_init(int chip_id, int ct_842, int ct_gzip)
{
	int ret = 0;

	if (opal_check_token(OPAL_NX_COPROC_INIT)) {
		ret = opal_nx_coproc_init(chip_id, ct_842);

		if (!ret)
			ret = opal_nx_coproc_init(chip_id, ct_gzip);

		if (ret) {
			ret = opal_error_code(ret);
			pr_err("Failed to initialize NX for chip(%d): %d\n",
			       chip_id, ret);
		}
	} else
		pr_warn("Firmware doesn't support NX initialization\n");

	return ret;
}

static int __init find_nx_device_tree(struct device_node *dn, int chip_id,
				      int vasid, int type, char *devname,
				      int *ct)
{
	int ret = 0;

	if (of_device_is_compatible(dn, devname)) {
		ret = vas_cfg_coproc_info(dn, chip_id, vasid, type, ct);
		if (ret)
			of_node_put(dn);
	}

	return ret;
}

static int __init nx_powernv_probe_vas(struct device_node *pn)
{
	int chip_id, vasid, ret = 0;
	int ct_842 = 0, ct_gzip = 0;
	struct device_node *dn;

	chip_id = of_get_ibm_chip_id(pn);
	if (chip_id < 0) {
		pr_err("ibm,chip-id missing\n");
		return -EINVAL;
	}

	vasid = chip_to_vas_id(chip_id);
	if (vasid < 0) {
		pr_err("Unable to map chip_id %d to vasid\n", chip_id);
		return -EINVAL;
	}

	for_each_child_of_node(pn, dn) {
		ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842,
					  "ibm,p9-nx-842", &ct_842);

		if (!ret)
			ret = find_nx_device_tree(dn, chip_id, vasid,
						  NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);

		if (ret)
			return ret;
	}

	if (!ct_842 || !ct_gzip) {
		pr_err("NX FIFO nodes are missing\n");
		return -EINVAL;
	}

	/*
	 * Initialize NX instance for both high and normal priority FIFOs.
	 */
	ret = nx_coproc_init(chip_id, ct_842, ct_gzip);

	return ret;
}

static int __init nx842_powernv_probe(struct device_node *dn)
{
	struct nx_coproc *coproc;
	unsigned int ct, ci;
	int chip_id;

	chip_id = of_get_ibm_chip_id(dn);
	if (chip_id < 0) {
		pr_err("ibm,chip-id missing\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
		pr_err("ibm,842-coprocessor-type missing\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) {
		pr_err("ibm,842-coprocessor-instance missing\n");
		return -EINVAL;
	}

	coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
	if (!coproc)
		return -ENOMEM;

	coproc->ct = ct;
	coproc->ci = ci;
	nx_add_coprocs_list(coproc, chip_id);

	pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);

	if (!nx842_ct)
		nx842_ct = ct;
	else if (nx842_ct != ct)
		pr_err("NX842 chip %d, CT %d != first found CT %d\n",
		       chip_id, ct, nx842_ct);

	return 0;
}

static void nx_delete_coprocs(void)
{
	struct nx_coproc *coproc, *n;
	struct vas_window *txwin;
	int i;

	/*
	 * Close the per-CPU send windows that were opened for the
	 * corresponding coprocessors.
	 */
	for_each_possible_cpu(i) {
		txwin = per_cpu(cpu_txwin, i);
		if (txwin)
			vas_win_close(txwin);

		per_cpu(cpu_txwin, i) = NULL;
	}

	list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
		if (coproc->vas.rxwin)
			vas_win_close(coproc->vas.rxwin);

		list_del(&coproc->list);
		kfree(coproc);
	}
}

static struct nx842_constraints nx842_powernv_constraints = {
	.alignment =	DDE_BUFFER_ALIGN,
	.multiple =	DDE_BUFFER_LAST_MULT,
	.minimum =	DDE_BUFFER_LAST_MULT,
	.maximum =	(DDL_LEN_MAX - 1) * PAGE_SIZE,
};

static struct nx842_driver nx842_powernv_driver = {
	.name =		KBUILD_MODNAME,
	.owner =	THIS_MODULE,
	.workmem_size =	sizeof(struct nx842_workmem),
	.constraints =	&nx842_powernv_constraints,
	.compress =	nx842_powernv_compress,
	.decompress =	nx842_powernv_decompress,
};
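
/*
 * The driver descriptor above is handed to the generic nx-842 crypto glue
 * via nx842_crypto_init(), so the "842-nx" crypto_alg below is backed by
 * this PowerNV implementation.
 */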
static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
{
	return nx842_crypto_init(tfm, &nx842_powernv_driver);
}

static struct crypto_alg nx842_powernv_alg = {
	.cra_name		= "842",
	.cra_driver_name	= "842-nx",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= nx842_powernv_crypto_init,
	.cra_exit		= nx842_crypto_exit,
	.cra_u			= { .compress = {
	.coa_compress		= nx842_crypto_compress,
	.coa_decompress		= nx842_crypto_decompress } }
};

static __init int nx_compress_powernv_init(void)
{
	struct device_node *dn;
	int ret;

	/* verify workmem size/align restrictions */
	BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
	BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN);
	BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN);
	/* verify buffer size/align restrictions */
	BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN);
	BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
	BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);

	for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
		ret = nx_powernv_probe_vas(dn);
		if (ret) {
			nx_delete_coprocs();
			of_node_put(dn);
			return ret;
		}
	}

	if (list_empty(&nx_coprocs)) {
		for_each_compatible_node(dn, NULL, "ibm,power-nx")
			nx842_powernv_probe(dn);

		if (!nx842_ct)
			return -ENODEV;

		nx842_powernv_exec = nx842_exec_icswx;
	} else {
		/*
		 * Register the VAS user space API for NX GZIP so that user
		 * space can use the GZIP engine.  The high FIFO priority is
		 * used for kernel requests and the normal FIFO priority is
		 * assigned to userspace.  842 compression is supported only
		 * in the kernel.
		 */
		ret = vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
					      "nx-gzip");

		/*
		 * GZIP is not supported in the kernel right now,
		 * so open tx windows only for 842.
		 */
		if (!ret)
			ret = nx_open_percpu_txwins();

		if (ret) {
			nx_delete_coprocs();
			return ret;
		}

		nx842_powernv_exec = nx842_exec_vas;
	}

	ret = crypto_register_alg(&nx842_powernv_alg);
	if (ret) {
		nx_delete_coprocs();
		return ret;
	}

	return 0;
}
module_init(nx_compress_powernv_init);

static void __exit nx_compress_powernv_exit(void)
{
	/*
	 * The GZIP engine exists only on Power9 and later; nx842_ct is set
	 * only on the Power8 (icswx) path.  The VAS user space API for NX
	 * GZIP was registered during init, so unregister it here when the
	 * VAS path was used.
	 */
	if (!nx842_ct)
		vas_unregister_coproc_api();

	crypto_unregister_alg(&nx842_powernv_alg);

	nx_delete_coprocs();
}
module_exit(nx_compress_powernv_exit);