1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/crypto.h>
6 #include <crypto/internal/aead.h>
7 #include <crypto/internal/cipher.h>
8 #include <crypto/internal/skcipher.h>
9 #include <crypto/aes.h>
10 #include <crypto/sha.h>
11 #include <crypto/hash.h>
12 #include <crypto/hmac.h>
13 #include <crypto/algapi.h>
14 #include <crypto/authenc.h>
15 #include <crypto/xts.h>
16 #include <linux/dma-mapping.h>
17 #include "adf_accel_devices.h"
18 #include "adf_transport.h"
19 #include "adf_common_drv.h"
20 #include "qat_crypto.h"
21 #include "icp_qat_hw.h"
22 #include "icp_qat_fw.h"
23 #include "icp_qat_fw_la.h"
24
25 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
26 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
27 ICP_QAT_HW_CIPHER_NO_CONVERT, \
28 ICP_QAT_HW_CIPHER_ENCRYPT)
29
30 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
31 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
32 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
33 ICP_QAT_HW_CIPHER_DECRYPT)
34
35 static DEFINE_MUTEX(algs_lock);
36 static unsigned int active_devs;
37
38 /* Common content descriptor */
39 struct qat_alg_cd {
40 union {
41 struct qat_enc { /* Encrypt content desc */
42 struct icp_qat_hw_cipher_algo_blk cipher;
43 struct icp_qat_hw_auth_algo_blk hash;
44 } qat_enc_cd;
45 struct qat_dec { /* Decrypt content desc */
46 struct icp_qat_hw_auth_algo_blk hash;
47 struct icp_qat_hw_cipher_algo_blk cipher;
48 } qat_dec_cd;
49 };
50 } __aligned(64);
51
52 struct qat_alg_aead_ctx {
53 struct qat_alg_cd *enc_cd;
54 struct qat_alg_cd *dec_cd;
55 dma_addr_t enc_cd_paddr;
56 dma_addr_t dec_cd_paddr;
57 struct icp_qat_fw_la_bulk_req enc_fw_req;
58 struct icp_qat_fw_la_bulk_req dec_fw_req;
59 struct crypto_shash *hash_tfm;
60 enum icp_qat_hw_auth_algo qat_hash_alg;
61 struct qat_crypto_instance *inst;
62 union {
63 struct sha1_state sha1;
64 struct sha256_state sha256;
65 struct sha512_state sha512;
66 };
67 char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
68 char opad[SHA512_BLOCK_SIZE];
69 };
70
71 struct qat_alg_skcipher_ctx {
72 struct icp_qat_hw_cipher_algo_blk *enc_cd;
73 struct icp_qat_hw_cipher_algo_blk *dec_cd;
74 dma_addr_t enc_cd_paddr;
75 dma_addr_t dec_cd_paddr;
76 struct icp_qat_fw_la_bulk_req enc_fw_req;
77 struct icp_qat_fw_la_bulk_req dec_fw_req;
78 struct qat_crypto_instance *inst;
79 struct crypto_skcipher *ftfm;
80 bool fallback;
81 };
82
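/*
 * Return the size in bytes of the inner hash state (state1) for the
 * given QAT hash algorithm, or -EFAULT if the algorithm is not handled.
 */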
83 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
84 {
85 switch (qat_hash_alg) {
86 case ICP_QAT_HW_AUTH_ALGO_SHA1:
87 return ICP_QAT_HW_SHA1_STATE1_SZ;
88 case ICP_QAT_HW_AUTH_ALGO_SHA256:
89 return ICP_QAT_HW_SHA256_STATE1_SZ;
90 case ICP_QAT_HW_AUTH_ALGO_SHA512:
91 return ICP_QAT_HW_SHA512_STATE1_SZ;
92 default:
93 return -EFAULT;
94 }
95 return -EFAULT;
96 }
97
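/*
 * Precompute the HMAC inner and outer partial hash states that the QAT
 * firmware expects in the content descriptor. The auth key is hashed
 * down if it exceeds the block size, XORed with the HMAC ipad/opad
 * constants, and one block of each pad is run through the shash. The
 * exported intermediate states are stored big-endian in state1: the
 * inner state at offset 0 and the outer state at the next 8-byte
 * aligned offset.
 */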
98 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
99 struct qat_alg_aead_ctx *ctx,
100 const u8 *auth_key,
101 unsigned int auth_keylen)
102 {
103 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
104 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
105 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
106 __be32 *hash_state_out;
107 __be64 *hash512_state_out;
108 int i, offset;
109
110 memset(ctx->ipad, 0, block_size);
111 memset(ctx->opad, 0, block_size);
112 shash->tfm = ctx->hash_tfm;
113
114 if (auth_keylen > block_size) {
115 int ret = crypto_shash_digest(shash, auth_key,
116 auth_keylen, ctx->ipad);
117 if (ret)
118 return ret;
119
120 memcpy(ctx->opad, ctx->ipad, digest_size);
121 } else {
122 memcpy(ctx->ipad, auth_key, auth_keylen);
123 memcpy(ctx->opad, auth_key, auth_keylen);
124 }
125
126 for (i = 0; i < block_size; i++) {
127 char *ipad_ptr = ctx->ipad + i;
128 char *opad_ptr = ctx->opad + i;
129 *ipad_ptr ^= HMAC_IPAD_VALUE;
130 *opad_ptr ^= HMAC_OPAD_VALUE;
131 }
132
133 if (crypto_shash_init(shash))
134 return -EFAULT;
135
136 if (crypto_shash_update(shash, ctx->ipad, block_size))
137 return -EFAULT;
138
139 hash_state_out = (__be32 *)hash->sha.state1;
140 hash512_state_out = (__be64 *)hash_state_out;
141
142 switch (ctx->qat_hash_alg) {
143 case ICP_QAT_HW_AUTH_ALGO_SHA1:
144 if (crypto_shash_export(shash, &ctx->sha1))
145 return -EFAULT;
146 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
147 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
148 break;
149 case ICP_QAT_HW_AUTH_ALGO_SHA256:
150 if (crypto_shash_export(shash, &ctx->sha256))
151 return -EFAULT;
152 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
153 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
154 break;
155 case ICP_QAT_HW_AUTH_ALGO_SHA512:
156 if (crypto_shash_export(shash, &ctx->sha512))
157 return -EFAULT;
158 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
159 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
160 break;
161 default:
162 return -EFAULT;
163 }
164
165 if (crypto_shash_init(shash))
166 return -EFAULT;
167
168 if (crypto_shash_update(shash, ctx->opad, block_size))
169 return -EFAULT;
170
171 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
172 if (offset < 0)
173 return -EFAULT;
174
175 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
176 hash512_state_out = (__be64 *)hash_state_out;
177
178 switch (ctx->qat_hash_alg) {
179 case ICP_QAT_HW_AUTH_ALGO_SHA1:
180 if (crypto_shash_export(shash, &ctx->sha1))
181 return -EFAULT;
182 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
183 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
184 break;
185 case ICP_QAT_HW_AUTH_ALGO_SHA256:
186 if (crypto_shash_export(shash, &ctx->sha256))
187 return -EFAULT;
188 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
189 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
190 break;
191 case ICP_QAT_HW_AUTH_ALGO_SHA512:
192 if (crypto_shash_export(shash, &ctx->sha512))
193 return -EFAULT;
194 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
195 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
196 break;
197 default:
198 return -EFAULT;
199 }
200 memzero_explicit(ctx->ipad, block_size);
201 memzero_explicit(ctx->opad, block_size);
202 return 0;
203 }
204
205 static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
206 {
207 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
208 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
209 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
210 ICP_QAT_FW_LA_UPDATE_STATE);
211 }
212
213 static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
214 {
215 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
216 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
217 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
218 ICP_QAT_FW_LA_NO_UPDATE_STATE);
219 }
220
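/*
 * Fill in the common LA request header. AEAD requests embed a 16-byte
 * IV in the request and do not update the cipher state; skcipher
 * requests pass the IV as a 64-bit pointer and ask the firmware to
 * update the state so the final IV can be copied back on completion.
 */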
221 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
222 int aead)
223 {
224 header->hdr_flags =
225 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
226 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
227 header->comn_req_flags =
228 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
229 QAT_COMN_PTR_TYPE_SGL);
230 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
231 ICP_QAT_FW_LA_PARTIAL_NONE);
232 if (aead)
233 qat_alg_init_hdr_no_iv_updt(header);
234 else
235 qat_alg_init_hdr_iv_updt(header);
236 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
237 ICP_QAT_FW_LA_NO_PROTO);
238 }
239
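/*
 * Build the encrypt content descriptor (cipher config and key followed
 * by the auth setup and precomputed HMAC states) and the matching
 * CIPHER_HASH firmware request template used for AEAD encryption.
 */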
240 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
241 int alg,
242 struct crypto_authenc_keys *keys,
243 int mode)
244 {
245 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
246 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
247 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
248 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
249 struct icp_qat_hw_auth_algo_blk *hash =
250 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
251 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
252 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
253 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
254 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
255 void *ptr = &req_tmpl->cd_ctrl;
256 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
257 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
258
259 /* CD setup */
260 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
261 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
262 hash->sha.inner_setup.auth_config.config =
263 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
264 ctx->qat_hash_alg, digestsize);
265 hash->sha.inner_setup.auth_counter.counter =
266 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
267
268 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
269 return -EFAULT;
270
271 /* Request setup */
272 qat_alg_init_common_hdr(header, 1);
273 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
274 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
275 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
276 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
277 ICP_QAT_FW_LA_RET_AUTH_RES);
278 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
279 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
280 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
281 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
282
283 /* Cipher CD config setup */
284 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
285 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
286 cipher_cd_ctrl->cipher_cfg_offset = 0;
287 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
288 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
289 /* Auth CD config setup */
290 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
291 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
292 hash_cd_ctrl->inner_res_sz = digestsize;
293 hash_cd_ctrl->final_sz = digestsize;
294
295 switch (ctx->qat_hash_alg) {
296 case ICP_QAT_HW_AUTH_ALGO_SHA1:
297 hash_cd_ctrl->inner_state1_sz =
298 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
299 hash_cd_ctrl->inner_state2_sz =
300 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
301 break;
302 case ICP_QAT_HW_AUTH_ALGO_SHA256:
303 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
304 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
305 break;
306 case ICP_QAT_HW_AUTH_ALGO_SHA512:
307 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
308 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
309 break;
310 default:
311 break;
312 }
313 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
314 ((sizeof(struct icp_qat_hw_auth_setup) +
315 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
316 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
317 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
318 return 0;
319 }
320
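/*
 * Build the decrypt content descriptor (auth setup and HMAC states
 * followed by the cipher config and key) and the matching HASH_CIPHER
 * firmware request template; the firmware compares the digest itself
 * (CMP_AUTH_RES) instead of returning it.
 */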
321 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
322 int alg,
323 struct crypto_authenc_keys *keys,
324 int mode)
325 {
326 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
327 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
328 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
329 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
330 struct icp_qat_hw_cipher_algo_blk *cipher =
331 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
332 sizeof(struct icp_qat_hw_auth_setup) +
333 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
334 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
335 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
336 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
337 void *ptr = &req_tmpl->cd_ctrl;
338 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
339 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
340 struct icp_qat_fw_la_auth_req_params *auth_param =
341 (struct icp_qat_fw_la_auth_req_params *)
342 ((char *)&req_tmpl->serv_specif_rqpars +
343 sizeof(struct icp_qat_fw_la_cipher_req_params));
344
345 /* CD setup */
346 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
347 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
348 hash->sha.inner_setup.auth_config.config =
349 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
350 ctx->qat_hash_alg,
351 digestsize);
352 hash->sha.inner_setup.auth_counter.counter =
353 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
354
355 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
356 return -EFAULT;
357
358 /* Request setup */
359 qat_alg_init_common_hdr(header, 1);
360 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
361 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
362 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
363 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
364 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
365 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
366 ICP_QAT_FW_LA_CMP_AUTH_RES);
367 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
368 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
369
370 /* Cipher CD config setup */
371 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
372 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
373 cipher_cd_ctrl->cipher_cfg_offset =
374 (sizeof(struct icp_qat_hw_auth_setup) +
375 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
376 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
377 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
378
379 /* Auth CD config setup */
380 hash_cd_ctrl->hash_cfg_offset = 0;
381 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
382 hash_cd_ctrl->inner_res_sz = digestsize;
383 hash_cd_ctrl->final_sz = digestsize;
384
385 switch (ctx->qat_hash_alg) {
386 case ICP_QAT_HW_AUTH_ALGO_SHA1:
387 hash_cd_ctrl->inner_state1_sz =
388 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
389 hash_cd_ctrl->inner_state2_sz =
390 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
391 break;
392 case ICP_QAT_HW_AUTH_ALGO_SHA256:
393 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
394 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
395 break;
396 case ICP_QAT_HW_AUTH_ALGO_SHA512:
397 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
398 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
399 break;
400 default:
401 break;
402 }
403
404 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
405 ((sizeof(struct icp_qat_hw_auth_setup) +
406 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
407 auth_param->auth_res_sz = digestsize;
408 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
409 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
410 return 0;
411 }
412
413 static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
414 struct icp_qat_fw_la_bulk_req *req,
415 struct icp_qat_hw_cipher_algo_blk *cd,
416 const u8 *key, unsigned int keylen)
417 {
418 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
419 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
420 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
421
422 memcpy(cd->aes.key, key, keylen);
423 qat_alg_init_common_hdr(header, 0);
424 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
425 cd_pars->u.s.content_desc_params_sz =
426 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
427 /* Cipher CD config setup */
428 cd_ctrl->cipher_key_sz = keylen >> 3;
429 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
430 cd_ctrl->cipher_cfg_offset = 0;
431 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
432 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
433 }
434
435 static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
436 int alg, const u8 *key,
437 unsigned int keylen, int mode)
438 {
439 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
440 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
441 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
442
443 qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
444 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
445 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
446 }
447
448 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
449 int alg, const u8 *key,
450 unsigned int keylen, int mode)
451 {
452 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
453 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
454 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
455
456 qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
457 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
458
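/* CTR mode decrypts with the forward (encrypt) cipher transform */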
459 if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
460 dec_cd->aes.cipher_config.val =
461 QAT_AES_HW_CONFIG_DEC(alg, mode);
462 else
463 dec_cd->aes.cipher_config.val =
464 QAT_AES_HW_CONFIG_ENC(alg, mode);
465 }
466
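/*
 * Map an AES key length to the QAT algorithm id. XTS keys carry two
 * AES keys back to back, so the accepted lengths are doubled and
 * AES-192 is not offered at all in XTS mode.
 */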
467 static int qat_alg_validate_key(int key_len, int *alg, int mode)
468 {
469 if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
470 switch (key_len) {
471 case AES_KEYSIZE_128:
472 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
473 break;
474 case AES_KEYSIZE_192:
475 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
476 break;
477 case AES_KEYSIZE_256:
478 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
479 break;
480 default:
481 return -EINVAL;
482 }
483 } else {
484 switch (key_len) {
485 case AES_KEYSIZE_128 << 1:
486 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
487 break;
488 case AES_KEYSIZE_256 << 1:
489 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
490 break;
491 default:
492 return -EINVAL;
493 }
494 }
495 return 0;
496 }
497
498 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
499 unsigned int keylen, int mode)
500 {
501 struct crypto_authenc_keys keys;
502 int alg;
503
504 if (crypto_authenc_extractkeys(&keys, key, keylen))
505 goto bad_key;
506
507 if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
508 goto bad_key;
509
510 if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
511 goto error;
512
513 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
514 goto error;
515
516 memzero_explicit(&keys, sizeof(keys));
517 return 0;
518 bad_key:
519 memzero_explicit(&keys, sizeof(keys));
520 return -EINVAL;
521 error:
522 memzero_explicit(&keys, sizeof(keys));
523 return -EFAULT;
524 }
525
526 static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
527 const u8 *key,
528 unsigned int keylen,
529 int mode)
530 {
531 int alg;
532
533 if (qat_alg_validate_key(keylen, &alg, mode))
534 return -EINVAL;
535
536 qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
537 qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
538 return 0;
539 }
540
541 static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
542 unsigned int keylen)
543 {
544 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
545
546 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
547 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
548 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
549 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
550
551 return qat_alg_aead_init_sessions(tfm, key, keylen,
552 ICP_QAT_HW_CIPHER_CBC_MODE);
553 }
554
555 static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
556 unsigned int keylen)
557 {
558 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
559 struct qat_crypto_instance *inst = NULL;
560 int node = get_current_node();
561 struct device *dev;
562 int ret;
563
564 inst = qat_crypto_get_instance_node(node);
565 if (!inst)
566 return -EINVAL;
567 dev = &GET_DEV(inst->accel_dev);
568 ctx->inst = inst;
569 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
570 &ctx->enc_cd_paddr,
571 GFP_ATOMIC);
572 if (!ctx->enc_cd) {
573 ret = -ENOMEM;
574 goto out_free_inst;
575 }
576 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
577 &ctx->dec_cd_paddr,
578 GFP_ATOMIC);
579 if (!ctx->dec_cd) {
580 ret = -ENOMEM;
581 goto out_free_enc;
582 }
583
584 ret = qat_alg_aead_init_sessions(tfm, key, keylen,
585 ICP_QAT_HW_CIPHER_CBC_MODE);
586 if (ret)
587 goto out_free_all;
588
589 return 0;
590
591 out_free_all:
592 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
593 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
594 ctx->dec_cd, ctx->dec_cd_paddr);
595 ctx->dec_cd = NULL;
596 out_free_enc:
597 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
598 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
599 ctx->enc_cd, ctx->enc_cd_paddr);
600 ctx->enc_cd = NULL;
601 out_free_inst:
602 ctx->inst = NULL;
603 qat_crypto_put_instance(inst);
604 return ret;
605 }
606
607 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
608 unsigned int keylen)
609 {
610 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
611
612 if (ctx->enc_cd)
613 return qat_alg_aead_rekey(tfm, key, keylen);
614 else
615 return qat_alg_aead_newkey(tfm, key, keylen);
616 }
617
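/*
 * Unmap and, if they were allocated rather than taken from the request,
 * free the buffer descriptor lists built by qat_alg_sgl_to_bufl().
 */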
618 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
619 struct qat_crypto_request *qat_req)
620 {
621 struct device *dev = &GET_DEV(inst->accel_dev);
622 struct qat_alg_buf_list *bl = qat_req->buf.bl;
623 struct qat_alg_buf_list *blout = qat_req->buf.blout;
624 dma_addr_t blp = qat_req->buf.blp;
625 dma_addr_t blpout = qat_req->buf.bloutp;
626 size_t sz = qat_req->buf.sz;
627 size_t sz_out = qat_req->buf.sz_out;
628 int bl_dma_dir;
629 int i;
630
631 bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
632
633 for (i = 0; i < bl->num_bufs; i++)
634 dma_unmap_single(dev, bl->bufers[i].addr,
635 bl->bufers[i].len, bl_dma_dir);
636
637 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
638
639 if (!qat_req->buf.sgl_src_valid)
640 kfree(bl);
641
642 if (blp != blpout) {
643 /* For out-of-place operations, DMA unmap only the data buffers */
644 int bufless = blout->num_bufs - blout->num_mapped_bufs;
645
646 for (i = bufless; i < blout->num_bufs; i++) {
647 dma_unmap_single(dev, blout->bufers[i].addr,
648 blout->bufers[i].len,
649 DMA_FROM_DEVICE);
650 }
651 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
652
653 if (!qat_req->buf.sgl_dst_valid)
654 kfree(blout);
655 }
656 }
657
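/*
 * Flatten the source and destination scatterlists into QAT buffer
 * descriptor lists and DMA-map everything. Lists that fit in
 * QAT_MAX_BUFF_DESC entries use the descriptors preallocated in the
 * request; larger lists are allocated with GFP_ATOMIC. Zero-length
 * entries are skipped, and the error path unwinds every mapping made
 * so far.
 */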
658 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
659 struct scatterlist *sgl,
660 struct scatterlist *sglout,
661 struct qat_crypto_request *qat_req)
662 {
663 struct device *dev = &GET_DEV(inst->accel_dev);
664 int i, sg_nctr = 0;
665 int n = sg_nents(sgl);
666 struct qat_alg_buf_list *bufl;
667 struct qat_alg_buf_list *buflout = NULL;
668 dma_addr_t blp = DMA_MAPPING_ERROR;
669 dma_addr_t bloutp = DMA_MAPPING_ERROR;
670 struct scatterlist *sg;
671 size_t sz_out, sz = struct_size(bufl, bufers, n);
672 int node = dev_to_node(&GET_DEV(inst->accel_dev));
673 int bufl_dma_dir;
674
675 if (unlikely(!n))
676 return -EINVAL;
677
678 qat_req->buf.sgl_src_valid = false;
679 qat_req->buf.sgl_dst_valid = false;
680
681 if (n > QAT_MAX_BUFF_DESC) {
682 bufl = kzalloc_node(sz, GFP_ATOMIC, node);
683 if (unlikely(!bufl))
684 return -ENOMEM;
685 } else {
686 bufl = &qat_req->buf.sgl_src.sgl_hdr;
687 memset(bufl, 0, sizeof(struct qat_alg_buf_list));
688 qat_req->buf.sgl_src_valid = true;
689 }
690
691 bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
692
693 for_each_sg(sgl, sg, n, i)
694 bufl->bufers[i].addr = DMA_MAPPING_ERROR;
695
696 for_each_sg(sgl, sg, n, i) {
697 int y = sg_nctr;
698
699 if (!sg->length)
700 continue;
701
702 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
703 sg->length,
704 bufl_dma_dir);
705 bufl->bufers[y].len = sg->length;
706 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
707 goto err_in;
708 sg_nctr++;
709 }
710 bufl->num_bufs = sg_nctr;
711 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
712 if (unlikely(dma_mapping_error(dev, blp)))
713 goto err_in;
714 qat_req->buf.bl = bufl;
715 qat_req->buf.blp = blp;
716 qat_req->buf.sz = sz;
717 /* Handle out of place operation */
718 if (sgl != sglout) {
719 struct qat_alg_buf *bufers;
720
721 n = sg_nents(sglout);
722 sz_out = struct_size(buflout, bufers, n);
723 sg_nctr = 0;
724
725 if (n > QAT_MAX_BUFF_DESC) {
726 buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
727 if (unlikely(!buflout))
728 goto err_in;
729 } else {
730 buflout = &qat_req->buf.sgl_dst.sgl_hdr;
731 memset(buflout, 0, sizeof(struct qat_alg_buf_list));
732 qat_req->buf.sgl_dst_valid = true;
733 }
734
735 bufers = buflout->bufers;
736 for_each_sg(sglout, sg, n, i)
737 bufers[i].addr = DMA_MAPPING_ERROR;
738
739 for_each_sg(sglout, sg, n, i) {
740 int y = sg_nctr;
741
742 if (!sg->length)
743 continue;
744
745 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
746 sg->length,
747 DMA_FROM_DEVICE);
748 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
749 goto err_out;
750 bufers[y].len = sg->length;
751 sg_nctr++;
752 }
753 buflout->num_bufs = sg_nctr;
754 buflout->num_mapped_bufs = sg_nctr;
755 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
756 if (unlikely(dma_mapping_error(dev, bloutp)))
757 goto err_out;
758 qat_req->buf.blout = buflout;
759 qat_req->buf.bloutp = bloutp;
760 qat_req->buf.sz_out = sz_out;
761 } else {
762 /* Otherwise set the src and dst to the same address */
763 qat_req->buf.bloutp = qat_req->buf.blp;
764 qat_req->buf.sz_out = 0;
765 }
766 return 0;
767
768 err_out:
769 if (!dma_mapping_error(dev, bloutp))
770 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
771
772 n = sg_nents(sglout);
773 for (i = 0; i < n; i++)
774 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
775 dma_unmap_single(dev, buflout->bufers[i].addr,
776 buflout->bufers[i].len,
777 DMA_FROM_DEVICE);
778
779 if (!qat_req->buf.sgl_dst_valid)
780 kfree(buflout);
781
782 err_in:
783 if (!dma_mapping_error(dev, blp))
784 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
785
786 n = sg_nents(sgl);
787 for (i = 0; i < n; i++)
788 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
789 dma_unmap_single(dev, bufl->bufers[i].addr,
790 bufl->bufers[i].len,
791 bufl_dma_dir);
792
793 if (!qat_req->buf.sgl_src_valid)
794 kfree(bufl);
795
796 dev_err(dev, "Failed to map buf for dma\n");
797 return -ENOMEM;
798 }
799
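/*
 * AEAD completion handler: release the DMA mappings and complete the
 * request, reporting -EBADMSG if the firmware flagged an error (e.g. a
 * failed authentication check).
 */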
800 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
801 struct qat_crypto_request *qat_req)
802 {
803 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
804 struct qat_crypto_instance *inst = ctx->inst;
805 struct aead_request *areq = qat_req->aead_req;
806 u8 stat_field = qat_resp->comn_resp.comn_status;
807 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
808
809 qat_alg_free_bufl(inst, qat_req);
810 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
811 res = -EBADMSG;
812 areq->base.complete(&areq->base, res);
813 }
814
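/*
 * Skcipher completion handler: release the DMA mappings, copy the final
 * IV back into the request for chaining, free the coherent IV buffer
 * and complete the request.
 */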
815 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
816 struct qat_crypto_request *qat_req)
817 {
818 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
819 struct qat_crypto_instance *inst = ctx->inst;
820 struct skcipher_request *sreq = qat_req->skcipher_req;
821 u8 stat_field = qat_resp->comn_resp.comn_status;
822 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
823 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
824
825 qat_alg_free_bufl(inst, qat_req);
826 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
827 res = -EINVAL;
828
829 memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
830 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
831 qat_req->iv_paddr);
832
833 sreq->base.complete(&sreq->base, res);
834 }
835
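/*
 * Ring callback. The opaque data of the firmware response carries the
 * originating request, which is dispatched to the AEAD or skcipher
 * completion handler stored in qat_req->cb.
 */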
836 void qat_alg_callback(void *resp)
837 {
838 struct icp_qat_fw_la_resp *qat_resp = resp;
839 struct qat_crypto_request *qat_req =
840 (void *)(__force long)qat_resp->opaque_data;
841
842 qat_req->cb(qat_resp, qat_req);
843 }
844
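/*
 * AEAD decrypt. cryptlen covers ciphertext plus digest, so the actual
 * cipher length must still be a multiple of AES_BLOCK_SIZE. The message
 * is retried up to ten times on -EAGAIN before giving up with -EBUSY.
 */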
845 static int qat_alg_aead_dec(struct aead_request *areq)
846 {
847 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
848 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
849 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
850 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
851 struct icp_qat_fw_la_cipher_req_params *cipher_param;
852 struct icp_qat_fw_la_auth_req_params *auth_param;
853 struct icp_qat_fw_la_bulk_req *msg;
854 int digst_size = crypto_aead_authsize(aead_tfm);
855 int ret, ctr = 0;
856 u32 cipher_len;
857
858 cipher_len = areq->cryptlen - digst_size;
859 if (cipher_len % AES_BLOCK_SIZE != 0)
860 return -EINVAL;
861
862 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
863 if (unlikely(ret))
864 return ret;
865
866 msg = &qat_req->req;
867 *msg = ctx->dec_fw_req;
868 qat_req->aead_ctx = ctx;
869 qat_req->aead_req = areq;
870 qat_req->cb = qat_aead_alg_callback;
871 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
872 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
873 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
874 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
875 cipher_param->cipher_length = cipher_len;
876 cipher_param->cipher_offset = areq->assoclen;
877 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
878 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
879 auth_param->auth_off = 0;
880 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
881 do {
882 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
883 } while (ret == -EAGAIN && ctr++ < 10);
884
885 if (ret == -EAGAIN) {
886 qat_alg_free_bufl(ctx->inst, qat_req);
887 return -EBUSY;
888 }
889 return -EINPROGRESS;
890 }
891
892 static int qat_alg_aead_enc(struct aead_request *areq)
893 {
894 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
895 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
896 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
897 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
898 struct icp_qat_fw_la_cipher_req_params *cipher_param;
899 struct icp_qat_fw_la_auth_req_params *auth_param;
900 struct icp_qat_fw_la_bulk_req *msg;
901 u8 *iv = areq->iv;
902 int ret, ctr = 0;
903
904 if (areq->cryptlen % AES_BLOCK_SIZE != 0)
905 return -EINVAL;
906
907 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
908 if (unlikely(ret))
909 return ret;
910
911 msg = &qat_req->req;
912 *msg = ctx->enc_fw_req;
913 qat_req->aead_ctx = ctx;
914 qat_req->aead_req = areq;
915 qat_req->cb = qat_aead_alg_callback;
916 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
917 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
918 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
919 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
920 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
921
922 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
923 cipher_param->cipher_length = areq->cryptlen;
924 cipher_param->cipher_offset = areq->assoclen;
925
926 auth_param->auth_off = 0;
927 auth_param->auth_len = areq->assoclen + areq->cryptlen;
928
929 do {
930 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
931 } while (ret == -EAGAIN && ctr++ < 10);
932
933 if (ret == -EAGAIN) {
934 qat_alg_free_bufl(ctx->inst, qat_req);
935 return -EBUSY;
936 }
937 return -EINPROGRESS;
938 }
939
940 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
941 const u8 *key, unsigned int keylen,
942 int mode)
943 {
944 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
945 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
946 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
947 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
948
949 return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
950 }
951
952 static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
953 const u8 *key, unsigned int keylen,
954 int mode)
955 {
956 struct qat_crypto_instance *inst = NULL;
957 struct device *dev;
958 int node = get_current_node();
959 int ret;
960
961 inst = qat_crypto_get_instance_node(node);
962 if (!inst)
963 return -EINVAL;
964 dev = &GET_DEV(inst->accel_dev);
965 ctx->inst = inst;
966 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
967 &ctx->enc_cd_paddr,
968 GFP_ATOMIC);
969 if (!ctx->enc_cd) {
970 ret = -ENOMEM;
971 goto out_free_instance;
972 }
973 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
974 &ctx->dec_cd_paddr,
975 GFP_ATOMIC);
976 if (!ctx->dec_cd) {
977 ret = -ENOMEM;
978 goto out_free_enc;
979 }
980
981 ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
982 if (ret)
983 goto out_free_all;
984
985 return 0;
986
987 out_free_all:
988 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
989 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
990 ctx->dec_cd, ctx->dec_cd_paddr);
991 ctx->dec_cd = NULL;
992 out_free_enc:
993 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
994 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
995 ctx->enc_cd, ctx->enc_cd_paddr);
996 ctx->enc_cd = NULL;
997 out_free_instance:
998 ctx->inst = NULL;
999 qat_crypto_put_instance(inst);
1000 return ret;
1001 }
1002
1003 static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
1004 const u8 *key, unsigned int keylen,
1005 int mode)
1006 {
1007 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1008
1009 if (ctx->enc_cd)
1010 return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
1011 else
1012 return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
1013 }
1014
1015 static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
1016 const u8 *key, unsigned int keylen)
1017 {
1018 return qat_alg_skcipher_setkey(tfm, key, keylen,
1019 ICP_QAT_HW_CIPHER_CBC_MODE);
1020 }
1021
1022 static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
1023 const u8 *key, unsigned int keylen)
1024 {
1025 return qat_alg_skcipher_setkey(tfm, key, keylen,
1026 ICP_QAT_HW_CIPHER_CTR_MODE);
1027 }
1028
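/*
 * XTS setkey. Double AES-192 keys are not accepted by
 * qat_alg_validate_key(), so they are programmed into the software
 * xts(aes) fallback tfm and requests are redirected there.
 */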
1029 static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1030 const u8 *key, unsigned int keylen)
1031 {
1032 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1033 int ret;
1034
1035 ret = xts_verify_key(tfm, key, keylen);
1036 if (ret)
1037 return ret;
1038
1039 if (keylen >> 1 == AES_KEYSIZE_192) {
1040 ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1041 if (ret)
1042 return ret;
1043
1044 ctx->fallback = true;
1045
1046 return 0;
1047 }
1048
1049 ctx->fallback = false;
1050
1051 return qat_alg_skcipher_setkey(tfm, key, keylen,
1052 ICP_QAT_HW_CIPHER_XTS_MODE);
1053 }
1054
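/*
 * Skcipher encrypt. Zero-length requests complete immediately. The IV
 * is copied into a DMA-coherent buffer referenced by cipher_IV_ptr so
 * the firmware can update it in place; -EAGAIN from the ring is retried
 * up to ten times before returning -EBUSY.
 */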
1055 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1056 {
1057 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1058 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1059 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1060 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1061 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1062 struct icp_qat_fw_la_bulk_req *msg;
1063 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1064 int ret, ctr = 0;
1065
1066 if (req->cryptlen == 0)
1067 return 0;
1068
1069 qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1070 &qat_req->iv_paddr, GFP_ATOMIC);
1071 if (!qat_req->iv)
1072 return -ENOMEM;
1073
1074 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1075 if (unlikely(ret)) {
1076 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1077 qat_req->iv_paddr);
1078 return ret;
1079 }
1080
1081 msg = &qat_req->req;
1082 *msg = ctx->enc_fw_req;
1083 qat_req->skcipher_ctx = ctx;
1084 qat_req->skcipher_req = req;
1085 qat_req->cb = qat_skcipher_alg_callback;
1086 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1087 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1088 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1089 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1090 cipher_param->cipher_length = req->cryptlen;
1091 cipher_param->cipher_offset = 0;
1092 cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1093 memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1094 do {
1095 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1096 } while (ret == -EAGAIN && ctr++ < 10);
1097
1098 if (ret == -EAGAIN) {
1099 qat_alg_free_bufl(ctx->inst, qat_req);
1100 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1101 qat_req->iv_paddr);
1102 return -EBUSY;
1103 }
1104 return -EINPROGRESS;
1105 }
1106
1107 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1108 {
1109 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1110 return -EINVAL;
1111
1112 return qat_alg_skcipher_encrypt(req);
1113 }
1114
1115 static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1116 {
1117 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1118 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1119 struct skcipher_request *nreq = skcipher_request_ctx(req);
1120
1121 if (req->cryptlen < XTS_BLOCK_SIZE)
1122 return -EINVAL;
1123
1124 if (ctx->fallback) {
1125 memcpy(nreq, req, sizeof(*req));
1126 skcipher_request_set_tfm(nreq, ctx->ftfm);
1127 return crypto_skcipher_encrypt(nreq);
1128 }
1129
1130 return qat_alg_skcipher_encrypt(req);
1131 }
1132
1133 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1134 {
1135 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1136 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1137 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1138 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1139 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1140 struct icp_qat_fw_la_bulk_req *msg;
1141 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1142 int ret, ctr = 0;
1143
1144 if (req->cryptlen == 0)
1145 return 0;
1146
1147 qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1148 &qat_req->iv_paddr, GFP_ATOMIC);
1149 if (!qat_req->iv)
1150 return -ENOMEM;
1151
1152 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1153 if (unlikely(ret)) {
1154 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1155 qat_req->iv_paddr);
1156 return ret;
1157 }
1158
1159 msg = &qat_req->req;
1160 *msg = ctx->dec_fw_req;
1161 qat_req->skcipher_ctx = ctx;
1162 qat_req->skcipher_req = req;
1163 qat_req->cb = qat_skcipher_alg_callback;
1164 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1165 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1166 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1167 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1168 cipher_param->cipher_length = req->cryptlen;
1169 cipher_param->cipher_offset = 0;
1170 cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1171 memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1172 do {
1173 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1174 } while (ret == -EAGAIN && ctr++ < 10);
1175
1176 if (ret == -EAGAIN) {
1177 qat_alg_free_bufl(ctx->inst, qat_req);
1178 dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1179 qat_req->iv_paddr);
1180 return -EBUSY;
1181 }
1182 return -EINPROGRESS;
1183 }
1184
1185 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1186 {
1187 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1188 return -EINVAL;
1189
1190 return qat_alg_skcipher_decrypt(req);
1191 }
1192
1193 static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1194 {
1195 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1196 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1197 struct skcipher_request *nreq = skcipher_request_ctx(req);
1198
1199 if (req->cryptlen < XTS_BLOCK_SIZE)
1200 return -EINVAL;
1201
1202 if (ctx->fallback) {
1203 memcpy(nreq, req, sizeof(*req));
1204 skcipher_request_set_tfm(nreq, ctx->ftfm);
1205 return crypto_skcipher_decrypt(nreq);
1206 }
1207
1208 return qat_alg_skcipher_decrypt(req);
1209 }
1210
1211 static int qat_alg_aead_init(struct crypto_aead *tfm,
1212 enum icp_qat_hw_auth_algo hash,
1213 const char *hash_name)
1214 {
1215 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1216
1217 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1218 if (IS_ERR(ctx->hash_tfm))
1219 return PTR_ERR(ctx->hash_tfm);
1220 ctx->qat_hash_alg = hash;
1221 crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1222 return 0;
1223 }
1224
1225 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1226 {
1227 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1228 }
1229
1230 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1231 {
1232 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1233 }
1234
1235 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1236 {
1237 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1238 }
1239
1240 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1241 {
1242 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1243 struct qat_crypto_instance *inst = ctx->inst;
1244 struct device *dev;
1245
1246 crypto_free_shash(ctx->hash_tfm);
1247
1248 if (!inst)
1249 return;
1250
1251 dev = &GET_DEV(inst->accel_dev);
1252 if (ctx->enc_cd) {
1253 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1254 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1255 ctx->enc_cd, ctx->enc_cd_paddr);
1256 }
1257 if (ctx->dec_cd) {
1258 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1259 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1260 ctx->dec_cd, ctx->dec_cd_paddr);
1261 }
1262 qat_crypto_put_instance(inst);
1263 }
1264
1265 static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1266 {
1267 crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1268 return 0;
1269 }
1270
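/*
 * XTS init: allocate the software fallback tfm and size the request
 * context to fit either a QAT request or a fallback skcipher_request.
 */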
1271 static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1272 {
1273 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1274 int reqsize;
1275
1276 ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1277 CRYPTO_ALG_NEED_FALLBACK);
1278 if (IS_ERR(ctx->ftfm))
1279 return PTR_ERR(ctx->ftfm);
1280
1281 reqsize = max(sizeof(struct qat_crypto_request),
1282 sizeof(struct skcipher_request) +
1283 crypto_skcipher_reqsize(ctx->ftfm));
1284 crypto_skcipher_set_reqsize(tfm, reqsize);
1285
1286 return 0;
1287 }
1288
1289 static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1290 {
1291 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1292 struct qat_crypto_instance *inst = ctx->inst;
1293 struct device *dev;
1294
1295 if (!inst)
1296 return;
1297
1298 dev = &GET_DEV(inst->accel_dev);
1299 if (ctx->enc_cd) {
1300 memset(ctx->enc_cd, 0,
1301 sizeof(struct icp_qat_hw_cipher_algo_blk));
1302 dma_free_coherent(dev,
1303 sizeof(struct icp_qat_hw_cipher_algo_blk),
1304 ctx->enc_cd, ctx->enc_cd_paddr);
1305 }
1306 if (ctx->dec_cd) {
1307 memset(ctx->dec_cd, 0,
1308 sizeof(struct icp_qat_hw_cipher_algo_blk));
1309 dma_free_coherent(dev,
1310 sizeof(struct icp_qat_hw_cipher_algo_blk),
1311 ctx->dec_cd, ctx->dec_cd_paddr);
1312 }
1313 qat_crypto_put_instance(inst);
1314 }
1315
1316 static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1317 {
1318 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1319
1320 if (ctx->ftfm)
1321 crypto_free_skcipher(ctx->ftfm);
1322
1323 qat_alg_skcipher_exit_tfm(tfm);
1324 }
1325
1326 static struct aead_alg qat_aeads[] = { {
1327 .base = {
1328 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1329 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1330 .cra_priority = 4001,
1331 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1332 .cra_blocksize = AES_BLOCK_SIZE,
1333 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1334 .cra_module = THIS_MODULE,
1335 },
1336 .init = qat_alg_aead_sha1_init,
1337 .exit = qat_alg_aead_exit,
1338 .setkey = qat_alg_aead_setkey,
1339 .decrypt = qat_alg_aead_dec,
1340 .encrypt = qat_alg_aead_enc,
1341 .ivsize = AES_BLOCK_SIZE,
1342 .maxauthsize = SHA1_DIGEST_SIZE,
1343 }, {
1344 .base = {
1345 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1346 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1347 .cra_priority = 4001,
1348 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1349 .cra_blocksize = AES_BLOCK_SIZE,
1350 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1351 .cra_module = THIS_MODULE,
1352 },
1353 .init = qat_alg_aead_sha256_init,
1354 .exit = qat_alg_aead_exit,
1355 .setkey = qat_alg_aead_setkey,
1356 .decrypt = qat_alg_aead_dec,
1357 .encrypt = qat_alg_aead_enc,
1358 .ivsize = AES_BLOCK_SIZE,
1359 .maxauthsize = SHA256_DIGEST_SIZE,
1360 }, {
1361 .base = {
1362 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1363 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1364 .cra_priority = 4001,
1365 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1366 .cra_blocksize = AES_BLOCK_SIZE,
1367 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1368 .cra_module = THIS_MODULE,
1369 },
1370 .init = qat_alg_aead_sha512_init,
1371 .exit = qat_alg_aead_exit,
1372 .setkey = qat_alg_aead_setkey,
1373 .decrypt = qat_alg_aead_dec,
1374 .encrypt = qat_alg_aead_enc,
1375 .ivsize = AES_BLOCK_SIZE,
1376 .maxauthsize = SHA512_DIGEST_SIZE,
1377 } };
1378
1379 static struct skcipher_alg qat_skciphers[] = { {
1380 .base.cra_name = "cbc(aes)",
1381 .base.cra_driver_name = "qat_aes_cbc",
1382 .base.cra_priority = 4001,
1383 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1384 .base.cra_blocksize = AES_BLOCK_SIZE,
1385 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1386 .base.cra_alignmask = 0,
1387 .base.cra_module = THIS_MODULE,
1388
1389 .init = qat_alg_skcipher_init_tfm,
1390 .exit = qat_alg_skcipher_exit_tfm,
1391 .setkey = qat_alg_skcipher_cbc_setkey,
1392 .decrypt = qat_alg_skcipher_blk_decrypt,
1393 .encrypt = qat_alg_skcipher_blk_encrypt,
1394 .min_keysize = AES_MIN_KEY_SIZE,
1395 .max_keysize = AES_MAX_KEY_SIZE,
1396 .ivsize = AES_BLOCK_SIZE,
1397 }, {
1398 .base.cra_name = "ctr(aes)",
1399 .base.cra_driver_name = "qat_aes_ctr",
1400 .base.cra_priority = 4001,
1401 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1402 .base.cra_blocksize = 1,
1403 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1404 .base.cra_alignmask = 0,
1405 .base.cra_module = THIS_MODULE,
1406
1407 .init = qat_alg_skcipher_init_tfm,
1408 .exit = qat_alg_skcipher_exit_tfm,
1409 .setkey = qat_alg_skcipher_ctr_setkey,
1410 .decrypt = qat_alg_skcipher_decrypt,
1411 .encrypt = qat_alg_skcipher_encrypt,
1412 .min_keysize = AES_MIN_KEY_SIZE,
1413 .max_keysize = AES_MAX_KEY_SIZE,
1414 .ivsize = AES_BLOCK_SIZE,
1415 }, {
1416 .base.cra_name = "xts(aes)",
1417 .base.cra_driver_name = "qat_aes_xts",
1418 .base.cra_priority = 4001,
1419 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1420 CRYPTO_ALG_ALLOCATES_MEMORY,
1421 .base.cra_blocksize = AES_BLOCK_SIZE,
1422 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1423 .base.cra_alignmask = 0,
1424 .base.cra_module = THIS_MODULE,
1425
1426 .init = qat_alg_skcipher_init_xts_tfm,
1427 .exit = qat_alg_skcipher_exit_xts_tfm,
1428 .setkey = qat_alg_skcipher_xts_setkey,
1429 .decrypt = qat_alg_skcipher_xts_decrypt,
1430 .encrypt = qat_alg_skcipher_xts_encrypt,
1431 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1432 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1433 .ivsize = AES_BLOCK_SIZE,
1434 } };
1435
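/*
 * Register the AEAD and skcipher algorithms above with the crypto API
 * when the first QAT device comes up; qat_algs_unregister() removes
 * them when the last device goes away (refcounted via active_devs under
 * algs_lock). A user reaches them through the usual API, e.g.
 * (illustrative only):
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * where the high cra_priority (4001) lets the QAT implementation win
 * selection whenever a device is present.
 */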
1436 int qat_algs_register(void)
1437 {
1438 int ret = 0;
1439
1440 mutex_lock(&algs_lock);
1441 if (++active_devs != 1)
1442 goto unlock;
1443
1444 ret = crypto_register_skciphers(qat_skciphers,
1445 ARRAY_SIZE(qat_skciphers));
1446 if (ret)
1447 goto unlock;
1448
1449 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1450 if (ret)
1451 goto unreg_algs;
1452
1453 unlock:
1454 mutex_unlock(&algs_lock);
1455 return ret;
1456
1457 unreg_algs:
1458 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1459 goto unlock;
1460 }
1461
1462 void qat_algs_unregister(void)
1463 {
1464 mutex_lock(&algs_lock);
1465 if (--active_devs != 0)
1466 goto unlock;
1467
1468 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1469 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1470
1471 unlock:
1472 mutex_unlock(&algs_lock);
1473 }
1474