// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY 64
#define SEC_REQ_LIMIT SZ_32M

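/*
 * Per-algorithm configuration. These fields are packed into the BD
 * (buffer descriptor) words by sec_alg_skcipher_init_template():
 * c_mode and c_width into w0, c_alg into w1 and key_len into w3.
 */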
struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex protecting the reference count of active devices using the
 * algorithm providers.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}

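/*
 * Free a chain of hardware scatter gather lists back to the DMA pool,
 * following the next pointers from the first element onwards.
 */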
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}

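/*
 * Build a chain of hardware scatter gather lists from a DMA-mapped
 * scatterlist: each hardware SGL holds up to SEC_MAX_SGE_NUM entries,
 * with further elements linked via next_sgl.
 */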
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info,
				     gfp_t gfp)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   gfp, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
	*psec_sgl = 0;

	return ret;
}

static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to the hardware queue only under the following
		 * circumstances:
		 * 1) Software and hardware queues are empty, so there are no
		 *    chain dependencies.
		 * 2) No dependencies as a new IV is provided - (check the
		 *    software queue is empty to maintain ordering).
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue, which
		 * is then emptied as requests complete.
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send, then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}
err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}

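/*
 * Completion handler for a single element: validate the response,
 * update the IV for chaining modes, drain the software queue or
 * backlog into the hardware queue, free the element, and complete
 * the skcipher request once all of its elements are done.
 */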
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester,
		 * which should be able to handle it appropriately.
		 */
	}

	spin_lock_bh(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
		    backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	spin_unlock_bh(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion.
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}

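/*
 * Work out how to split a request of the given length into blocks of
 * at most SEC_REQ_LIMIT bytes, returning an array holding the size of
 * each block.
 */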
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps, gfp_t gfp)
{
	size_t *sizes;
	int i;

	/* Split into suitably sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev, gfp_t gfp)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), gfp);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* Split the mapped scatterlist into steps pieces of the requested sizes */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, gfp);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

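/*
 * Allocate and fill one request element (BD) from the template: set
 * the cipher direction, encode the granule size across the w0/w2
 * fields, and build the in (and, for distinct destinations, out)
 * hardware SGLs.
 */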
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info, gfp_t gfp)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), gfp);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info, gfp);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info, gfp);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}

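/*
 * Core request path: split the source (and destination, if different)
 * scatterlists into SEC_REQ_LIMIT sized elements, map the IV, build a
 * BD per element, then enqueue the whole set atomically or place the
 * request on the backlog if the caller allows it.
 */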
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;
	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization: in the chaining case we can't use a dma
	 * pool bounce buffer, but in the case where we know there is no
	 * chaining we can.
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info, gfp);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully clean up after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	spin_lock_bh(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			spin_unlock_bh(&queue->queuelock);
			goto out;
		}

		spin_unlock_bh(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	spin_unlock_bh(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);
	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_BIDIRECTIONAL);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	spin_lock_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

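/*
 * Init variant used by the chaining modes (CBC/CTR): additionally
 * allocate a software queue, used to preserve ordering between
 * dependent elements.
 */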
static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}

static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
	/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

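/*
 * Registration is reference counted across devices: only the first
 * device to come up registers the algorithms, and only the last one
 * to go away unregisters them.
 */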
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;
	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}