// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

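/*
 * Per-tfm context: the backing virtio device plus one device-side
 * session per direction. A virtio-crypto session encodes the operation
 * (encrypt or decrypt) at creation time, so each key needs separate
 * encrypt and decrypt sessions.
 */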
struct virtio_crypto_skcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_skcipher *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_skcipher_ctx *skcipher_ctx;
	struct skcipher_request *skcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct skcipher_alg algo;
};

/*
 * The algs_lock protects the per-algorithm active_devs counters
 * below and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err);

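/*
 * Data-queue completion callback: map the status byte written back by
 * the device onto a Linux errno and finalize the skcipher request.
 */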
static void virtio_crypto_dataq_sym_callback(struct virtio_crypto_request *vc_req,
					     int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct skcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->skcipher_req;
		virtio_crypto_skcipher_finalize_req(vc_sym_req,
						    ablk_req, error);
	}
}

static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid doing DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

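	/*
	 * Build a CREATE_SESSION request on the control virtqueue: two
	 * driver-readable sgs (ctrl header, key) and one device-writable
	 * sg into which the device returns status and session id.
	 */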
	spin_lock(&vcrypto->ctrl_lock);
	/* Fill in the control header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Fill in the cipher parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Device writes status and session id back here */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kfree_sensitive(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * The kick traps into the hypervisor, so the request should be
	 * handled immediately; busy-wait for the completion.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));
		kfree_sensitive(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kfree_sensitive(cipher_key);
	return 0;
}

static int virtio_crypto_alg_skcipher_close_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Fill in the control header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Device writes the status back here */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status,
		       destroy_session->session_id);

		return -EINVAL;
	}
	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}

static int virtio_crypto_alg_skcipher_init_sessions(
		struct virtio_crypto_skcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		return -EINVAL;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		return -EINVAL;

	/* Create encryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;
}

/* Note: kernel crypto API implementation */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
			virtcrypto_get_dev_node(node,
				VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}

static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	struct scatterlist *sg;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (src_nents < 0) {
		pr_err("Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3? outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
		cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
		     sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
		cpu_to_le32((uint32_t)dst_len);

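	/*
	 * Scatterlist layout expected by virtqueue_add_sgs(): all
	 * driver-readable entries first (outhdr, IV, source data),
	 * followed by the device-writable entries (destination data,
	 * status byte).
	 */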
	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid doing DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
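	/*
	 * For CBC decryption the next chaining IV is the last ciphertext
	 * block, so save it into req->iv now, before the device (possibly
	 * operating in place) overwrites the source buffer.
	 */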
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
		sgs[num_out++] = sg;

	/* Destination data */
	for (sg = req->dst; sg; sg = sg_next(sg))
		sgs[num_out + num_in++] = sg;

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kfree_sensitive(iv);
free:
	kfree_sensitive(req_data);
	kfree(sgs);
	return err;
}

static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
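	/*
	 * For CBC encryption the next chaining IV is the last ciphertext
	 * block just produced in the destination; copy it back to req->iv
	 * as the skcipher API requires.
	 */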
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	kfree_sensitive(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);

	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					 req, err);
}

static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "virtio_crypto_aes_cbc",
		.base.cra_priority	= 150,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module	= THIS_MODULE,
		.init			= virtio_crypto_skcipher_init,
		.exit			= virtio_crypto_skcipher_exit,
		.setkey			= virtio_crypto_skcipher_setkey,
		.decrypt		= virtio_crypto_skcipher_decrypt,
		.encrypt		= virtio_crypto_skcipher_encrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
} };

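/*
 * Algorithms are registered with the crypto core only when the first
 * capable device shows up; additional devices just bump the
 * per-algorithm active_devs count under algs_lock.
 */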
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}