/*
 * Key Wrapping: RFC3394 / NIST SP800-38F
 *
 * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL2
 * are required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Note for using key wrapping:
 *
 *	* The result of the encryption operation is the ciphertext starting
 *	  with the 2nd semiblock. The first semiblock is provided as the IV.
 *	  The IV used to start the encryption operation is the default IV.
 *
 *	* The input for the decryption is the first semiblock handed in as an
 *	  IV. The ciphertext is the data starting with the 2nd semiblock. The
 *	  return code of the decryption operation will be -EBADMSG in case an
 *	  integrity error occurs.
 *
 * To obtain the full result of an encryption as expected by SP800-38F, the
 * caller must allocate a buffer that is one semiblock (8 bytes) larger than
 * the plaintext:
 *
 *	unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
 *	u8 data[datalen];
 *	u8 *iv = data;
 *	u8 *pt = data + crypto_skcipher_ivsize(tfm);
 *		<ensure that pt contains the plaintext of size ptlen>
 *	sg_init_one(&sg, pt, ptlen);
 *	skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
 *
 *	==> After encryption, data now contains the full KW result as per
 *	    SP800-38F.
 *
 * In case of decryption, the ciphertext already has the expected length
 * and must be segmented appropriately:
 *
 *	unsigned int datalen = CTLEN;
 *	u8 data[datalen];
 *		<ensure that data contains the full ciphertext>
 *	u8 *iv = data;
 *	u8 *ct = data + crypto_skcipher_ivsize(tfm);
 *	unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
 *	sg_init_one(&sg, ct, ctlen);
 *	skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
 *
 *	==> After decryption (which hopefully does not return -EBADMSG), the
 *	    ct pointer now points to the plaintext of size ctlen.
 *
 * Note 2: KWP is not implemented as this would defy in-place operation.
 *	   To wrap non-aligned data, simply pad the input with zeros to fill
 *	   it up to the 8 byte boundary.
 */
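
/*
 * A fuller allocation sketch building on the encryption example above.
 * This is illustrative only: the "kw(aes)" instantiation, the 16-byte key
 * length and the synchronous use of crypto_skcipher_encrypt() are
 * assumptions of this sketch, and error handling is omitted:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("kw(aes)", 0, 0);
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *
 *	crypto_skcipher_setkey(tfm, key, 16);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	sg_init_one(&sg, pt, ptlen);
 *	skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
 *	crypto_skcipher_encrypt(req);
 *		<returns 0 on success, after which iv || pt holds the result>
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */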

#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>

/*
 * The wrapping state: A is the integrity check / chaining semiblock, R is
 * the data semiblock currently being processed. Each is one SEMIBSIZE
 * (8 byte) big-endian value; together they form one cipher block.
 */
struct crypto_kw_block {
#define SEMIBSIZE 8
	__be64 A;
	__be64 R;
};
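
/*
 * For reference, the per-semiblock step of the SP800-38F wrapping function
 * W (and its inverse) that the six-round loops below implement on the
 * A || R pair, with t counting 1 .. 6n on wrap and 6n .. 1 on unwrap,
 * where n is the number of data semiblocks:
 *
 *	wrap:	A || R = AES-Enc(K, A || R);  A ^= t;  t++
 *	unwrap:	A ^= t;  t--;  A || R = AES-Dec(K, A || R)
 *
 * Unwrapping consumes the semiblocks in reverse order, which is why the
 * decrypt path fast-forwards through the SGL from the end.
 */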

/*
 * Fast-forward the SGL so that the walk points at the semiblock ending at
 * offset "end", i.e. skip the first end - SEMIBSIZE bytes. For example,
 * with two 16-byte sg entries and end == 24, the walk starts at offset 0
 * of the second entry.
 */
static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
				     struct scatterlist *sg,
				     unsigned int end)
{
	unsigned int skip = 0;

	/* The caller should only operate on full SEMIBLOCKs. */
	BUG_ON(end < SEMIBSIZE);

	skip = end - SEMIBSIZE;
	while (sg) {
		if (sg->length > skip) {
			scatterwalk_start(walk, sg);
			scatterwalk_advance(walk, skip);
			break;
		} else
			skip -= sg->length;

		sg = sg_next(sg);
	}
}

static int crypto_kw_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	struct crypto_kw_block block;
	struct scatterlist *src, *dst;
	u64 t = 6 * ((req->cryptlen) >> 3);	/* t starts at 6n */
	unsigned int i;
	int ret = 0;

	/*
	 * Require at least 2 semiblocks (note, the 3rd semiblock that is
	 * required by SP800-38F is the IV) and alignment to the semiblock
	 * boundary.
	 */
	if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
		return -EINVAL;

	/* Place the IV into block A */
	memcpy(&block.A, req->iv, SEMIBSIZE);

	/*
	 * src scatterlist is read-only. dst scatterlist is r/w. During the
	 * first loop, src points to req->src and dst to req->dst. For any
	 * subsequent round, the code operates on req->dst only.
	 */
	src = req->src;
	dst = req->dst;

	for (i = 0; i < 6; i++) {
		struct scatter_walk src_walk, dst_walk;
		unsigned int nbytes = req->cryptlen;

		while (nbytes) {
			/* point to the semiblock ending at offset nbytes */
			crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
			/* get the source block */
			scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
					       false);

			/* perform KW operation: modify IV with counter */
			block.A ^= cpu_to_be64(t);
			t--;
			/* perform KW operation: decrypt block */
			crypto_cipher_decrypt_one(cipher, (u8 *)&block,
						  (u8 *)&block);

			/* point to the semiblock ending at offset nbytes */
			crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
			/* Copy block.R into place */
			scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
					       true);

			nbytes -= SEMIBSIZE;
		}

		/* we now start to operate on the dst SGL only */
		src = req->dst;
		dst = req->dst;
	}

	/* Perform authentication check */
	if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
		ret = -EBADMSG;

	memzero_explicit(&block, sizeof(struct crypto_kw_block));

	return ret;
}

static int crypto_kw_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	struct crypto_kw_block block;
	struct scatterlist *src, *dst;
	u64 t = 1;
	unsigned int i;

	/*
	 * Require at least 2 semiblocks (note, the 3rd semiblock that is
	 * required by SP800-38F is the IV that occupies the first semiblock).
	 * This means that the dst memory must be one semiblock larger than
	 * src. Also ensure that the given data is aligned to the semiblock
	 * boundary.
	 */
	if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
		return -EINVAL;

	/*
	 * Place the predefined IV into block A -- for encrypt, the caller
	 * does not need to provide an IV, but the final IV must be fetched
	 * from req->iv after the operation.
	 */
	block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);

	/*
	 * src scatterlist is read-only. dst scatterlist is r/w. During the
	 * first loop, src points to req->src and dst to req->dst. For any
	 * subsequent round, the code operates on req->dst only.
	 */
	src = req->src;
	dst = req->dst;

	for (i = 0; i < 6; i++) {
		struct scatter_walk src_walk, dst_walk;
		unsigned int nbytes = req->cryptlen;

		scatterwalk_start(&src_walk, src);
		scatterwalk_start(&dst_walk, dst);

		while (nbytes) {
			/* get the source block */
			scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
					       false);

			/* perform KW operation: encrypt block */
			crypto_cipher_encrypt_one(cipher, (u8 *)&block,
						  (u8 *)&block);
			/* perform KW operation: modify IV with counter */
			block.A ^= cpu_to_be64(t);
			t++;

			/* Copy block.R into place */
			scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
					       true);

			nbytes -= SEMIBSIZE;
		}

		/* we now start to operate on the dst SGL only */
		src = req->dst;
		dst = req->dst;
	}

	/* establish the IV for the caller to pick up */
	memcpy(req->iv, &block.A, SEMIBSIZE);

	memzero_explicit(&block, sizeof(struct crypto_kw_block));

	return 0;
}

static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	alg = skcipher_ialg_simple(inst);

	err = -EINVAL;
	/* Section 5.1 requirement for KW: a 128 bit block cipher */
	if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
		goto out_free_inst;

	inst->alg.base.cra_blocksize = SEMIBSIZE;
	inst->alg.base.cra_alignmask = 0;
	inst->alg.ivsize = SEMIBSIZE;

	inst->alg.encrypt = crypto_kw_encrypt;
	inst->alg.decrypt = crypto_kw_decrypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		inst->free(inst);
	}

	return err;
}

static struct crypto_template crypto_kw_tmpl = {
	.name = "kw",
	.create = crypto_kw_create,
	.module = THIS_MODULE,
};

static int __init crypto_kw_init(void)
{
	return crypto_register_template(&crypto_kw_tmpl);
}

static void __exit crypto_kw_exit(void)
{
	crypto_unregister_template(&crypto_kw_tmpl);
}

subsys_initcall(crypto_kw_init);
module_exit(crypto_kw_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
MODULE_ALIAS_CRYPTO("kw");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);