// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");
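/*
 * Illustrative note (an assumption, not taken from this file): since the
 * parameter permission is 0444, the preallocation count can only be changed
 * at boot time, e.g. with a kernel command-line option along the lines of:
 *
 *	blk_crypto.num_prealloc_crypt_ctxs=256
 */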

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);
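/*
 * Illustrative sketch (not part of this file): a hypothetical filesystem
 * read/write path that already holds an initialized blk_crypto_key ("key")
 * and uses the file's logical block number ("lblk") as the DUN might attach
 * a context like this:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk };
 *
 *	bio_crypt_set_ctx(bio, key, dun, GFP_NOFS);
 *
 * GFP_NOFS includes __GFP_DIRECT_RECLAIM, so the mempool allocation above
 * cannot fail.
 */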

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
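/*
 * Worked example of the carry handling above (illustrative only): with four
 * limbs, incrementing { 0xffffffffffffffff, 5, 0, 0 } by 1 overflows limb 0,
 * so 1 is carried into limb 1, giving { 0, 6, 0, 0 }.
 */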

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
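/*
 * Illustrative example (not from this file): with 4096-byte data units
 * (data_unit_size_bits == 12), a bio covering 8192 bytes starting at DUN
 * { 7, 0, ... } is contiguous with a following bio only if that bio's DUN is
 * { 9, 0, ... }, since the 8192 bytes span two data units.
 */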

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
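/*
 * Illustrative example (not from this file): with a 4096-byte data unit
 * size, a segment with bv_len == 2048 or bv_offset == 512 fails the check
 * above, since both the length and offset of every segment must be multiples
 * of the data unit size.
 */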

blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key. Must be at least the required size for the
 *		  chosen @crypto_mode; see blk_crypto_modes[]. (It's allowed
 *		  to be longer than the mode's actual key size, in order to
 *		  support inline encryption hardware that accepts wrapped keys.
 *		  @is_hw_wrapped has to be set for such keys)
 * @is_hw_wrapped: Denotes @raw_key is wrapped.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);

	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->crypto_cfg.is_hw_wrapped = is_hw_wrapped;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
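/*
 * Illustrative sketch (not part of this file): initializing a standard
 * (non-hardware-wrapped) AES-256-XTS key for 4096-byte data units with an
 * 8-byte DUN, where "raw" is a caller-provided 64-byte raw key:
 *
 *	struct blk_crypto_key blk_key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw, 64, false,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *	if (err)
 *		return err;
 */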

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
	    !cfg->is_hw_wrapped)
		return true;
	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	if (key->crypto_cfg.is_hw_wrapped) {
		pr_warn_once("hardware doesn't support wrapped keys\n");
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into. The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if key is not present in the q's ksm, -err on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	/*
	 * If the request queue's associated inline encryption hardware didn't
	 * have support for the key, then the key might have been programmed
	 * into the fallback keyslot manager, so try to evict from there.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
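
/*
 * Illustrative key lifecycle sketch (not part of this file), tying the
 * exported API together for a hypothetical filesystem; "q", "raw", "bio",
 * and "lblk" are assumed to be provided by the caller:
 *
 *	struct blk_crypto_key key;
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk };
 *	int err;
 *
 *	err = blk_crypto_init_key(&key, raw, 64, false,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS, 8, 4096);
 *	if (!err)
 *		err = blk_crypto_start_using_key(&key, q);
 *	if (err)
 *		return err;
 *
 *	// Per bio, from the data path:
 *	bio_crypt_set_ctx(bio, &key, dun, GFP_NOFS);
 *	submit_bio(bio);
 *
 *	// When the key is no longer needed and no IO using it is in flight:
 *	err = blk_crypto_evict_key(q, &key);
 *	memzero_explicit(&key, sizeof(key));
 */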