// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because the bio might be split before
	 * being resubmitted.
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

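/*
 * Clear the slot's key by programming the (random) blank key into the tfm for
 * the slot's current mode, then mark the slot as holding no key.
 */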
static void blk_crypto_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

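/*
 * Program @key into @slot: if the slot still holds a key for a different mode,
 * evict it first, then set @key on the tfm for the key's crypto mode.
 */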
static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_evict_keyslot(slot);
		return err;
	}
	return 0;
}

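/* Evicting a key from the fallback just means clearing the keyslot. */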
static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	blk_crypto_evict_keyslot(slot);
	return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
	.keyslot_program	= blk_crypto_keyslot_program,
	.keyslot_evict		= blk_crypto_keyslot_evict,
};

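/*
 * Completion handler for the bounce bio: free the bounce pages, propagate the
 * I/O status to the source bio, and complete the source bio.
 */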
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_endio(src_bio);
}

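/*
 * Clone @bio_src's fields and bvecs into a newly allocated bio. The clone
 * initially points at the source's pages; the encrypt path swaps each bv_page
 * for a bounce page afterwards.
 */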
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
	if (!bio)
		return NULL;
	bio->bi_disk		= bio_src->bi_disk;
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	bio_clone_skip_dm_default_key(bio, bio_src);

	return bio;
}

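/*
 * Allocate an skcipher_request on @slot's tfm, set up so the caller can issue
 * requests and wait for each one synchronously via @wait and crypto_wait_req().
 */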
static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
					struct skcipher_request **ciph_req_ret,
					struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_keyslot *slotp;
	int keyslot_idx = blk_ksm_get_slot_idx(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}

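/*
 * The bounce bio allocated for encryption can hold at most BIO_MAX_PAGES
 * bvecs. If *bio_ptr has more segments than that, split off a front piece
 * that fits, resubmit the remainder, and continue with the front piece in
 * *bio_ptr. Returns false and sets bi_status if the split allocation fails.
 */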
static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_PAGES)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}

union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

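/* Fill the IV with the DUN words, converted to little-endian on-disk form. */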
static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the crypto
 * API, and replace *bio_ptr with the bounce bio. May split the input bio if
 * it's too large. Returns true on success. Returns false and sets
 * bio->bi_status on error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_ksm_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for single page bvec */
	if (!blk_crypto_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_ksm_put_slot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

	return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_ksm_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_ksm_put_slot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts if
 * it's too big (see blk_crypto_split_bio_if_needed). It then allocates a
 * bounce bio for the first part, encrypts it, and updates *bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio
 * (i.e. as if no encryption context was ever specified) for the purposes of
 * the rest of the stack except for blk-integrity (blk-integrity and blk-crypto
 * are not currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
					  &bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}

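/* Evict @key from the crypto API fallback's keyslot manager. */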
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return blk_ksm_evict_key(&blk_crypto_ksm, key);
}

static bool blk_crypto_fallback_inited;
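/*
 * One-time setup of the fallback's global state: the keyslot manager, the
 * decryption workqueue, the keyslot array, and the bounce page and crypt ctx
 * mempools. Called under tfms_init_lock the first time a mode is used.
 */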
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
	if (err)
		goto out;
	err = -ENOMEM;

	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
	blk_crypto_ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_free_ksm;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
	blk_ksm_destroy(&blk_crypto_ksm);
out:
	return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}