xref: /OK3568_Linux_fs/kernel/fs/crypto/bio.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

void fscrypt_decrypt_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
							   bv->bv_offset);
		if (ret)
			SetPageError(page);
	}
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
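
/*
 * Illustrative sketch (not part of this file): fscrypt_decrypt_bio() performs
 * synchronous decryption, so a filesystem using fs-layer crypto would call it
 * from process context (e.g. a workqueue item scheduled from the bio's
 * completion handler), not from the interrupt-time ->bi_end_io itself.  The
 * names below (struct example_read_ctx, example_decrypt_work) are
 * hypothetical; real callers are the individual filesystems.
 */
#if 0
struct example_read_ctx {
	struct work_struct work;
	struct bio *bio;
};

static void example_decrypt_work(struct work_struct *work)
{
	struct example_read_ctx *ctx =
		container_of(work, struct example_read_ctx, work);

	/* Decrypt in place; any page that fails is marked with SetPageError(). */
	fscrypt_decrypt_bio(ctx->bio);

	/*
	 * The filesystem would then mark the successfully decrypted pages
	 * uptodate, unlock them, and release the bio and this context.
	 */
}
#endif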

static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
					      pgoff_t lblk, sector_t pblk,
					      unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
	struct bio *bio;
	int ret, err = 0;
	int num_pages = 0;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);

	while (len) {
		unsigned int blocks_this_page = min(len, blocks_per_page);
		unsigned int bytes_this_page = blocks_this_page << blockbits;

		if (num_pages == 0) {
			fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
			bio_set_dev(bio, inode->i_sb->s_bdev);
			bio->bi_iter.bi_sector =
					pblk << (blockbits - SECTOR_SHIFT);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		}
		ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
		if (WARN_ON(ret != bytes_this_page)) {
			err = -EIO;
			goto out;
		}
		num_pages++;
		len -= blocks_this_page;
		lblk += blocks_this_page;
		pblk += blocks_this_page;
		if (num_pages == BIO_MAX_PAGES || !len ||
		    !fscrypt_mergeable_bio(bio, inode, lblk)) {
			err = submit_bio_wait(bio);
			if (err)
				goto out;
			bio_reset(bio);
			num_pages = 0;
		}
	}
out:
	bio_put(bio);
	return err;
}
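
/*
 * Illustrative sketch (an assumption, not taken from a specific filesystem):
 * the fscrypt_set_bio_crypt_ctx() / fscrypt_mergeable_bio() pairing used
 * above is the same pattern a filesystem's writeback path follows when inline
 * crypto is in use: set the crypt context before adding any pages, and only
 * merge a page whose key and DUN continue the bio's.  The helper names are
 * hypothetical.
 */
#if 0
static void example_setup_write_bio(struct bio *bio,
				    const struct inode *inode, pgoff_t lblk)
{
	/* Attach the key and the DUN derived from lblk before adding pages. */
	fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
}

static bool example_can_merge_page(struct bio *bio,
				   const struct inode *inode, pgoff_t lblk)
{
	/* A page merges only if its crypt context continues the bio's. */
	return fscrypt_mergeable_bio(bio, inode, lblk);
}
#endif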

/**
 * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
 * @inode: the file's inode
 * @lblk: the first file logical block to zero out
 * @pblk: the first filesystem physical block to zero out
 * @len: number of blocks to zero out
 *
 * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
 * ciphertext blocks which decrypt to the all-zeroes block.  The blocks must be
 * both logically and physically contiguous.  It's also assumed that the
 * filesystem only uses a single block device, ->s_bdev.
 *
 * Note that since each block uses a different IV, this involves writing a
 * different ciphertext to each block; we can't simply reuse the same one.
 *
 * Return: 0 on success; -errno on failure.
 */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
	const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
	struct page *pages[16]; /* write up to 16 pages at a time */
	unsigned int nr_pages;
	unsigned int i;
	unsigned int offset;
	struct bio *bio;
	int ret, err;

	if (len == 0)
		return 0;

	if (fscrypt_inode_uses_inline_crypto(inode))
		return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
							  len);

	BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES);
	nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
			 (len + blocks_per_page - 1) >> blocks_per_page_bits);

	/*
	 * We need at least one page for ciphertext.  Allocate the first one
	 * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
	 *
	 * Any additional page allocations are allowed to fail, as they only
	 * help performance, and waiting on the mempool for them could deadlock.
	 */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
						     GFP_NOWAIT | __GFP_NOWARN);
		if (!pages[i])
			break;
	}
	nr_pages = i;
	if (WARN_ON(nr_pages <= 0))
		return -EINVAL;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, nr_pages);

	do {
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		i = 0;
		offset = 0;
		do {
			err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
						  ZERO_PAGE(0), pages[i],
						  blocksize, offset, GFP_NOFS);
			if (err)
				goto out;
			lblk++;
			pblk++;
			len--;
			offset += blocksize;
			if (offset == PAGE_SIZE || len == 0) {
				ret = bio_add_page(bio, pages[i++], offset, 0);
				if (WARN_ON(ret != offset)) {
					err = -EIO;
					goto out;
				}
				offset = 0;
			}
		} while (i != nr_pages && len != 0);

		err = submit_bio_wait(bio);
		if (err)
			goto out;
		bio_reset(bio);
	} while (len != 0);
	err = 0;
out:
	bio_put(bio);
	for (i = 0; i < nr_pages; i++)
		fscrypt_free_bounce_page(pages[i]);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
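
/*
 * Illustrative sketch (an assumption about a caller, not code from this
 * file): a filesystem might call fscrypt_zeroout_range() when it needs a
 * range of already-mapped blocks in an encrypted file to read back as zeroes,
 * e.g. when zeroing out the tail of an allocation.  example_zero_blocks() and
 * its parameters are hypothetical; the real callers are the individual
 * filesystems.
 */
#if 0
static int example_zero_blocks(struct inode *inode, pgoff_t lblk,
			       sector_t pblk, unsigned int nr_blocks)
{
	/* Unencrypted files could instead zero the blocks directly. */
	if (!fscrypt_needs_contents_encryption(inode))
		return 0;

	/*
	 * Writes nr_blocks ciphertext blocks, each of which decrypts to the
	 * all-zeroes block; returns 0 or -errno.
	 */
	return fscrypt_zeroout_range(inode, lblk, pblk, nr_blocks);
}
#endif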