/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* IV size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

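/*
 * Return true if @bio's crypt context allows it to be merged onto the back of
 * @req, i.e. both use the same key (or neither is encrypted) and the bio's
 * data unit numbers continue @req's at byte offset blk_rq_bytes(req).
 */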
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

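/*
 * Return true if @bio's crypt context allows it to be merged onto the front of
 * @req, i.e. @req's data unit numbers continue the bio's at byte offset
 * bio->bi_iter.bi_size.
 */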
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

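/*
 * Return true if @next's crypt context allows it to be merged onto the back of
 * @req (same key and contiguous data unit numbers).
 */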
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

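/*
 * When @bio is front-merged into @rq, the request now starts at the bio's
 * first data unit, so copy the bio's starting DUN into the request's crypt
 * context.
 */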
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

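/*
 * Called on the submission path for bios with a crypt context: checks whether
 * the bio can be handled, either by inline crypto hardware or (if enabled) by
 * the crypto API fallback.  Returns false and completes the bio with an error
 * if it cannot; in the fallback case, *bio_ptr may be updated to point at a
 * bounce bio.
 */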
122*4882a593Smuzhiyun bool __blk_crypto_bio_prep(struct bio **bio_ptr);
blk_crypto_bio_prep(struct bio ** bio_ptr)123*4882a593Smuzhiyun static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
124*4882a593Smuzhiyun {
125*4882a593Smuzhiyun 	if (bio_has_crypt_ctx(*bio_ptr))
126*4882a593Smuzhiyun 		return __blk_crypto_bio_prep(bio_ptr);
127*4882a593Smuzhiyun 	return true;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun 
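/*
 * For an encrypted request, __blk_crypto_init_request() acquires a keyslot
 * for the request's key from the queue's keyslot manager.
 */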
blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

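/*
 * Rough sketch of how the hooks above are typically wired into the block
 * layer (the exact call sites live outside this header and vary between
 * kernel versions; this is illustrative, not authoritative):
 *
 *   bio submission:      blk_crypto_bio_prep()
 *   request construction: blk_crypto_rq_bio_prep(), then
 *                          blk_crypto_init_request()
 *   merging:             bio_crypt_rq_ctx_compatible() and the
 *                          bio_crypt_ctx_*_mergeable() helpers
 *   bio advance/split:   bio_crypt_advance()
 *   teardown:            blk_crypto_free_request() when the request is freed,
 *                          bio_crypt_free_ctx() when the bio is freed
 */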
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */