/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *name; /* name of this mode, shown in sysfs */
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int security_strength; /* security strength in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];
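
/*
 * For illustration only (the authoritative table lives in block/blk-crypto.c),
 * an entry of blk_crypto_modes[] looks roughly like:
 *
 *	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
 *		.name			= "AES-256-XTS",
 *		.cipher_str		= "xts(aes)",
 *		.keysize		= 64,
 *		.security_strength	= 32,
 *		.ivsize			= 16,
 *	},
 *
 * Note that an XTS key is really two AES keys concatenated, which is why the
 * security strength (32 bytes) is half the key size (64 bytes).
 */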

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct gendisk *disk);

void blk_crypto_sysfs_unregister(struct gendisk *disk);

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

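/*
 * In the merge helpers below, two crypt contexts are mergeable only if both
 * are absent, or both use the same key and the DUN of the second continues
 * where the first's data (bc1_bytes) leaves off, so that the merged request
 * still spans a contiguous DUN range.
 */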
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return rq->crypt_keyslot;
}

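/*
 * Keyslot lifecycle sketch: blk_crypto_get_keyslot() acquires a slot for the
 * key (programming it into the hardware if it isn't already there) before the
 * request is issued, and blk_crypto_put_keyslot() releases that reference on
 * completion.  See block/blk-crypto-profile.c for the exact semantics.
 */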
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr);

void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);

int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key);

bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct gendisk *disk)
{
	return 0;
}

static inline void blk_crypto_sysfs_unregister(struct gendisk *disk)
{
}

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

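/*
 * The helpers below all follow the same pattern: a cheap inline check for
 * whether the bio or request uses inline encryption at all, with the real
 * work done out of line in a double-underscore-prefixed function.  This keeps
 * the common unencrypted I/O path free of function calls.
 */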
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_rq_get_keyslot(rq);
	return BLK_STS_OK;
}

void __blk_crypto_rq_put_keyslot(struct request *rq);
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
{
	if (blk_crypto_rq_has_keyslot(rq))
		__blk_crypto_rq_put_keyslot(rq);
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

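/*
 * blk-crypto-fallback does the en/decryption in software via the crypto API
 * (using the cipher_str of the mode) when the underlying device has no
 * usable inline encryption hardware.  When
 * CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK is disabled, such bios simply fail,
 * as the stubs below show.
 */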
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */