// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};
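
/*
 * Note on the key sizes above: XTS takes two keys of the underlying cipher's
 * length, so AES-256-XTS needs 2 * 32 = 64 bytes of raw key material, while
 * AES-128-CBC-ESSIV takes a single 16-byte AES-128 key and Adiantum a single
 * 32-byte key.
 */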

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);
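
/*
 * Example (illustrative only, not part of the original source): an upper
 * layer that has already initialized a struct blk_crypto_key could attach an
 * encryption context to a bio it is about to submit roughly as follows.
 * GFP_NOIO is used because the gfp_mask must include __GFP_DIRECT_RECLAIM,
 * per the comment above.  "my_key" and "first_dun" are hypothetical names.
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };
 *
 *	bio_crypt_set_ctx(bio, &my_key, dun, GFP_NOIO);
 *	submit_bio(bio);
 */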

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
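
/*
 * Worked example (illustrative, not part of the original source): with
 * BLK_CRYPTO_DUN_ARRAY_SIZE >= 2, dun = { 0xFFFFFFFFFFFFFFFF, 0 } and
 * inc = 1, the first limb wraps to 0 and a carry of 1 propagates into the
 * second limb, giving dun = { 0, 1 }; i.e. the DUN is treated as one
 * little-endian multi-limb integer.
 */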

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	rq->crypt_keyslot = NULL;
}

void __blk_crypto_free_request(struct request *rq)
{
	/* The keyslot, if one was needed, should have been released earlier. */
	if (WARN_ON_ONCE(rq->crypt_keyslot))
		__blk_crypto_rq_put_keyslot(rq);

	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	rq->crypt_ctx = NULL;
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key.  Must be at least the required size for the
 *                chosen @crypto_mode; see blk_crypto_modes[].  (It's allowed
 *                to be longer than the mode's actual key size, in order to
 *                support inline encryption hardware that accepts wrapped keys.
 *                @is_hw_wrapped has to be set for such keys.)
 * @is_hw_wrapped: Denotes whether @raw_key is a hardware-wrapped key.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);

	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->crypto_cfg.is_hw_wrapped = is_hw_wrapped;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
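
/*
 * Example (illustrative only, not part of the original source): preparing a
 * standard (non-hardware-wrapped) AES-256-XTS key with 8-byte DUNs and
 * 4096-byte data units might look roughly like this, where "raw" is a
 * hypothetical 64-byte buffer holding the raw key material:
 *
 *	struct blk_crypto_key blk_key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw, 64, false,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *	if (err)
 *		return err;
 *
 * The caller would then typically call blk_crypto_start_using_key() (below)
 * before submitting any I/O with this key.
 */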

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
	    !cfg->is_hw_wrapped)
		return true;
	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	if (key->crypto_cfg.is_hw_wrapped) {
		pr_warn_once("hardware doesn't support wrapped keys\n");
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);

/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a request_queue
 * @q: a request_queue on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given request_queue, this function removes the given blk_crypto_key
 * from the keyslot management structures and evicts it from any underlying
 * hardware keyslot(s) or blk-crypto-fallback keyslot it may have been
 * programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key.  It must be
 * called for every request_queue the key may have been used on.  The key must
 * no longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct request_queue *q,
			  const struct blk_crypto_key *key)
{
	int err;

	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		err = blk_ksm_evict_key(q->ksm, key);
	else
		err = blk_crypto_fallback_evict_key(key);
	/*
	 * An error can only occur here if the key failed to be evicted from a
	 * keyslot (due to a hardware or driver issue) or is allegedly still in
	 * use by I/O (due to a kernel bug).  Even in these cases, the key is
	 * still unlinked from the keyslot management structures, and the caller
	 * is allowed and expected to free it right away.  There's nothing
	 * callers can do to handle errors, so just log them and return void.
	 */
	if (err)
		pr_warn_ratelimited("error %d evicting key\n", err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
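
/*
 * Typical key lifecycle for an upper layer (illustrative summary, not part of
 * the original source):
 *
 *	1. blk_crypto_init_key() to fill in a struct blk_crypto_key.
 *	2. blk_crypto_start_using_key() once per request_queue, outside the
 *	   data path.
 *	3. bio_crypt_set_ctx() on each bio to be en/decrypted with the key.
 *	4. blk_crypto_evict_key() on every request_queue the key was used on,
 *	   once no I/O with the key is outstanding, before freeing and
 *	   zeroizing the key.
 */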