// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/random.h>
#include <linux/siphash.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};
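
/*
 * Note on key sizes: .keysize is the raw key length in bytes.  AES-256-XTS
 * needs 64 bytes because XTS takes two AES-256 keys; AES-128-CBC-ESSIV takes
 * a single 16-byte AES-128 key (the ESSIV tweak key is derived from it via
 * SHA-256); Adiantum takes a single 32-byte key from which its subkeys are
 * derived internally.
 */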

/* Check that all I/O segments are data unit aligned. */
static int bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
				bio->bi_crypt_context->bc_key->data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return -EIO;
	}
	return 0;
}
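
/*
 * For example, with a 4096-byte data unit size, a bio containing a 512-byte
 * segment (or a segment starting at a 512-byte offset within a page) fails
 * this check, and blk_crypto_submit_bio() completes the bio with
 * BLK_STS_IOERR.
 */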

/**
 * blk_crypto_submit_bio - handle submitting bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio doesn't have inline encryption enabled or the submitter already
 * specified a keyslot for the target device, do nothing.  Else, a raw key must
 * have been provided, so acquire a device keyslot for it if supported.  Else,
 * use the crypto API fallback.
 *
 * When the crypto API fallback is used for encryption, blk-crypto may choose
 * to split the bio in two: the first part continues to be processed, while
 * the second part is resubmitted via generic_make_request.  A bounce bio is
 * allocated to encrypt the contents of the first part, and *bio_ptr is
 * updated to point to this bounce bio.
 *
 * Return: 0 if bio submission should continue; nonzero if bio_endio() was
 *	   already called so bio submission should abort.
 */
int blk_crypto_submit_bio(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct request_queue *q;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	int err;

	if (!bc || !bio_has_data(bio))
		return 0;

	/*
	 * When a read bio is marked for fallback decryption, its bi_iter is
	 * saved so that when we decrypt the bio later, we know what part of it
	 * was marked for fallback decryption.  (After the bio is passed down
	 * from blk_crypto_submit_bio(), it may be split or advanced, so we
	 * cannot rely on the bi_iter while decrypting in blk_crypto_endio().)
	 */
	if (bio_crypt_fallback_crypted(bc))
		return 0;

	err = bio_crypt_check_alignment(bio);
	if (err) {
		bio->bi_status = BLK_STS_IOERR;
		goto out;
	}

	q = bio->bi_disk->queue;

	if (bc->bc_ksm) {
		/* Key already programmed into device? */
		if (q->ksm == bc->bc_ksm)
			return 0;

		/* Nope, release the existing keyslot. */
		bio_crypt_ctx_release_keyslot(bc);
	}

	/* Get device keyslot if supported */
	if (keyslot_manager_crypto_mode_supported(q->ksm,
				bc->bc_key->crypto_mode,
				blk_crypto_key_dun_bytes(bc->bc_key),
				bc->bc_key->data_unit_size,
				bc->bc_key->is_hw_wrapped)) {
		err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm);
		if (!err)
			return 0;

		pr_warn_once("Failed to acquire keyslot for %s (err=%d).  Falling back to crypto API.\n",
			     bio->bi_disk->disk_name, err);
	}

	/* Fall back to the crypto API */
	err = blk_crypto_fallback_submit_bio(bio_ptr);
	if (err)
		goto out;

	return 0;
out:
	bio_endio(*bio_ptr);
	return err;
}
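
/*
 * Hypothetical caller sketch (an assumption, not part of this file): the bio
 * submission path is expected to call blk_crypto_submit_bio() and abort if it
 * returns nonzero, since bio_endio() has then already been called.  Roughly:
 *
 *	if (blk_crypto_submit_bio(&bio))
 *		return;
 *	... continue submitting *bio (possibly now a bounce bio) ...
 */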

/**
 * blk_crypto_endio - clean up bio w.r.t. inline encryption during bio_endio
 *
 * @bio: the bio to clean up
 *
 * If blk_crypto_submit_bio() decided to fall back to the crypto API for this
 * bio, we queue the bio for decryption into a workqueue and return false,
 * and call bio_endio(bio) at a later time (after the bio has been decrypted).
 *
 * If the bio is not to be decrypted by the crypto API, this function releases
 * the reference to the keyslot that blk_crypto_submit_bio() got.
 *
 * Return: true if bio_endio should continue; false otherwise (bio_endio will
 * be called again when the bio has been decrypted).
 */
bool blk_crypto_endio(struct bio *bio)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	if (!bc)
		return true;

	if (bio_crypt_fallback_crypted(bc)) {
		/*
		 * The only bios whose crypto is handled by the blk-crypto
		 * fallback when they reach here are those with
		 * bio_data_dir(bio) == READ, since WRITE bios that are
		 * encrypted by the crypto API fallback are handled by
		 * blk_crypto_encrypt_endio().
		 */
		return !blk_crypto_queue_decrypt_bio(bio);
	}

	if (bc->bc_keyslot >= 0)
		bio_crypt_ctx_release_keyslot(bc);

	return true;
}
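
/*
 * Integration sketch (an assumption, based on the doc-comment above): the
 * completion path is expected to call blk_crypto_endio() first and return
 * early when it reports false, e.g.:
 *
 *	if (!blk_crypto_endio(bio))
 *		return;
 *	... proceed with normal bio completion ...
 */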

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key.  Must be at least the required size for the
 *                chosen @crypto_mode; see blk_crypto_modes[].  (It's allowed
 *                to be longer than the mode's actual key size, in order to
 *                support inline encryption hardware that accepts wrapped keys.
 *                @is_hw_wrapped has to be set for such keys.)
 * @is_hw_wrapped: Denotes that @raw_key is wrapped.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, or -EINVAL if the parameters are invalid.  When done
 *	   using the key, it must be freed with blk_crypto_free_key().
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;
	static siphash_key_t hash_key;
	u32 hash;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);

	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_mode = crypto_mode;
	blk_key->data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	blk_key->is_hw_wrapped = is_hw_wrapped;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	/*
	 * The keyslot manager uses the SipHash of the key to implement O(1)
	 * key lookups while avoiding leaking information about the keys.  It's
	 * precomputed here so that it only needs to be computed once per key.
	 */
	get_random_once(&hash_key, sizeof(hash_key));
	hash = (u32)siphash(raw_key, raw_key_size, &hash_key);
	blk_crypto_key_set_hash_and_dun_bytes(blk_key, hash, dun_bytes);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
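
/*
 * Hypothetical usage sketch (values are illustrative, not from this file): a
 * filesystem preparing a standard (non-wrapped) AES-256-XTS key with 64-bit
 * DUNs and 4096-byte data units might do:
 *
 *	struct blk_crypto_key key;
 *	u8 raw[64];	// 64 bytes, per blk_crypto_modes[], filled by caller
 *	int err;
 *
 *	err = blk_crypto_init_key(&key, raw, sizeof(raw), false,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 */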

/**
 * blk_crypto_start_using_mode() - Start using blk-crypto on a device
 * @crypto_mode: the crypto mode that will be used
 * @dun_bytes: number of bytes that will be used to specify the DUN
 * @data_unit_size: the data unit size that will be used
 * @is_hw_wrapped_key: whether the key will be hardware-wrapped
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the needed crypto settings, or the crypto API fallback has
 * transforms for the needed mode allocated and ready to go.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the crypto
 *	   settings and blk-crypto-fallback is either disabled or the needed
 *	   algorithm is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode,
				unsigned int dun_bytes,
				unsigned int data_unit_size,
				bool is_hw_wrapped_key,
				struct request_queue *q)
{
	if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode,
						  dun_bytes, data_unit_size,
						  is_hw_wrapped_key))
		return 0;
	if (is_hw_wrapped_key) {
		pr_warn_once("hardware doesn't support wrapped keys\n");
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode);
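
/*
 * Hypothetical usage sketch: continuing the example above, before issuing
 * encrypted I/O the filesystem would check that the settings are usable on
 * the target device (or that the fallback can cover them):
 *
 *	err = blk_crypto_start_using_mode(BLK_ENCRYPTION_MODE_AES_256_XTS,
 *					  8, 4096, false,
 *					  bdev_get_queue(bdev));
 */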

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose keyslot manager this key might have been
 *     programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) should call this function to ensure that a key
 * is evicted from hardware that it might have been programmed into.  This
 * will call keyslot_manager_evict_key() on the queue's keyslot manager, if
 * one exists and it supports the crypto algorithm with the specified data
 * unit size.  Otherwise, it will evict the key from the blk-crypto-fallback's
 * ksm.
 *
 * Return: 0 on success or a negative errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (q->ksm &&
	    keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode,
						  blk_crypto_key_dun_bytes(key),
						  key->data_unit_size,
						  key->is_hw_wrapped))
		return keyslot_manager_evict_key(q->ksm, key);

	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
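
/*
 * Hypothetical usage sketch: when the key is no longer needed (e.g. the
 * filesystem's master key is being removed), the caller should evict it so
 * that it is wiped from any keyslots it occupies:
 *
 *	err = blk_crypto_evict_key(bdev_get_queue(bdev), &key);
 */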