// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/random.h>
#include <linux/siphash.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};
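
/*
 * A mode's properties are looked up by indexing this table with the key's
 * enum blk_crypto_mode_num, exactly as blk_crypto_init_key() does below:
 *
 *	const struct blk_crypto_mode *mode = &blk_crypto_modes[crypto_mode];
 *
 * Note that AES-256-XTS takes a 64-byte key because XTS concatenates two
 * AES-256 keys: one for encrypting the data and one for the tweak.
 */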

/* Check that all I/O segments are data unit aligned */
static int bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return -EIO;
	}
	return 0;
}
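
/*
 * For example (illustrative numbers): with a 4096-byte data unit size, a
 * bio_vec with bv_len == 512 or bv_offset == 512 fails the check above and
 * the bio is rejected with -EIO, since inline en/decryption can only operate
 * on whole data units.
 */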

/**
 * blk_crypto_submit_bio - handle submitting bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio doesn't have inline encryption enabled or the submitter already
 * specified a keyslot for the target device, do nothing.  Otherwise, a raw key
 * must have been provided, so acquire a device keyslot for it if the device
 * supports the key's crypto mode.  Failing that, use the crypto API fallback.
 *
 * When the crypto API fallback is used for encryption, blk-crypto may choose to
 * split the bio in two - the first one that will continue to be processed and
 * the second one that will be resubmitted via generic_make_request.
 * A bounce bio will be allocated to encrypt the contents of the aforementioned
 * "first one", and *bio_ptr will be updated to this bounce bio.
 *
 * Return: 0 if bio submission should continue; nonzero if bio_endio() was
 *	   already called so bio submission should abort.
 */
int blk_crypto_submit_bio(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct request_queue *q;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	int err;

	if (!bc || !bio_has_data(bio))
		return 0;

	/*
	 * When a read bio is marked for fallback decryption, its bi_iter is
	 * saved so that when we decrypt the bio later, we know what part of it
	 * was marked for fallback decryption.  (After the bio is passed down
	 * beyond blk_crypto_submit_bio(), it may be split or advanced, so we
	 * cannot rely on the bi_iter while decrypting in blk_crypto_endio().)
	 */
	if (bio_crypt_fallback_crypted(bc))
		return 0;

	err = bio_crypt_check_alignment(bio);
	if (err) {
		bio->bi_status = BLK_STS_IOERR;
		goto out;
	}

	q = bio->bi_disk->queue;

	if (bc->bc_ksm) {
		/* Key already programmed into device? */
		if (q->ksm == bc->bc_ksm)
			return 0;

		/* Nope, release the existing keyslot. */
		bio_crypt_ctx_release_keyslot(bc);
	}

	/* Get device keyslot if supported */
	if (keyslot_manager_crypto_mode_supported(q->ksm,
						  bc->bc_key->crypto_mode,
						  bc->bc_key->data_unit_size)) {
		err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm);
		if (!err)
			return 0;

		pr_warn_once("Failed to acquire keyslot for %s (err=%d). Falling back to crypto API.\n",
			     bio->bi_disk->disk_name, err);
	}

	/* Fallback to crypto API */
	err = blk_crypto_fallback_submit_bio(bio_ptr);
	if (err)
		goto out;

	return 0;
out:
	bio_endio(*bio_ptr);
	return err;
}
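
/*
 * A minimal submission sketch (hedged: bio_crypt_set_ctx() is assumed to be
 * the context-attaching helper from blk-crypto.h, and the DUN value is
 * illustrative).  The block layer invokes blk_crypto_submit_bio() during bio
 * submission, so a filesystem only needs to attach the crypto context:
 *
 *	bio_crypt_set_ctx(bio, &blk_key, dun, GFP_NOIO);
 *	generic_make_request(bio);
 */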

/**
 * blk_crypto_endio - clean up bio w.r.t. inline encryption during bio_endio
 *
 * @bio: the bio to clean up
 *
 * If blk_crypto_submit_bio decided to fall back to the crypto API for this
 * bio, we queue the bio for decryption into a workqueue and return false,
 * and call bio_endio(bio) at a later time (after the bio has been decrypted).
 *
 * If the bio is not to be decrypted by the crypto API, this function releases
 * the reference to the keyslot that blk_crypto_submit_bio got.
 *
 * Return: true if bio_endio should continue; false otherwise (bio_endio will
 *	   be called again when the bio has been decrypted).
 */
bool blk_crypto_endio(struct bio *bio)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	if (!bc)
		return true;

	if (bio_crypt_fallback_crypted(bc)) {
		/*
		 * The only bios whose crypto is handled by the blk-crypto
		 * fallback when they reach here are those with
		 * bio_data_dir(bio) == READ, since WRITE bios that are
		 * encrypted by the crypto API fallback are handled by
		 * blk_crypto_encrypt_endio().
		 */
		return !blk_crypto_queue_decrypt_bio(bio);
	}

	if (bc->bc_keyslot >= 0)
		bio_crypt_ctx_release_keyslot(bc);

	return true;
}
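
/*
 * A hedged sketch of the caller side: bio_endio() is expected to consult this
 * return value and defer completion while fallback decryption is pending
 * (bio_endio() is invoked again once the bio has been decrypted):
 *
 *	if (!blk_crypto_endio(bio))
 *		return;
 */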

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key.  Must be at least the required size for the
 *		  chosen @crypto_mode; see blk_crypto_modes[].  (It's allowed
 *		  to be longer than the mode's actual key size, in order to
 *		  support inline encryption hardware that accepts wrapped keys.)
 * @crypto_mode: identifier for the encryption algorithm to use
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, or -EINVAL if the key or parameters are invalid.  When
 *	   done using the key, it must be freed with blk_crypto_free_key().
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;
	static siphash_key_t hash_key;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);

	mode = &blk_crypto_modes[crypto_mode];
	if (raw_key_size < mode->keysize ||
	    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_mode = crypto_mode;
	blk_key->data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	/*
	 * The keyslot manager uses the SipHash of the key to implement O(1)
	 * key lookups while avoiding leaking information about the keys.  It's
	 * precomputed here so that it only needs to be computed once per key.
	 */
	get_random_once(&hash_key, sizeof(hash_key));
	blk_key->hash = siphash(raw_key, raw_key_size, &hash_key);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
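
/*
 * A minimal usage sketch (hedged: the raw_key buffer, its 64-byte length for
 * AES-256-XTS, and the 4096-byte data unit size are illustrative values):
 *
 *	struct blk_crypto_key key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&key, raw_key, 64,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS, 4096);
 *	if (err)
 *		return err;
 */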

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose keyslot manager this key might have been
 *     programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) should call this function to ensure that a key
 * is evicted from hardware that it might have been programmed into.  This
 * will call keyslot_manager_evict_key on the queue's keyslot manager, if one
 * exists and it supports the crypto algorithm with the specified data unit
 * size.  Otherwise, it will evict the key from the blk-crypto-fallback's ksm.
 *
 * Return: 0 on success, or a negative errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (q->ksm &&
	    keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode,
						  key->data_unit_size))
		return keyslot_manager_evict_key(q->ksm, key);

	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
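
/*
 * A hedged usage sketch: a filesystem would typically evict a key once it is
 * no longer in use (e.g. at unmount), after all I/O using the key completes:
 *
 *	err = blk_crypto_evict_key(bdev_get_queue(bdev), &key);
 */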