// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API.  See Documentation/block/inline-encryption.rst.  fscrypt
 * still provides the key and IV to use.
 */
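
/*
 * Roughly, a filesystem drives this API as follows when building a bio (a
 * sketch only; the loop structure and submit_and_realloc() are illustrative,
 * not real helpers):
 *
 *	bio = bio_alloc(GFP_NOIO, nr_vecs);
 *	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOIO);
 *	for each block:
 *		if (!fscrypt_mergeable_bio(bio, inode, next_lblk))
 *			bio = submit_and_realloc(bio);
 *		add the block's page(s) to the bio;
 *
 * The block layer then en/decrypts the data in flight using the key and data
 * unit numbers (DUNs) that fscrypt programmed into the bio's crypt context.
 */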

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/keyslot-manager.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

#include "fscrypt_private.h"

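/*
 * A blk-crypto key, together with the request_queue of each of the
 * filesystem's underlying block devices.  The queues are saved so that the
 * key can later be evicted from all of them, even if that happens after the
 * filesystem has been unmounted.
 */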
struct fscrypt_blk_crypto_key {
        struct blk_crypto_key base;
        int num_devs;
        struct request_queue *devs[];
};

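/*
 * Number of block devices the filesystem does I/O to.  This is 1 unless the
 * filesystem implements ->get_num_devices(), as a multi-device filesystem
 * (e.g. f2fs with multiple devices) may.
 */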
static int fscrypt_get_num_devices(struct super_block *sb)
{
        if (sb->s_cop->get_num_devices)
                return sb->s_cop->get_num_devices(sb);
        return 1;
}

static void fscrypt_get_devices(struct super_block *sb, int num_devs,
                                struct request_queue **devs)
{
        if (num_devs == 1)
                devs[0] = bdev_get_queue(sb->s_bdev);
        else
                sb->s_cop->get_devices(sb, devs);
}

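/*
 * Number of significant bytes in the data unit numbers (DUNs) for this file,
 * which depends on the IV generation strategy of its encryption policy.  For
 * example, in the default case the DUN is just the file logical block number,
 * so a filesystem that reports 32-bit block numbers gets
 * DIV_ROUND_UP(32, 8) == 4 bytes.
 */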
static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
        struct super_block *sb = ci->ci_inode->i_sb;
        unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
        int ino_bits = 64, lblk_bits = 64;

        if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
                return offsetofend(union fscrypt_iv, nonce);

        if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
                return sizeof(__le64);

        if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
                return sizeof(__le32);

        /* Default case: IVs are just the file logical block number */
        if (sb->s_cop->get_ino_and_lblk_bits)
                sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
        return DIV_ROUND_UP(lblk_bits, 8);
}

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
                                   bool is_hw_wrapped_key)
{
        const struct inode *inode = ci->ci_inode;
        struct super_block *sb = inode->i_sb;
        struct blk_crypto_config crypto_cfg;
        int num_devs;
        struct request_queue **devs;
        int i;

        /* The file must need contents encryption, not filenames encryption */
        if (!S_ISREG(inode->i_mode))
                return 0;

        /* The crypto mode must have a blk-crypto counterpart */
        if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
                return 0;

        /* The filesystem must be mounted with -o inlinecrypt */
        if (!(sb->s_flags & SB_INLINECRYPT))
                return 0;

        /*
         * When a page contains multiple logically contiguous filesystem
         * blocks, some filesystem code only calls fscrypt_mergeable_bio() for
         * the first block in the page.  This is fine for most of fscrypt's IV
         * generation strategies, where contiguous blocks imply contiguous
         * IVs.  But it doesn't work with IV_INO_LBLK_32.  For now, simply
         * exclude IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline
         * encryption.
         */
        if ((fscrypt_policy_flags(&ci->ci_policy) &
             FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
            sb->s_blocksize != PAGE_SIZE)
                return 0;

        /*
         * On all the filesystem's devices, blk-crypto must support the crypto
         * configuration that the file would use.
         */
        crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
        crypto_cfg.data_unit_size = sb->s_blocksize;
        crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
        crypto_cfg.is_hw_wrapped = is_hw_wrapped_key;
        num_devs = fscrypt_get_num_devices(sb);
        devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
        if (!devs)
                return -ENOMEM;
        fscrypt_get_devices(sb, num_devs, devs);

        for (i = 0; i < num_devs; i++) {
                if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
                        goto out_free_devs;
        }

        ci->ci_inlinecrypt = true;
out_free_devs:
        kfree(devs);

        return 0;
}

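/*
 * Prepare a key for use with blk-crypto: allocate the fscrypt_blk_crypto_key,
 * initialize the blk_crypto_key inside it, and start using the key on all the
 * filesystem's block devices.  On success, the key is published to
 * prep_key->blk_key; on failure, all queue references taken so far are
 * dropped again.
 */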
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
                                     const u8 *raw_key,
                                     unsigned int raw_key_size,
                                     bool is_hw_wrapped,
                                     const struct fscrypt_info *ci)
{
        const struct inode *inode = ci->ci_inode;
        struct super_block *sb = inode->i_sb;
        enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
        int num_devs = fscrypt_get_num_devices(sb);
        int queue_refs = 0;
        struct fscrypt_blk_crypto_key *blk_key;
        int err;
        int i;

        blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_KERNEL);
        if (!blk_key)
                return -ENOMEM;

        blk_key->num_devs = num_devs;
        fscrypt_get_devices(sb, num_devs, blk_key->devs);

        BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE >
                     BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE);

        err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
                                  is_hw_wrapped, crypto_mode,
                                  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
        if (err) {
                fscrypt_err(inode, "error %d initializing blk-crypto key", err);
                goto fail;
        }

        /*
         * We have to start using blk-crypto on all the filesystem's devices.
         * We also have to save all the request_queue's for later so that the
         * key can be evicted from them.  This is needed because some keys
         * aren't destroyed until after the filesystem has been unmounted
         * (namely, the per-mode keys in struct fscrypt_master_key).
         */
        for (i = 0; i < num_devs; i++) {
                if (!blk_get_queue(blk_key->devs[i])) {
                        fscrypt_err(inode, "couldn't get request_queue");
                        err = -EAGAIN;
                        goto fail;
                }
                queue_refs++;

                err = blk_crypto_start_using_key(&blk_key->base,
                                                 blk_key->devs[i]);
                if (err) {
                        fscrypt_err(inode,
                                    "error %d starting to use blk-crypto", err);
                        goto fail;
                }
        }
        /*
         * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
         * I.e., here we publish ->blk_key with a RELEASE barrier so that
         * concurrent tasks can ACQUIRE it.  Note that this concurrency is only
         * possible for per-mode keys, not for per-file keys.
         */
        smp_store_release(&prep_key->blk_key, blk_key);
        return 0;

fail:
        for (i = 0; i < queue_refs; i++)
                blk_put_queue(blk_key->devs[i]);
        kfree_sensitive(blk_key);
        return err;
}

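/*
 * Evict the key from all the request queues it was used with, drop the queue
 * references taken in fscrypt_prepare_inline_crypt_key(), and zeroize and
 * free the key itself.
 */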
void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
        struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
        int i;

        if (blk_key) {
                for (i = 0; i < blk_key->num_devs; i++) {
                        blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
                        blk_put_queue(blk_key->devs[i]);
                }
                kfree_sensitive(blk_key);
        }
}

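/*
 * Derive a software secret from a hardware-wrapped key, for use in fscrypt
 * key derivation.  This is delegated to the keyslot manager of the
 * filesystem's main block device, since the raw key material is never
 * available to software.
 */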
int fscrypt_derive_raw_secret(struct super_block *sb,
                              const u8 *wrapped_key,
                              unsigned int wrapped_key_size,
                              u8 *raw_secret, unsigned int raw_secret_size)
{
        struct request_queue *q;

        q = bdev_get_queue(sb->s_bdev);
        if (!q->ksm)
                return -EOPNOTSUPP;

        return blk_ksm_derive_raw_secret(q->ksm, wrapped_key, wrapped_key_size,
                                         raw_secret, raw_secret_size);
}

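/*
 * Return true if the inode's contents encryption uses inline crypto, as
 * decided earlier by fscrypt_select_encryption_impl().  The inode's
 * i_crypt_info must already be set up.
 */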
bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
        return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

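/*
 * Convert an fscrypt IV into the array of 64-bit DUN words that blk-crypto
 * expects: the IV's little-endian words are loaded into CPU-endian u64s, and
 * any remaining words are left zeroed.
 */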
static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
                                 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
        union fscrypt_iv iv;
        int i;

        fscrypt_generate_iv(&iv, lblk_num, ci);

        BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
        memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
        for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
                dun[i] = le64_to_cpu(iv.dun[i]);
}

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 *
 * This function also handles setting bi_skip_dm_default_key when needed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
                               u64 first_lblk, gfp_t gfp_mask)
{
        const struct fscrypt_info *ci;
        u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

        if (fscrypt_inode_should_skip_dm_default_key(inode))
                bio_set_skip_dm_default_key(bio);

        if (!fscrypt_inode_uses_inline_crypto(inode))
                return;
        ci = inode->i_crypt_info;

        fscrypt_generate_dun(ci, first_lblk, dun);
        bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
                                      const struct inode **inode_ret,
                                      u64 *lblk_num_ret)
{
        struct page *page = bh->b_page;
        const struct address_space *mapping;
        const struct inode *inode;

        /*
         * The ext4 journal (jbd2) can submit a buffer_head it directly created
         * for a non-pagecache page.  fscrypt doesn't care about these.
         */
        mapping = page_mapping(page);
        if (!mapping)
                return false;
        inode = mapping->host;

        *inode_ret = inode;
        *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
                        (bh_offset(bh) >> inode->i_blkbits);
        return true;
}

/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
                                  const struct buffer_head *first_bh,
                                  gfp_t gfp_mask)
{
        const struct inode *inode;
        u64 first_lblk;

        if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
                fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only contiguous data unit numbers.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * This function also returns false if the next part of the I/O would need to
 * have a different value for the bi_skip_dm_default_key flag.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
                           u64 next_lblk)
{
        const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

        if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
                return false;
        if (bio_should_skip_dm_default_key(bio) !=
            fscrypt_inode_should_skip_dm_default_key(inode))
                return false;
        if (!bc)
                return true;

        /*
         * Comparing the key pointers is good enough, as all I/O for each key
         * uses the same pointer.  I.e., there's currently no need to support
         * merging requests where the keys are the same but the pointers differ.
         */
        if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
                return false;

        fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
        return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
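
/*
 * Worked example of the DUN-contiguity check above: with a 4096-byte data
 * unit size, a bio that started at DUN 100 and currently has
 * bi_iter.bi_size == 8192 covers DUNs 100 and 101, so
 * bio_crypt_dun_is_contiguous() only accepts next_dun == 102.
 */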

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
                              const struct buffer_head *next_bh)
{
        const struct inode *inode;
        u64 next_lblk;

        if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
                return !bio->bi_crypt_context &&
                       !bio_should_skip_dm_default_key(bio);

        return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

/**
 * fscrypt_dio_supported() - check whether a direct I/O request is supported
 *			     as far as encryption is concerned
 * @iocb: the file and position the I/O is targeting
 * @iter: the I/O data segment(s)
 *
 * Return: true if direct I/O is supported
 */
bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
{
        const struct inode *inode = file_inode(iocb->ki_filp);
        const unsigned int blocksize = i_blocksize(inode);

        /* If the file is unencrypted, no veto from us. */
        if (!fscrypt_needs_contents_encryption(inode))
                return true;

        /* We only support direct I/O with inline crypto, not fs-layer crypto */
        if (!fscrypt_inode_uses_inline_crypto(inode))
                return false;

        /*
         * Since the granularity of encryption is filesystem blocks, the I/O
         * must be block aligned -- not just disk sector aligned.
         */
        if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize))
                return false;

        return true;
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);

/**
 * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @lblk: the logical block number at which the I/O starts
 * @nr_blocks: the number of blocks we want to submit starting at @lblk
 *
 * Determine the limit to the number of blocks that can be submitted in the bio
 * targeting @lblk without causing a data unit number (DUN) discontinuity.
 *
 * This is normally just @nr_blocks, as normally the DUNs just increment along
 * with the logical blocks.  (Or the file is not encrypted.)
 *
 * In rare cases, fscrypt can be using an IV generation method that allows the
 * DUN to wrap around within logically contiguous blocks, and that wraparound
 * will occur.  If this happens, a value less than @nr_blocks will be returned
 * so that the wraparound doesn't occur in the middle of the bio.
 *
 * Return: the actual number of blocks that can be submitted
 */
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
        const struct fscrypt_info *ci = inode->i_crypt_info;
        u32 dun;

        if (!fscrypt_inode_uses_inline_crypto(inode))
                return nr_blocks;

        if (nr_blocks <= 1)
                return nr_blocks;

        if (!(fscrypt_policy_flags(&ci->ci_policy) &
              FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
                return nr_blocks;

        /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */

        dun = ci->ci_hashed_ino + lblk;

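        /*
         * E.g., if dun == U32_MAX, only this one block fits before the DUN
         * would wrap to 0, so at most 1 is returned here.
         */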
        return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);