// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API.  See Documentation/block/inline-encryption.rst.  fscrypt
 * still provides the key and IV to use.
 */
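
/*
 * A rough sketch of the resulting I/O flow from a filesystem's point of
 * view (hypothetical code, for illustration only; the names and flags
 * here are assumptions, not taken from this file):
 *
 *	bio = bio_alloc(GFP_NOFS, nr_vecs);
 *	fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
 *	bio_add_page(bio, page, PAGE_SIZE, 0);	// plaintext pagecache page
 *	submit_bio(bio);	// blk-crypto en/decrypts during the I/O
 */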

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/keyslot-manager.h>

#include "fscrypt_private.h"

/* A blk-crypto key, plus the request_queues of the devices it's used on */
struct fscrypt_blk_crypto_key {
	struct blk_crypto_key base;
	int num_devs;
	struct request_queue *devs[];
};

/* Enable inline encryption for this file if supported. */
void fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;

	/* The file must need contents encryption, not filenames encryption */
	if (!S_ISREG(inode->i_mode))
		return;

	/* blk-crypto must implement the needed encryption algorithm */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!sb->s_cop->inline_crypt_enabled ||
	    !sb->s_cop->inline_crypt_enabled(sb))
		return;

	ci->ci_inlinecrypt = true;
}
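
/*
 * For example (hypothetical command line, not from this file), a
 * supporting filesystem would be mounted as:
 *
 *	mount -o inlinecrypt /dev/sda1 /mnt
 *
 * Without that option, ->inline_crypt_enabled() returns false above and
 * fscrypt falls back to fs-layer crypto for the file.
 */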

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     unsigned int raw_key_size,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = 1;
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;

	if (sb->s_cop->get_num_devices)
		num_devs = sb->s_cop->get_num_devices(sb);
	if (WARN_ON(num_devs < 1))
		return -EINVAL;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	if (num_devs == 1)
		blk_key->devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, blk_key->devs);

	BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE >
		     BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE);

	err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
				  crypto_mode, sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/*
	 * We have to start using blk-crypto on all the filesystem's devices.
	 * We also have to save all the request_queue's for later so that the
	 * key can be evicted from them.  This is needed because some keys
	 * aren't destroyed until after the filesystem has already been
	 * unmounted (namely, the per-mode keys in struct fscrypt_master_key).
	 */
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize,
						  blk_key->devs[i]);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto", err);
			goto fail;
		}
	}
	/*
	 * Pairs with READ_ONCE() in fscrypt_is_key_prepared().  (Only matters
	 * for the per-mode keys, which are shared by multiple inodes.)
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kzfree(blk_key);
	return err;
}
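
/*
 * A note on the allocation above: struct_size(blk_key, devs, num_devs)
 * computes, with overflow checking, roughly
 *
 *	sizeof(struct fscrypt_blk_crypto_key) +
 *		num_devs * sizeof(struct request_queue *)
 *
 * i.e. the fixed header plus one flexible-array slot per underlying
 * device, so a single-device filesystem allocates exactly one queue
 * pointer while a multi-device one (e.g. f2fs) gets one per device.
 */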

void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kzfree(blk_key);
	}
}

int fscrypt_derive_raw_secret(struct super_block *sb,
			      const u8 *wrapped_key,
			      unsigned int wrapped_key_size,
			      u8 *raw_secret, unsigned int raw_secret_size)
{
	struct request_queue *q;

	q = sb->s_bdev->bd_queue;
	if (!q->ksm)
		return -EOPNOTSUPP;

	return keyslot_manager_derive_raw_secret(q->ksm,
						 wrapped_key, wrapped_key_size,
						 raw_secret, raw_secret_size);
}
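
/*
 * Note: this path is presumably for hardware-wrapped keys, where the
 * raw key is never visible to software.  The "raw secret" that the
 * keyslot manager derives here (with hardware help) acts as a
 * software-visible stand-in for the raw key, e.g. for deriving the key
 * identifier; this reading is inferred from the
 * FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE check above, not stated in this file.
 */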

/**
 * fscrypt_inode_uses_inline_crypto - test whether an inode uses inline
 *				      encryption
 * @inode: an inode
 *
 * Return: true if the inode requires file contents encryption and if the
 *	   encryption should be done in the block layer via blk-crypto rather
 *	   than in the filesystem layer.
 */
bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
		inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(fscrypt_inode_uses_inline_crypto);

/**
 * fscrypt_inode_uses_fs_layer_crypto - test whether an inode uses fs-layer
 *					encryption
 * @inode: an inode
 *
 * Return: true if the inode requires file contents encryption and if the
 *	   encryption should be done in the filesystem layer rather than in the
 *	   block layer via blk-crypto.
 */
bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) &&
		!inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(fscrypt_inode_uses_fs_layer_crypto);
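
/*
 * A hypothetical sketch of how a filesystem's write path might use this
 * pair of helpers (illustration only; real callers differ in detail):
 *
 *	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
 *		// encrypt into a bounce page before submitting the I/O
 *		bounce_page = fscrypt_encrypt_pagecache_blocks(page,
 *						PAGE_SIZE, 0, GFP_NOFS);
 *	} else {
 *		// inline (or unencrypted) case: do I/O on the pagecache
 *		// page itself; fscrypt_set_bio_crypt_ctx() tags the bio
 *	}
 */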

/* Compute the blk-crypto DUN (IV) for the given logical block of the file. */
static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}
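
/*
 * For example, with a 16-byte IV the loop above copies two 64-bit
 * words: dun[0] holds the first eight IV bytes interpreted as a
 * little-endian integer (typically the logical block number), dun[1]
 * the next eight, and any remaining dun[] entries stay zero from the
 * memset.
 */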

/**
 * fscrypt_set_bio_crypt_ctx - prepare a file contents bio for inline encryption
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 *
 * This function also handles setting bi_skip_dm_default_key when needed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci = inode->i_crypt_info;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (fscrypt_inode_should_skip_dm_default_key(inode))
		bio_set_skip_dm_default_key(bio);

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
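
/*
 * Hypothetical caller sketch (illustration only): since @gfp_mask must
 * allow blocking, a caller would typically reuse the waiting mask it
 * already used for the bio allocation, e.g.:
 *
 *	bio = bio_alloc(GFP_NOIO, 1);
 *	fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO);
 */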

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				      const struct inode **inode_ret,
				      u64 *lblk_num_ret)
{
	struct page *page = bh->b_page;
	const struct address_space *mapping;
	const struct inode *inode;

	/*
	 * The ext4 journal (jbd2) can submit a buffer_head it directly created
	 * for a non-pagecache page.  fscrypt doesn't care about these.
	 */
	mapping = page_mapping(page);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
	*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}
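
/*
 * Worked example of the computation above: with 4 KiB pages
 * (PAGE_SHIFT == 12) and 1 KiB blocks (i_blkbits == 10), each page
 * holds 4 blocks, so a bh at offset 2048 within page index 5 maps to
 * logical block (5 << 2) + (2048 >> 10) = 22.
 */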

/**
 * fscrypt_set_bio_crypt_ctx_bh - prepare a file contents bio for inline
 *				  encryption
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only logically contiguous data.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * This function also returns false if the next part of the I/O would need to
 * have a different value for the bi_skip_dm_default_key flag.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (bio_should_skip_dm_default_key(bio) !=
	    fscrypt_inode_should_skip_dm_default_key(inode))
		return false;
	if (!bc)
		return true;

	/*
	 * Comparing the key pointers is good enough, as all I/O for each key
	 * uses the same pointer.  I.e., there's currently no need to support
	 * merging requests where the keys are the same but the pointers differ.
	 */
	if (bc->bc_key != &inode->i_crypt_info->ci_key.blk_key->base)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
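
/*
 * For instance (hedged example): if a bio already covers logical blocks
 * 8-11 of a file, appending block 12 typically keeps the DUNs
 * contiguous and this returns true, while jumping to block 20, or to a
 * file with a different key, returns false and the caller must submit
 * the bio and start a new one.
 */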

/**
 * fscrypt_mergeable_bio_bh - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context &&
		       !bio_should_skip_dm_default_key(bio);

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
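
/*
 * Hypothetical sketch of the buffer_head-based submission pattern these
 * *_bh helpers support (loosely modeled on ext4-style bio building; the
 * io structure and submit_and_reset() helper are invented for
 * illustration):
 *
 *	if (io->io_bio && !fscrypt_mergeable_bio_bh(io->io_bio, bh))
 *		submit_and_reset(io);
 *	if (!io->io_bio) {
 *		io->io_bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 *		fscrypt_set_bio_crypt_ctx_bh(io->io_bio, bh, GFP_NOIO);
 *	}
 *	bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
 */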