// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

struct kmem_cache *fscrypt_info_cachep;

void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);

struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}

/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL
 *
 * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
 * or by fscrypt_alloc_bounce_page() directly.
 */
void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);

/*
 * Generate the IV for the given data unit index within the given file.
 * For filenames encryption, index == 0.
 *
 * Keep this in sync with fscrypt_limit_io_blocks().  fscrypt_limit_io_blocks()
 * needs to know about any IV generation methods where the low bits of IV don't
 * simply contain the data unit index (e.g., IV_INO_LBLK_32).
 */
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
			 const struct fscrypt_info *ci)
{
	u8 flags = fscrypt_policy_flags(&ci->ci_policy);

	memset(iv, 0, ci->ci_mode->ivsize);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
		WARN_ON_ONCE(index > U32_MAX);
		WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
		index |= (u64)ci->ci_inode->i_ino << 32;
	} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
		WARN_ON_ONCE(index > U32_MAX);
		index = (u32)(ci->ci_hashed_ino + index);
	} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
		memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
	}
	iv->index = cpu_to_le64(index);
}
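
/*
 * Illustrative sketch (not part of the original file): the 64-bit value that
 * ends up in iv->index when the IV_INO_LBLK_64 policy flag is set, computed
 * standalone.  The helper name is hypothetical; the packing (inode number in
 * the high 32 bits, data unit index in the low 32 bits) mirrors the code
 * above.
 */
static __maybe_unused u64 example_ino_lblk_64_iv(u32 ino, u32 index)
{
	/* e.g. ino == 0x12 and index == 0x34 give 0x0000001200000034 */
	return ((u64)ino << 32) | index;
}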

/* Encrypt or decrypt a single "data unit" of file contents. */
int fscrypt_crypt_data_unit(const struct fscrypt_info *ci,
			    fscrypt_direction_t rw, u64 index,
			    struct page *src_page, struct page *dest_page,
			    unsigned int len, unsigned int offs,
			    gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
	int res = 0;

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
		return -EINVAL;

	fscrypt_generate_iv(&iv, index, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(ci->ci_inode,
			    "%scryption failed for data unit %llu: %d",
			    (rw == FS_DECRYPT ? "De" : "En"), index, res);
		return res;
	}
	return 0;
}

/**
 * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
 * @page: the locked pagecache page containing the data to encrypt
 * @len: size of the data to encrypt, in bytes
 * @offs: offset within @page of the data to encrypt, in bytes
 * @gfp_flags: memory allocation flags; see details below
 *
 * This allocates a new bounce page and encrypts the given data into it.  The
 * length and offset of the data must be aligned to the file's crypto data unit
 * size.  Alignment to the filesystem block size fulfills this requirement, as
 * the filesystem block size is always a multiple of the data unit size.
 *
 * In the bounce page, the ciphertext data will be located at the same offset at
 * which the plaintext data was located in the source page.  Any other parts of
 * the bounce page will be left uninitialized.
 *
 * This is for use by the filesystem's ->writepages() method.
 *
 * The bounce page allocation is mempool-backed, so it will always succeed when
 * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS.  However,
 * only the first page of each bio can be allocated this way.  To prevent
 * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
 *
 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
 */
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
					      unsigned int len,
					      unsigned int offs,
					      gfp_t gfp_flags)
{
	const struct inode *inode = page->mapping->host;
	const struct fscrypt_info *ci = inode->i_crypt_info;
	const unsigned int du_bits = ci->ci_data_unit_bits;
	const unsigned int du_size = 1U << du_bits;
	struct page *ciphertext_page;
	u64 index = ((u64)page->index << (PAGE_SHIFT - du_bits)) +
		    (offs >> du_bits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	for (i = offs; i < offs + len; i += du_size, index++) {
		err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
					      page, ciphertext_page,
					      du_size, i, gfp_flags);
		if (err) {
			fscrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
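
/*
 * Illustrative sketch (not part of the original file): how a filesystem's
 * writeback path might use the bounce-page API above.  The helper name and
 * the "first page of the bio" flag are hypothetical; only the fscrypt_*()
 * calls are real.  As the kernel-doc above explains, GFP_NOFS may block on
 * the mempool and is therefore only safe for the first page of a bio; later
 * pages must use a non-blocking mask such as GFP_NOWAIT.
 */
static __maybe_unused struct page *
example_encrypt_for_writeback(struct page *page, bool first_page_of_bio)
{
	gfp_t gfp = first_page_of_bio ? GFP_NOFS : GFP_NOWAIT;
	struct page *bounce_page;

	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0, gfp);
	if (IS_ERR(bounce_page))
		return bounce_page;	/* caller would retry or fail the write */

	/*
	 * The bio would be built with bounce_page instead of page; after the
	 * write completes, the caller frees it with fscrypt_free_bounce_page().
	 */
	return bounce_page;
}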

/**
 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to encrypt
 * @len:       Size of block to encrypt.  This must be a multiple of
 *		FSCRYPT_CONTENTS_ALIGNMENT.
 * @offs:      Byte offset within @page at which the block to encrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *		number of the block within the file
 * @gfp_flags: Memory allocation flags
 *
 * Encrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * This is not compatible with FS_CFLG_SUPPORTS_SUBBLOCK_DATA_UNITS.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	if (WARN_ON_ONCE(inode->i_sb->s_cop->flags &
			 FS_CFLG_SUPPORTS_SUBBLOCK_DATA_UNITS))
		return -EOPNOTSUPP;
	return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_ENCRYPT,
				       lblk_num, page, page, len, offs,
				       gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
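
/*
 * Illustrative sketch (not part of the original file): encrypting one
 * filesystem block in-place, e.g. after it has been copied into a scratch
 * page that is not the original pagecache page.  The helper name and
 * parameters are hypothetical; blocksize is assumed to be the filesystem
 * block size, and the block is assumed to start at offset 0 of the page.
 */
static __maybe_unused int example_encrypt_scratch_block(struct inode *inode,
							struct page *scratch,
							unsigned int blocksize,
							u64 lblk_num)
{
	return fscrypt_encrypt_block_inplace(inode, scratch, blocksize, 0,
					     lblk_num, GFP_NOFS);
}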

/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt data from a pagecache folio
 * @folio: the pagecache folio containing the data to decrypt
 * @len: size of the data to decrypt, in bytes
 * @offs: offset within @folio of the data to decrypt, in bytes
 *
 * Decrypt data that has just been read from an encrypted file.  The data must
 * be located in a pagecache folio that is still locked and not yet uptodate.
 * The length and offset of the data must be aligned to the file's crypto data
 * unit size.  Alignment to the filesystem block size fulfills this requirement,
 * as the filesystem block size is always a multiple of the data unit size.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
				     size_t offs)
{
	const struct inode *inode = folio->mapping->host;
	const struct fscrypt_info *ci = inode->i_crypt_info;
	const unsigned int du_bits = ci->ci_data_unit_bits;
	const unsigned int du_size = 1U << du_bits;
	u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
		    (offs >> du_bits);
	size_t i;
	int err;

	if (WARN_ON_ONCE(!folio_test_locked(folio)))
		return -EINVAL;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
		return -EINVAL;

	for (i = offs; i < offs + len; i += du_size, index++) {
		struct page *page = folio_page(folio, i >> PAGE_SHIFT);

		err = fscrypt_crypt_data_unit(ci, FS_DECRYPT, index, page,
					      page, du_size, i & ~PAGE_MASK,
					      GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
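
/*
 * Illustrative sketch (not part of the original file): decrypting a folio in
 * a read-completion path before marking it uptodate.  The helper name is
 * hypothetical; only the fscrypt call is real.  The folio is assumed to still
 * be locked and to have been read in full.
 */
static __maybe_unused int example_decrypt_after_read(struct folio *folio)
{
	int err;

	err = fscrypt_decrypt_pagecache_blocks(folio, folio_size(folio), 0);
	if (err)
		return err;	/* caller would leave the folio !uptodate */

	folio_mark_uptodate(folio);
	return 0;
}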

/**
 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to decrypt
 * @len:       Size of block to decrypt.  This must be a multiple of
 *		FSCRYPT_CONTENTS_ALIGNMENT.
 * @offs:      Byte offset within @page at which the block to decrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *		number of the block within the file
 *
 * Decrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * This is not compatible with FS_CFLG_SUPPORTS_SUBBLOCK_DATA_UNITS.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num)
{
	if (WARN_ON_ONCE(inode->i_sb->s_cop->flags &
			 FS_CFLG_SUPPORTS_SUBBLOCK_DATA_UNITS))
		return -EOPNOTSUPP;
	return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_DECRYPT,
				       lblk_num, page, page, len, offs,
				       GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @sb: the filesystem superblock
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_initialize(struct super_block *sb)
{
	int err = 0;
	mempool_t *pool;

	/* pairs with smp_store_release() below */
	if (likely(smp_load_acquire(&fscrypt_bounce_page_pool)))
		return 0;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (sb->s_cop->flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto out_unlock;

	err = -ENOMEM;
	pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!pool)
		goto out_unlock;
	/* pairs with smp_load_acquire() above */
	smp_store_release(&fscrypt_bounce_page_pool, pool);
	err = 0;
out_unlock:
	mutex_unlock(&fscrypt_init_mutex);
	return err;
}
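
/*
 * Illustrative sketch (not part of the original file): the double-checked
 * locking pattern used by fscrypt_initialize() above, shown with a generic,
 * hypothetical singleton.  The acquire load on the fast path pairs with the
 * release store that publishes the fully initialized object, so lock-free
 * readers never observe a partially initialized pointer.
 */
struct example_singleton { int ready; };

static struct example_singleton example_obj;
static struct example_singleton *example_instance;
static DEFINE_MUTEX(example_instance_mutex);

static __maybe_unused struct example_singleton *example_get_instance(void)
{
	struct example_singleton *p;

	/* Fast path: already published?  (pairs with smp_store_release()) */
	p = smp_load_acquire(&example_instance);
	if (p)
		return p;

	mutex_lock(&example_instance_mutex);
	p = example_instance;
	if (!p) {
		example_obj.ready = 1;
		/* Publish only after initialization is complete. */
		smp_store_release(&example_instance, &example_obj);
		p = &example_obj;
	}
	mutex_unlock(&example_instance_mutex);
	return p;
}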

void fscrypt_msg(const struct inode *inode, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (inode && inode->i_ino)
		printk("%sfscrypt (%s, inode %lu): %pV\n",
		       level, inode->i_sb->s_id, inode->i_ino, &vaf);
	else if (inode)
		printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}

/**
 * fscrypt_init() - Set up for fs encryption.
 *
 * Return: 0 on success; -errno on failure
 */
static int __init fscrypt_init(void)
{
	int err = -ENOMEM;

	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_queue;

	err = fscrypt_init_keyring();
	if (err)
		goto fail_free_info;

	return 0;

fail_free_info:
	kmem_cache_destroy(fscrypt_info_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return err;
}
late_initcall(fscrypt_init)