/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include <linux/namei.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;
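
/*
 * Note: both knobs are read-only at runtime (mode 0444); they can only
 * be set at load time, e.g. "ext4.num_prealloc_crypto_pages=64" on the
 * kernel command line when ext4 is built in.
 */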
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");

static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If the context holds a bounce page, it is returned to the bounce page pool.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode:     The inode for which we are doing the crypto
 * @gfp_flags: Allocation flags.
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR otherwise (this function never returns NULL).
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
					    gfp_t gfp_flags)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
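
/*
 * A minimal usage sketch: every context obtained here must eventually
 * be handed back with ext4_release_crypto_ctx(), as callers in this
 * file (e.g. ext4_encrypted_zeroout()) do:
 *
 *	ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	ext4_release_crypto_ctx(ctx);
 */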

struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}

void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;
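
	/*
	 * With CRYPTO_TFM_REQ_MAY_BACKLOG, a backlogged request invokes
	 * this callback once with -EINPROGRESS when it is accepted for
	 * processing; only the final invocation carries the real result,
	 * so don't complete until then.
	 */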
	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page,
			    gfp_t gfp_flags)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = ablkcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);
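
	/*
	 * The XTS tweak is the logical page index in native byte order,
	 * zero-padded to EXT4_XTS_TWEAK_SIZE, so each page of the file
	 * is encrypted under a distinct tweak.
	 */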
	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				     xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_ablkcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
				      gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 * @gfp_flags:      Allocation flags.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * inode's encryption context.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR otherwise.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page,
			  gfp_t gfp_flags)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page, gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
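
/*
 * A rough sketch of the write path this serves (hypothetical caller,
 * for illustration only): the returned bounce page is what gets
 * submitted for I/O, and restoring the control page afterwards
 * releases both the bounce buffer and the context:
 *
 *	data_page = ext4_encrypt(inode, page, GFP_NOFS);
 *	if (IS_ERR(data_page))
 *		... handle error ...
 *	... submit data_page for write I/O, wait for completion ...
 *	ext4_restore_control_page(data_page);
 */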

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts the page in-place using the owning inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
				page->index, page, page, GFP_NOFS);
}
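
/*
 * Zero out the blocks covered by an extent of an encrypted file.
 * Encrypted blocks can't simply be zero-filled on disk, since reads
 * pass through decryption; instead we encrypt ZERO_PAGE(0) for each
 * logical block and write the resulting ciphertext to disk.
 */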
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page,
				       GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && !test_bit(BIO_UPTODATE, &bio->bi_flags))
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}
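
/*
 * For example, EXT4_ENCRYPTION_MODE_AES_256_XTS takes a 64-byte key
 * (XTS uses a pair of AES-256 keys), so any other size is rejected
 * for that mode.
 */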

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct ext4_crypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!ext4_encrypted_inode(d_inode(dir))) {
		dput(dir);
		return 0;
	}
	ci = EXT4_I(d_inode(dir))->i_crypt_info;

	/* this should eventually be a flag in d_flags */
	cached_with_key = dentry->d_fsdata != NULL;
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key)) {
#if 0				/* Revalidation debug */
		char buf[80];
		char *cp = simple_dname(dentry, buf, sizeof(buf));

		if (IS_ERR(cp))
			cp = (char *) "???";
		pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
		       cached_with_key, d_is_negative(dentry),
		       dir_has_key);
#endif
		return 0;
	}
	return 1;
}
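
/*
 * These ops are attached to dentries in encrypted directories (at
 * lookup time, e.g. via d_set_d_op()) so that cached names are
 * revalidated whenever key availability may have changed.
 */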
const struct dentry_operations ext4_encrypted_d_ops = {
	.d_revalidate = ext4_d_revalidate,
};