// SPDX-License-Identifier: GPL-2.0
/*
 * Ioctl to enable verity on a file
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

struct block_buffer {
	u32 filled;
	bool is_root_hash;
	u8 *data;
};

/* Hash a block, writing the result to the next level's pending block buffer. */
static int hash_one_block(struct inode *inode,
			  const struct merkle_tree_params *params,
			  struct ahash_request *req, struct block_buffer *cur)
{
	struct block_buffer *next = cur + 1;
	int err;

	/*
	 * Safety check to prevent a buffer overflow in case of a filesystem bug
	 * that allows the file size to change despite deny_write_access(), or a
	 * bug in the Merkle tree logic itself.
	 */
	if (WARN_ON_ONCE(next->is_root_hash && next->filled != 0))
		return -EINVAL;

	/* Zero-pad the block if it's shorter than the block size. */
	memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);

	err = fsverity_hash_block(params, inode, req, virt_to_page(cur->data),
				  offset_in_page(cur->data),
				  &next->data[next->filled]);
	if (err)
		return err;
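	/* Account for the digest added to the next level; reset this block. */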
	next->filled += params->digest_size;
	cur->filled = 0;
	return 0;
}

static int write_merkle_tree_block(struct inode *inode, const u8 *buf,
				   unsigned long index,
				   const struct merkle_tree_params *params)
{
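	/* Convert the block index into a byte offset within the tree. */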
	u64 pos = (u64)index << params->log_blocksize;
	int err;

	err = inode->i_sb->s_vop->write_merkle_tree_block(inode, buf, pos,
							  params->block_size);
	if (err)
		fsverity_err(inode, "Error %d writing Merkle tree block %lu",
			     err, index);
	return err;
}

/*
 * Build the Merkle tree for the given file using the given parameters, and
 * return the root hash in @root_hash.
 *
 * The tree is written to a filesystem-specific location as determined by the
 * ->write_merkle_tree_block() method. However, the blocks that comprise the
 * tree are the same for all filesystems.
 */
static int build_merkle_tree(struct file *filp,
			     const struct merkle_tree_params *params,
			     u8 *root_hash)
{
	struct inode *inode = file_inode(filp);
	const u64 data_size = inode->i_size;
	const int num_levels = params->num_levels;
	struct ahash_request *req;
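	/*
	 * _buffers[] reserves one extra slot before the tree levels (for data
	 * blocks) and one after (for the root hash). Shifting the pointer by
	 * one makes buffers[-1] and buffers[num_levels] valid indices.
	 */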
	struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {};
	struct block_buffer *buffers = &_buffers[1];
	unsigned long level_offset[FS_VERITY_MAX_LEVELS];
	int level;
	u64 offset;
	int err;

	if (data_size == 0) {
		/* Empty file is a special case; root hash is all 0's */
		memset(root_hash, 0, params->digest_size);
		return 0;
	}

	/* This allocation never fails, since it's mempool-backed. */
	req = fsverity_alloc_hash_request(params->hash_alg, GFP_KERNEL);

	/*
	 * Allocate the block buffers. Buffer "-1" is for data blocks.
	 * Buffers 0 <= level < num_levels are for the actual tree levels.
	 * Buffer 'num_levels' is for the root hash.
	 */
	for (level = -1; level < num_levels; level++) {
		buffers[level].data = kzalloc(params->block_size, GFP_KERNEL);
		if (!buffers[level].data) {
			err = -ENOMEM;
			goto out;
		}
	}
	buffers[num_levels].data = root_hash;
	buffers[num_levels].is_root_hash = true;

	BUILD_BUG_ON(sizeof(level_offset) != sizeof(params->level_start));
	memcpy(level_offset, params->level_start, sizeof(level_offset));
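	/* level_offset[] holds the next block index to write at each level. */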

	/* Hash each data block, also hashing the tree blocks as they fill up */
	for (offset = 0; offset < data_size; offset += params->block_size) {
		ssize_t bytes_read;
		loff_t pos = offset;

		buffers[-1].filled = min_t(u64, params->block_size,
					   data_size - offset);
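		/* The last block may be partial; it's zero-padded when hashed. */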
		bytes_read = __kernel_read(filp, buffers[-1].data,
					   buffers[-1].filled, &pos);
		if (bytes_read < 0) {
			err = bytes_read;
			fsverity_err(inode, "Error %d reading file data", err);
			goto out;
		}
		if (bytes_read != buffers[-1].filled) {
			err = -EINVAL;
			fsverity_err(inode, "Short read of file data");
			goto out;
		}
		err = hash_one_block(inode, params, req, &buffers[-1]);
		if (err)
			goto out;
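		/* Propagate full blocks up the tree, like carries in addition. */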
		for (level = 0; level < num_levels; level++) {
			if (buffers[level].filled + params->digest_size <=
			    params->block_size) {
				/* Next block at @level isn't full yet */
				break;
			}
			/* Next block at @level is full */

			err = hash_one_block(inode, params, req,
					     &buffers[level]);
			if (err)
				goto out;
			err = write_merkle_tree_block(inode,
						      buffers[level].data,
						      level_offset[level],
						      params);
			if (err)
				goto out;
			level_offset[level]++;
		}
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			goto out;
		}
		cond_resched();
	}
	/* Finish all nonempty pending tree blocks. */
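	/* Going bottom-up also flushes the blocks these flushes produce. */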
	for (level = 0; level < num_levels; level++) {
		if (buffers[level].filled != 0) {
			err = hash_one_block(inode, params, req,
					     &buffers[level]);
			if (err)
				goto out;
			err = write_merkle_tree_block(inode,
						      buffers[level].data,
						      level_offset[level],
						      params);
			if (err)
				goto out;
		}
	}
	/* The root hash was filled by the last call to hash_one_block(). */
	if (WARN_ON(buffers[num_levels].filled != params->digest_size)) {
		err = -EINVAL;
		goto out;
	}
	err = 0;
out:
	for (level = -1; level < num_levels; level++)
		kfree(buffers[level].data);
	fsverity_free_hash_request(params->hash_alg, req);
	return err;
}

static int enable_verity(struct file *filp,
			 const struct fsverity_enable_arg *arg)
{
	struct inode *inode = file_inode(filp);
	const struct fsverity_operations *vops = inode->i_sb->s_vop;
	struct merkle_tree_params params = { };
	struct fsverity_descriptor *desc;
	size_t desc_size = struct_size(desc, signature, arg->sig_size);
	struct fsverity_info *vi;
	int err;

	/* Start initializing the fsverity_descriptor */
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	desc->version = 1;
	desc->hash_algorithm = arg->hash_algorithm;
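	/*
	 * block_size was already validated as a power of two by the ioctl
	 * handler, so ilog2() is exact here.
	 */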
	desc->log_blocksize = ilog2(arg->block_size);

	/* Get the salt if the user provided one */
	if (arg->salt_size &&
	    copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr),
			   arg->salt_size)) {
		err = -EFAULT;
		goto out;
	}
	desc->salt_size = arg->salt_size;

	/* Get the signature if the user provided one */
	if (arg->sig_size &&
	    copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr),
			   arg->sig_size)) {
		err = -EFAULT;
		goto out;
	}
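	/* Multi-byte descriptor fields are stored as fixed little-endian. */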
	desc->sig_size = cpu_to_le32(arg->sig_size);

	desc->data_size = cpu_to_le64(inode->i_size);

	/* Prepare the Merkle tree parameters */
	err = fsverity_init_merkle_tree_params(&params, inode,
					       arg->hash_algorithm,
					       desc->log_blocksize,
					       desc->salt, desc->salt_size);
	if (err)
		goto out;

	/*
	 * Start enabling verity on this file, serialized by the inode lock.
	 * Fail if verity is already enabled or is already being enabled.
	 */
	inode_lock(inode);
	if (IS_VERITY(inode))
		err = -EEXIST;
	else
		err = vops->begin_enable_verity(filp);
	inode_unlock(inode);
	if (err)
		goto out;

	/*
	 * Build the Merkle tree. Don't hold the inode lock during this, since
	 * on huge files this may take a very long time and we don't want to
	 * force unrelated syscalls like chown() to block forever. We don't
	 * need the inode lock here because deny_write_access() already prevents
	 * the file from being written to or truncated, and we still serialize
	 * ->begin_enable_verity() and ->end_enable_verity() using the inode
	 * lock and only allow one process to be here at a time on a given file.
	 */
	BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
	err = build_merkle_tree(filp, &params, desc->root_hash);
	if (err) {
		fsverity_err(inode, "Error %d building Merkle tree", err);
		goto rollback;
	}

	/*
	 * Create the fsverity_info. Don't bother trying to save work by
	 * reusing the merkle_tree_params from above. Instead, just create the
	 * fsverity_info from the fsverity_descriptor as if it were just loaded
	 * from disk. This is simpler, and it serves as an extra check that the
	 * metadata we're writing is valid before actually enabling verity.
	 */
	vi = fsverity_create_info(inode, desc);
	if (IS_ERR(vi)) {
		err = PTR_ERR(vi);
		goto rollback;
	}

	/*
	 * Tell the filesystem to finish enabling verity on the file.
	 * Serialized with ->begin_enable_verity() by the inode lock.
	 */
	inode_lock(inode);
	err = vops->end_enable_verity(filp, desc, desc_size, params.tree_size);
	inode_unlock(inode);
	if (err) {
		fsverity_err(inode, "%ps() failed with err %d",
			     vops->end_enable_verity, err);
		fsverity_free_info(vi);
	} else if (WARN_ON(!IS_VERITY(inode))) {
		err = -EINVAL;
		fsverity_free_info(vi);
	} else {
		/* Successfully enabled verity */

		/*
		 * Readers can start using ->i_verity_info immediately, so it
		 * can't be rolled back once set. So don't set it until just
		 * after the filesystem has successfully enabled verity.
		 */
		fsverity_set_info(inode, vi);
	}
out:
	kfree(params.hashstate);
	kfree(desc);
	return err;

rollback:
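	/*
	 * Passing a NULL descriptor tells the filesystem that enabling verity
	 * failed, so it should undo what ->begin_enable_verity() did.
	 */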
	inode_lock(inode);
	(void)vops->end_enable_verity(filp, NULL, 0, params.tree_size);
	inode_unlock(inode);
	goto out;
}

/**
 * fsverity_ioctl_enable() - enable verity on a file
 * @filp: file to enable verity on
 * @uarg: user pointer to fsverity_enable_arg
 *
 * Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of
 * Documentation/filesystems/fsverity.rst for the documentation.
 *
 * Return: 0 on success, -errno on failure
 */
int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
{
	struct inode *inode = file_inode(filp);
	struct fsverity_enable_arg arg;
	int err;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

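	/* Only version 1 of struct fsverity_enable_arg is currently defined. */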
	if (arg.version != 1)
		return -EINVAL;

	if (arg.__reserved1 ||
	    memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
		return -EINVAL;

	if (!is_power_of_2(arg.block_size))
		return -EINVAL;

	if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt))
		return -EMSGSIZE;

	if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
		return -EMSGSIZE;

	/*
	 * Require a regular file with write access. But the actual fd must
	 * still be readonly so that we can lock out all writers. This is
	 * needed to guarantee that no writable fds exist to the file once it
	 * has verity enabled, and to stabilize the data being hashed.
	 */

	err = file_permission(filp, MAY_WRITE);
	if (err)
		return err;
	/*
	 * __kernel_read() is used while building the Merkle tree. So, we can't
	 * allow file descriptors that were opened for ioctl access only, using
	 * the special nonstandard access mode 3. O_RDONLY only, please!
	 */
	if (!(filp->f_mode & FMODE_READ))
		return -EBADF;

	if (IS_APPEND(inode))
		return -EPERM;

	if (S_ISDIR(inode->i_mode))
		return -EISDIR;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err) /* -EROFS */
		return err;

	err = deny_write_access(filp);
	if (err) /* -ETXTBSY */
		goto out_drop_write;

	err = enable_verity(filp, &arg);

	/*
	 * We no longer drop the inode's pagecache after enabling verity. This
	 * used to be done to try to avoid a race condition where pages could be
	 * evicted after being used in the Merkle tree construction, then
	 * re-instantiated by a concurrent read. Such pages are unverified, and
	 * the backing storage could have filled them with different content, so
	 * they shouldn't be used to fulfill reads once verity is enabled.
	 *
	 * But, dropping the pagecache has a big performance impact, and it
	 * doesn't fully solve the race condition anyway. So for those reasons,
	 * and also because this race condition isn't very important relatively
	 * speaking (especially for small-ish files, where the chance of a page
	 * being used, evicted, *and* re-instantiated all while enabling verity
	 * is quite small), we no longer drop the inode's pagecache.
	 */

	/*
	 * allow_write_access() is needed to pair with deny_write_access().
	 * Regardless, the filesystem won't allow writing to verity files.
	 */
	allow_write_access(filp);
out_drop_write:
	mnt_drop_write_file(filp);
	return err;
}
EXPORT_SYMBOL_GPL(fsverity_ioctl_enable);