// SPDX-License-Identifier: GPL-2.0
/*
 * Ioctl to enable verity on a file
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

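/*
 * One pending block of data or hashes at some level of the Merkle tree.
 * @filled is how many bytes of the block are currently in use, @data points
 * to the block contents, and @is_root_hash marks the pseudo-buffer that
 * receives the final root hash instead of a full tree block.
 */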
struct block_buffer {
        u32 filled;
        bool is_root_hash;
        u8 *data;
};

/* Hash a block, writing the result to the next level's pending block buffer. */
static int hash_one_block(struct inode *inode,
                          const struct merkle_tree_params *params,
                          struct ahash_request *req, struct block_buffer *cur)
{
        struct block_buffer *next = cur + 1;
        int err;

        /*
         * Safety check to prevent a buffer overflow in case of a filesystem bug
         * that allows the file size to change despite deny_write_access(), or a
         * bug in the Merkle tree logic itself
         */
        if (WARN_ON_ONCE(next->is_root_hash && next->filled != 0))
                return -EINVAL;

        /* Zero-pad the block if it's shorter than the block size. */
        memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);

        err = fsverity_hash_block(params, inode, req, virt_to_page(cur->data),
                                  offset_in_page(cur->data),
                                  &next->data[next->filled]);
        if (err)
                return err;
        next->filled += params->digest_size;
        cur->filled = 0;
        return 0;
}

static int write_merkle_tree_block(struct inode *inode, const u8 *buf,
                                   unsigned long index,
                                   const struct merkle_tree_params *params)
{
        u64 pos = (u64)index << params->log_blocksize;
        int err;

        err = inode->i_sb->s_vop->write_merkle_tree_block(inode, buf, pos,
                                                          params->block_size);
        if (err)
                fsverity_err(inode, "Error %d writing Merkle tree block %lu",
                             err, index);
        return err;
}

/*
 * Build the Merkle tree for the given file using the given parameters, and
 * return the root hash in @root_hash.
 *
 * The tree is written to a filesystem-specific location as determined by the
 * ->write_merkle_tree_block() method. However, the blocks that comprise the
 * tree are the same for all filesystems.
 */
static int build_merkle_tree(struct file *filp,
                             const struct merkle_tree_params *params,
                             u8 *root_hash)
{
        struct inode *inode = file_inode(filp);
        const u64 data_size = inode->i_size;
        const int num_levels = params->num_levels;
        struct ahash_request *req;
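        /*
         * _buffers[] has one extra slot at each end so that buffers[-1] (the
         * data block being hashed) and buffers[num_levels] (the root hash) are
         * valid indices alongside the tree levels 0..num_levels-1.
         */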
        struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {};
        struct block_buffer *buffers = &_buffers[1];
        unsigned long level_offset[FS_VERITY_MAX_LEVELS];
        int level;
        u64 offset;
        int err;

        if (data_size == 0) {
                /* Empty file is a special case; root hash is all 0's */
                memset(root_hash, 0, params->digest_size);
                return 0;
        }

        /* This allocation never fails, since it's mempool-backed. */
        req = fsverity_alloc_hash_request(params->hash_alg, GFP_KERNEL);

        /*
         * Allocate the block buffers. Buffer "-1" is for data blocks.
         * Buffers 0 <= level < num_levels are for the actual tree levels.
         * Buffer 'num_levels' is for the root hash.
         */
        for (level = -1; level < num_levels; level++) {
                buffers[level].data = kzalloc(params->block_size, GFP_KERNEL);
                if (!buffers[level].data) {
                        err = -ENOMEM;
                        goto out;
                }
        }
        buffers[num_levels].data = root_hash;
        buffers[num_levels].is_root_hash = true;

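        /*
         * level_offset[level] starts at the level's first block index within
         * the Merkle tree and is incremented each time a block at that level
         * is written out.
         */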
        BUILD_BUG_ON(sizeof(level_offset) != sizeof(params->level_start));
        memcpy(level_offset, params->level_start, sizeof(level_offset));

        /* Hash each data block, also hashing the tree blocks as they fill up */
        for (offset = 0; offset < data_size; offset += params->block_size) {
                ssize_t bytes_read;
                loff_t pos = offset;

                buffers[-1].filled = min_t(u64, params->block_size,
                                           data_size - offset);
                bytes_read = __kernel_read(filp, buffers[-1].data,
                                           buffers[-1].filled, &pos);
                if (bytes_read < 0) {
                        err = bytes_read;
                        fsverity_err(inode, "Error %d reading file data", err);
                        goto out;
                }
                if (bytes_read != buffers[-1].filled) {
                        err = -EINVAL;
                        fsverity_err(inode, "Short read of file data");
                        goto out;
                }
                err = hash_one_block(inode, params, req, &buffers[-1]);
                if (err)
                        goto out;
                for (level = 0; level < num_levels; level++) {
                        if (buffers[level].filled + params->digest_size <=
                            params->block_size) {
                                /* Next block at @level isn't full yet */
                                break;
                        }
                        /* Next block at @level is full */

                        err = hash_one_block(inode, params, req,
                                             &buffers[level]);
                        if (err)
                                goto out;
                        err = write_merkle_tree_block(inode,
                                                      buffers[level].data,
                                                      level_offset[level],
                                                      params);
                        if (err)
                                goto out;
                        level_offset[level]++;
                }
                if (fatal_signal_pending(current)) {
                        err = -EINTR;
                        goto out;
                }
                cond_resched();
        }
        /* Finish all nonempty pending tree blocks. */
        for (level = 0; level < num_levels; level++) {
                if (buffers[level].filled != 0) {
                        err = hash_one_block(inode, params, req,
                                             &buffers[level]);
                        if (err)
                                goto out;
                        err = write_merkle_tree_block(inode,
                                                      buffers[level].data,
                                                      level_offset[level],
                                                      params);
                        if (err)
                                goto out;
                }
        }
        /* The root hash was filled by the last call to hash_one_block(). */
        if (WARN_ON(buffers[num_levels].filled != params->digest_size)) {
                err = -EINVAL;
                goto out;
        }
        err = 0;
out:
        for (level = -1; level < num_levels; level++)
                kfree(buffers[level].data);
        fsverity_free_hash_request(params->hash_alg, req);
        return err;
}

static int enable_verity(struct file *filp,
                         const struct fsverity_enable_arg *arg)
{
        struct inode *inode = file_inode(filp);
        const struct fsverity_operations *vops = inode->i_sb->s_vop;
        struct merkle_tree_params params = { };
        struct fsverity_descriptor *desc;
        size_t desc_size = struct_size(desc, signature, arg->sig_size);
        struct fsverity_info *vi;
        int err;

        /* Start initializing the fsverity_descriptor */
        desc = kzalloc(desc_size, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;
        desc->version = 1;
        desc->hash_algorithm = arg->hash_algorithm;
        desc->log_blocksize = ilog2(arg->block_size);

        /* Get the salt if the user provided one */
        if (arg->salt_size &&
            copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr),
                           arg->salt_size)) {
                err = -EFAULT;
                goto out;
        }
        desc->salt_size = arg->salt_size;

        /* Get the signature if the user provided one */
        if (arg->sig_size &&
            copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr),
                           arg->sig_size)) {
                err = -EFAULT;
                goto out;
        }
        desc->sig_size = cpu_to_le32(arg->sig_size);

        desc->data_size = cpu_to_le64(inode->i_size);

        /* Prepare the Merkle tree parameters */
        err = fsverity_init_merkle_tree_params(&params, inode,
                                               arg->hash_algorithm,
                                               desc->log_blocksize,
                                               desc->salt, desc->salt_size);
        if (err)
                goto out;

        /*
         * Start enabling verity on this file, serialized by the inode lock.
         * Fail if verity is already enabled or is already being enabled.
         */
        inode_lock(inode);
        if (IS_VERITY(inode))
                err = -EEXIST;
        else
                err = vops->begin_enable_verity(filp);
        inode_unlock(inode);
        if (err)
                goto out;

        /*
         * Build the Merkle tree. Don't hold the inode lock during this, since
         * on huge files this may take a very long time and we don't want to
         * force unrelated syscalls like chown() to block forever. We don't
         * need the inode lock here because deny_write_access() already prevents
         * the file from being written to or truncated, and we still serialize
         * ->begin_enable_verity() and ->end_enable_verity() using the inode
         * lock and only allow one process to be here at a time on a given file.
         */
        BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
        err = build_merkle_tree(filp, &params, desc->root_hash);
        if (err) {
                fsverity_err(inode, "Error %d building Merkle tree", err);
                goto rollback;
        }

        /*
         * Create the fsverity_info. Don't bother trying to save work by
         * reusing the merkle_tree_params from above. Instead, just create the
         * fsverity_info from the fsverity_descriptor as if it were just loaded
         * from disk. This is simpler, and it serves as an extra check that the
         * metadata we're writing is valid before actually enabling verity.
         */
        vi = fsverity_create_info(inode, desc);
        if (IS_ERR(vi)) {
                err = PTR_ERR(vi);
                goto rollback;
        }

        /*
         * Tell the filesystem to finish enabling verity on the file.
         * Serialized with ->begin_enable_verity() by the inode lock.
         */
        inode_lock(inode);
        err = vops->end_enable_verity(filp, desc, desc_size, params.tree_size);
        inode_unlock(inode);
        if (err) {
                fsverity_err(inode, "%ps() failed with err %d",
                             vops->end_enable_verity, err);
                fsverity_free_info(vi);
        } else if (WARN_ON(!IS_VERITY(inode))) {
                err = -EINVAL;
                fsverity_free_info(vi);
        } else {
                /* Successfully enabled verity */

                /*
                 * Readers can start using ->i_verity_info immediately, so it
                 * can't be rolled back once set. So don't set it until just
                 * after the filesystem has successfully enabled verity.
                 */
                fsverity_set_info(inode, vi);
        }
out:
        kfree(params.hashstate);
        kfree(desc);
        return err;

rollback:
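        /*
         * Passing a NULL descriptor to ->end_enable_verity() tells the
         * filesystem that enabling verity failed, so it can clean up.
         */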
        inode_lock(inode);
        (void)vops->end_enable_verity(filp, NULL, 0, params.tree_size);
        inode_unlock(inode);
        goto out;
}

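/*
 * Illustrative userspace sketch (not part of this file): how a program might
 * request verity on a file via the FS_IOC_ENABLE_VERITY ioctl, assuming the
 * SHA-256 hash algorithm and a 4096-byte Merkle tree block size.
 *
 *      struct fsverity_enable_arg arg = {
 *              .version = 1,
 *              .hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
 *              .block_size = 4096,
 *      };
 *      int fd = open(path, O_RDONLY);
 *
 *      if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) != 0)
 *              perror("FS_IOC_ENABLE_VERITY");
 */
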
/**
 * fsverity_ioctl_enable() - enable verity on a file
 * @filp: file to enable verity on
 * @uarg: user pointer to fsverity_enable_arg
 *
 * Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of
 * Documentation/filesystems/fsverity.rst for the documentation.
 *
 * Return: 0 on success, -errno on failure
 */
int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
{
        struct inode *inode = file_inode(filp);
        struct fsverity_enable_arg arg;
        int err;

        if (copy_from_user(&arg, uarg, sizeof(arg)))
                return -EFAULT;

        if (arg.version != 1)
                return -EINVAL;

        if (arg.__reserved1 ||
            memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
                return -EINVAL;

        if (!is_power_of_2(arg.block_size))
                return -EINVAL;

        if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt))
                return -EMSGSIZE;

        if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
                return -EMSGSIZE;

        /*
         * Require a regular file with write access. But the actual fd must
         * still be readonly so that we can lock out all writers. This is
         * needed to guarantee that no writable fds exist to the file once it
         * has verity enabled, and to stabilize the data being hashed.
         */

        err = file_permission(filp, MAY_WRITE);
        if (err)
                return err;
        /*
         * __kernel_read() is used while building the Merkle tree. So, we can't
         * allow file descriptors that were opened for ioctl access only, using
         * the special nonstandard access mode 3. O_RDONLY only, please!
         */
        if (!(filp->f_mode & FMODE_READ))
                return -EBADF;

        if (IS_APPEND(inode))
                return -EPERM;

        if (S_ISDIR(inode->i_mode))
                return -EISDIR;

        if (!S_ISREG(inode->i_mode))
                return -EINVAL;

        err = mnt_want_write_file(filp);
        if (err) /* -EROFS */
                return err;

        err = deny_write_access(filp);
        if (err) /* -ETXTBSY */
                goto out_drop_write;

        err = enable_verity(filp, &arg);

        /*
         * We no longer drop the inode's pagecache after enabling verity. This
         * used to be done to try to avoid a race condition where pages could be
         * evicted after being used in the Merkle tree construction, then
         * re-instantiated by a concurrent read. Such pages are unverified, and
         * the backing storage could have filled them with different content, so
         * they shouldn't be used to fulfill reads once verity is enabled.
         *
         * But, dropping the pagecache has a big performance impact, and it
         * doesn't fully solve the race condition anyway. So for those reasons,
         * and also because this race condition isn't very important relatively
         * speaking (especially for small-ish files, where the chance of a page
         * being used, evicted, *and* re-instantiated all while enabling verity
         * is quite small), we no longer drop the inode's pagecache.
         */

        /*
         * allow_write_access() is needed to pair with deny_write_access().
         * Regardless, the filesystem won't allow writing to verity files.
         */
        allow_write_access(filp);
out_drop_write:
        mnt_drop_write_file(filp);
        return err;
}
EXPORT_SYMBOL_GPL(fsverity_ioctl_enable);