1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Landlock LSM - Filesystem management and hooks
4  *
5  * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
6  * Copyright © 2018-2020 ANSSI
7  * Copyright © 2021-2022 Microsoft Corporation
8  * Copyright © 2022 Günther Noack <gnoack3000@gmail.com>
9  * Copyright © 2023-2024 Google LLC
10  */
11 
12 #include <asm/ioctls.h>
13 #include <kunit/test.h>
14 #include <linux/atomic.h>
15 #include <linux/bitops.h>
16 #include <linux/bits.h>
17 #include <linux/compiler_types.h>
18 #include <linux/dcache.h>
19 #include <linux/err.h>
20 #include <linux/falloc.h>
21 #include <linux/fs.h>
22 #include <linux/init.h>
23 #include <linux/kernel.h>
24 #include <linux/limits.h>
25 #include <linux/list.h>
26 #include <linux/lsm_hooks.h>
27 #include <linux/mount.h>
28 #include <linux/namei.h>
29 #include <linux/path.h>
30 #include <linux/pid.h>
31 #include <linux/rcupdate.h>
32 #include <linux/sched/signal.h>
33 #include <linux/spinlock.h>
34 #include <linux/stat.h>
35 #include <linux/types.h>
36 #include <linux/wait_bit.h>
37 #include <linux/workqueue.h>
38 #include <uapi/linux/fiemap.h>
39 #include <uapi/linux/landlock.h>
40 
41 #include "common.h"
42 #include "cred.h"
43 #include "fs.h"
44 #include "limits.h"
45 #include "object.h"
46 #include "ruleset.h"
47 #include "setup.h"
48 
49 /* Underlying object management */
50 
51 static void release_inode(struct landlock_object *const object)
52 	__releases(object->lock)
53 {
54 	struct inode *const inode = object->underobj;
55 	struct super_block *sb;
56 
57 	if (!inode) {
58 		spin_unlock(&object->lock);
59 		return;
60 	}
61 
62 	/*
63 	 * Protects against concurrent use by hook_sb_delete() of the reference
64 	 * to the underlying inode.
65 	 */
66 	object->underobj = NULL;
67 	/*
68 	 * Makes sure that if the filesystem is concurrently unmounted,
69 	 * hook_sb_delete() will wait for us to finish iput().
70 	 */
71 	sb = inode->i_sb;
72 	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
73 	spin_unlock(&object->lock);
74 	/*
75 	 * Because object->underobj was not NULL, hook_sb_delete() and
76 	 * get_inode_object() guarantee that it is safe to reset
77 	 * landlock_inode(inode)->object while it is not NULL.  It is therefore
78 	 * not necessary to lock inode->i_lock.
79 	 */
80 	rcu_assign_pointer(landlock_inode(inode)->object, NULL);
81 	/*
82 	 * Now, new rules can safely be tied to @inode with get_inode_object().
83 	 */
84 
85 	iput(inode);
86 	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
87 		wake_up_var(&landlock_superblock(sb)->inode_refs);
88 }
89 
90 static const struct landlock_object_underops landlock_fs_underops = {
91 	.release = release_inode
92 };
93 
94 /* IOCTL helpers */
95 
96 /**
97  * is_masked_device_ioctl - Determine whether an IOCTL command is always
98  * permitted with Landlock for device files.  These commands can not be
99  * restricted on device files by enforcing a Landlock policy.
100  *
101  * @cmd: The IOCTL command that is supposed to be run.
102  *
103  * By default, any IOCTL on a device file requires the
104  * LANDLOCK_ACCESS_FS_IOCTL_DEV right.  However, we blanket-permit some
105  * commands, if:
106  *
107  * 1. The command is implemented in fs/ioctl.c's do_vfs_ioctl(),
108  *    not in f_ops->unlocked_ioctl() or f_ops->compat_ioctl().
109  *
110  * 2. The command is harmless when invoked on devices.
111  *
112  * We also permit commands that do not make sense for devices, but where the
113  * do_vfs_ioctl() implementation returns a more conventional error code.
114  *
115  * Any new IOCTL commands that are implemented in fs/ioctl.c's do_vfs_ioctl()
116  * should be considered for inclusion here.
117  *
118  * Returns: true if the IOCTL @cmd can not be restricted with Landlock for
119  * device files.
120  */
121 static __attribute_const__ bool is_masked_device_ioctl(const unsigned int cmd)
122 {
123 	switch (cmd) {
124 	/*
125 	 * FIOCLEX, FIONCLEX, FIONBIO and FIOASYNC manipulate the FD's
126 	 * close-on-exec and the file's buffered-IO and async flags.  These
127 	 * operations are also available through fcntl(2), and are
128 	 * unconditionally permitted in Landlock.
129 	 */
130 	case FIOCLEX:
131 	case FIONCLEX:
132 	case FIONBIO:
133 	case FIOASYNC:
134 	/*
135 	 * FIOQSIZE queries the size of a regular file, directory, or link.
136 	 *
137 	 * We still permit it, because it always returns -ENOTTY for
138 	 * other file types.
139 	 */
140 	case FIOQSIZE:
141 	/*
142 	 * FIFREEZE and FITHAW freeze and thaw the file system which the
143 	 * given file belongs to.  Requires CAP_SYS_ADMIN.
144 	 *
145 	 * These commands operate on the file system's superblock rather
146 	 * than on the file itself.  The same operations can also be
147 	 * done through any other file or directory on the same file
148 	 * system, so it is safe to permit these.
149 	 */
150 	case FIFREEZE:
151 	case FITHAW:
152 	/*
153 	 * FS_IOC_FIEMAP queries information about the allocation of
154 	 * blocks within a file.
155 	 *
156 	 * This IOCTL command only makes sense for regular files and is
157 	 * not implemented by devices. It is harmless to permit.
158 	 */
159 	case FS_IOC_FIEMAP:
160 	/*
161 	 * FIGETBSZ queries the file system's block size for a file or
162 	 * directory.
163 	 *
164 	 * This command operates on the file system's superblock rather
165 	 * than on the file itself.  The same operation can also be done
166 	 * through any other file or directory on the same file system,
167 	 * so it is safe to permit it.
168 	 */
169 	case FIGETBSZ:
170 	/*
171 	 * FICLONE, FICLONERANGE and FIDEDUPERANGE make files share
172 	 * their underlying storage ("reflink") between source and
173 	 * destination FDs, on file systems which support that.
174 	 *
175 	 * These IOCTL commands only apply to regular files
176 	 * and are harmless to permit for device files.
177 	 */
178 	case FICLONE:
179 	case FICLONERANGE:
180 	case FIDEDUPERANGE:
181 	/*
182 	 * FS_IOC_GETFSUUID and FS_IOC_GETFSSYSFSPATH both operate on
183 	 * the file system superblock, not on the specific file, so
184 	 * these operations are available through any other file on the
185 	 * same file system as well.
186 	 */
187 	case FS_IOC_GETFSUUID:
188 	case FS_IOC_GETFSSYSFSPATH:
189 		return true;
190 
191 	/*
192 	 * FIONREAD, FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IOC_FSGETXATTR and
193 	 * FS_IOC_FSSETXATTR are forwarded to device implementations.
194 	 */
195 
196 	/*
197 	 * file_ioctl() commands (FIBMAP, FS_IOC_RESVSP, FS_IOC_RESVSP64,
198 	 * FS_IOC_UNRESVSP, FS_IOC_UNRESVSP64 and FS_IOC_ZERO_RANGE) are
199 	 * forwarded to device implementations, so not permitted.
200 	 */
201 
202 	/* Other commands are guarded by the access right. */
203 	default:
204 		return false;
205 	}
206 }
207 
208 /*
209  * is_masked_device_ioctl_compat - same as the helper above, but checking the
210  * "compat" IOCTL commands.
211  *
212  * The IOCTL commands with special handling in compat-mode should behave the
213  * same as their non-compat counterparts.
214  */
215 static __attribute_const__ bool
216 is_masked_device_ioctl_compat(const unsigned int cmd)
217 {
218 	switch (cmd) {
219 	/* FICLONE is permitted, same as in the non-compat variant. */
220 	case FICLONE:
221 		return true;
222 
223 #if defined(CONFIG_X86_64)
224 	/*
225 	 * FS_IOC_RESVSP_32, FS_IOC_RESVSP64_32, FS_IOC_UNRESVSP_32,
226 	 * FS_IOC_UNRESVSP64_32, FS_IOC_ZERO_RANGE_32: not blanket-permitted,
227 	 * for consistency with their non-compat variants.
228 	 */
229 	case FS_IOC_RESVSP_32:
230 	case FS_IOC_RESVSP64_32:
231 	case FS_IOC_UNRESVSP_32:
232 	case FS_IOC_UNRESVSP64_32:
233 	case FS_IOC_ZERO_RANGE_32:
234 #endif
235 
236 	/*
237 	 * FS_IOC32_GETFLAGS, FS_IOC32_SETFLAGS are forwarded to their device
238 	 * implementations.
239 	 */
240 	case FS_IOC32_GETFLAGS:
241 	case FS_IOC32_SETFLAGS:
242 		return false;
243 	default:
244 		return is_masked_device_ioctl(cmd);
245 	}
246 }
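/*
 * Illustrative user-space sketch of the masked-IOCTL behavior described
 * above.  It assumes <linux/landlock.h> from a kernel providing Landlock
 * ABI 5 or higher (LANDLOCK_ACCESS_FS_IOCTL_DEV) and a reachable /dev/null;
 * this is only a sketch of how the access right is typically exercised, not
 * part of this kernel file.
 */
#if 0
#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const struct landlock_ruleset_attr attr = {
		/* Handles (and therefore denies by default) device IOCTLs. */
		.handled_access_fs = LANDLOCK_ACCESS_FS_IOCTL_DEV,
	};
	const int ruleset_fd =
		syscall(__NR_landlock_create_ruleset, &attr, sizeof(attr), 0);
	int dev_fd, pending;

	if (ruleset_fd < 0)
		return 1;
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    syscall(__NR_landlock_restrict_self, ruleset_fd, 0))
		return 1;
	close(ruleset_fd);

	/* Opening is still allowed: only IOCTL_DEV is handled above. */
	dev_fd = open("/dev/null", O_RDWR | O_CLOEXEC);
	if (dev_fd < 0)
		return 1;
	/* Masked command, cf. is_masked_device_ioctl(): always permitted. */
	ioctl(dev_fd, FIOCLEX);
	/* Forwarded to the device driver: denied here with EACCES. */
	ioctl(dev_fd, FIONREAD, &pending);
	return 0;
}
#endif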
247 
248 /* Ruleset management */
249 
250 static struct landlock_object *get_inode_object(struct inode *const inode)
251 {
252 	struct landlock_object *object, *new_object;
253 	struct landlock_inode_security *inode_sec = landlock_inode(inode);
254 
255 	rcu_read_lock();
256 retry:
257 	object = rcu_dereference(inode_sec->object);
258 	if (object) {
259 		if (likely(refcount_inc_not_zero(&object->usage))) {
260 			rcu_read_unlock();
261 			return object;
262 		}
263 		/*
264 		 * We are racing with release_inode(), the object is going
265 		 * away.  Wait for release_inode(), then retry.
266 		 */
267 		spin_lock(&object->lock);
268 		spin_unlock(&object->lock);
269 		goto retry;
270 	}
271 	rcu_read_unlock();
272 
273 	/*
274 	 * If there is no object tied to @inode, then create a new one (without
275 	 * holding any locks).
276 	 */
277 	new_object = landlock_create_object(&landlock_fs_underops, inode);
278 	if (IS_ERR(new_object))
279 		return new_object;
280 
281 	/*
282 	 * Protects against concurrent calls to get_inode_object() or
283 	 * hook_sb_delete().
284 	 */
285 	spin_lock(&inode->i_lock);
286 	if (unlikely(rcu_access_pointer(inode_sec->object))) {
287 		/* Someone else just created the object, bail out and retry. */
288 		spin_unlock(&inode->i_lock);
289 		kfree(new_object);
290 
291 		rcu_read_lock();
292 		goto retry;
293 	}
294 
295 	/*
296 	 * @inode will be released by hook_sb_delete() on its superblock
297 	 * shutdown, or by release_inode() when no more ruleset references the
298 	 * related object.
299 	 */
300 	ihold(inode);
301 	rcu_assign_pointer(inode_sec->object, new_object);
302 	spin_unlock(&inode->i_lock);
303 	return new_object;
304 }
305 
306 /* All access rights that can be tied to files. */
307 /* clang-format off */
308 #define ACCESS_FILE ( \
309 	LANDLOCK_ACCESS_FS_EXECUTE | \
310 	LANDLOCK_ACCESS_FS_WRITE_FILE | \
311 	LANDLOCK_ACCESS_FS_READ_FILE | \
312 	LANDLOCK_ACCESS_FS_TRUNCATE | \
313 	LANDLOCK_ACCESS_FS_IOCTL_DEV)
314 /* clang-format on */
315 
316 /*
317  * @path: Should have been checked by get_path_from_fd().
318  */
319 int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
320 			    const struct path *const path,
321 			    access_mask_t access_rights)
322 {
323 	int err;
324 	struct landlock_id id = {
325 		.type = LANDLOCK_KEY_INODE,
326 	};
327 
328 	/* Files only get access rights that make sense. */
329 	if (!d_is_dir(path->dentry) &&
330 	    (access_rights | ACCESS_FILE) != ACCESS_FILE)
331 		return -EINVAL;
332 	if (WARN_ON_ONCE(ruleset->num_layers != 1))
333 		return -EINVAL;
334 
335 	/* Transforms relative access rights to absolute ones. */
336 	access_rights |= LANDLOCK_MASK_ACCESS_FS &
337 			 ~landlock_get_fs_access_mask(ruleset, 0);
338 	id.key.object = get_inode_object(d_backing_inode(path->dentry));
339 	if (IS_ERR(id.key.object))
340 		return PTR_ERR(id.key.object);
341 	mutex_lock(&ruleset->lock);
342 	err = landlock_insert_rule(ruleset, id, access_rights);
343 	mutex_unlock(&ruleset->lock);
344 	/*
345 	 * No need to check for an error because landlock_insert_rule()
346 	 * increments the refcount for the new object if needed.
347 	 */
348 	landlock_put_object(id.key.object);
349 	return err;
350 }
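/*
 * Worked example of the relative-to-absolute transform above (made-up masks,
 * for illustration only): with a single-layer ruleset that only handles
 * LANDLOCK_ACCESS_FS_READ_FILE and LANDLOCK_ACCESS_FS_WRITE_FILE, a rule
 * granting READ_FILE on a path is stored as
 *   READ_FILE | (LANDLOCK_MASK_ACCESS_FS & ~(READ_FILE | WRITE_FILE)),
 * i.e. the rule also carries every non-handled right, so that only WRITE_FILE
 * remains denied for that hierarchy once the layer masks are evaluated.
 */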
351 
352 /* Access-control management */
353 
354 /*
355  * The lifetime of the returned rule is tied to @domain.
356  *
357  * Returns NULL if no rule is found or if @dentry is negative.
358  */
359 static const struct landlock_rule *
360 find_rule(const struct landlock_ruleset *const domain,
361 	  const struct dentry *const dentry)
362 {
363 	const struct landlock_rule *rule;
364 	const struct inode *inode;
365 	struct landlock_id id = {
366 		.type = LANDLOCK_KEY_INODE,
367 	};
368 
369 	/* Ignores nonexistent leafs. */
370 	if (d_is_negative(dentry))
371 		return NULL;
372 
373 	inode = d_backing_inode(dentry);
374 	rcu_read_lock();
375 	id.key.object = rcu_dereference(landlock_inode(inode)->object);
376 	rule = landlock_find_rule(domain, id);
377 	rcu_read_unlock();
378 	return rule;
379 }
380 
381 /*
382  * Allows access to pseudo filesystems that will never be mountable (e.g.
383  * sockfs, pipefs), but can still be reachable through
384  * /proc/<pid>/fd/<file-descriptor>
385  */
386 static bool is_nouser_or_private(const struct dentry *dentry)
387 {
388 	return (dentry->d_sb->s_flags & SB_NOUSER) ||
389 	       (d_is_positive(dentry) &&
390 		unlikely(IS_PRIVATE(d_backing_inode(dentry))));
391 }
392 
393 static access_mask_t
394 get_handled_fs_accesses(const struct landlock_ruleset *const domain)
395 {
396 	/* Handles all initially denied by default access rights. */
397 	return landlock_union_access_masks(domain).fs |
398 	       LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
399 }
400 
401 static const struct access_masks any_fs = {
402 	.fs = ~0,
403 };
404 
405 static const struct landlock_ruleset *get_current_fs_domain(void)
406 {
407 	return landlock_get_applicable_domain(landlock_get_current_domain(),
408 					      any_fs);
409 }
410 
411 /*
412  * Check that a destination file hierarchy has more restrictions than a source
413  * file hierarchy.  This is only used for link and rename actions.
414  *
415  * @layer_masks_child2: Optional child masks.
416  */
417 static bool no_more_access(
418 	const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
419 	const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
420 	const bool child1_is_directory,
421 	const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
422 	const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
423 	const bool child2_is_directory)
424 {
425 	unsigned long access_bit;
426 
427 	for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
428 	     access_bit++) {
429 		/* Ignores accesses that only make sense for directories. */
430 		const bool is_file_access =
431 			!!(BIT_ULL(access_bit) & ACCESS_FILE);
432 
433 		if (child1_is_directory || is_file_access) {
434 			/*
435 			 * Checks if the destination restrictions are a
436 			 * superset of the source ones (i.e. inherited access
437 			 * rights without child exceptions):
438 			 * restrictions(parent2) >= restrictions(child1)
439 			 */
440 			if ((((*layer_masks_parent1)[access_bit] &
441 			      (*layer_masks_child1)[access_bit]) |
442 			     (*layer_masks_parent2)[access_bit]) !=
443 			    (*layer_masks_parent2)[access_bit])
444 				return false;
445 		}
446 
447 		if (!layer_masks_child2)
448 			continue;
449 		if (child2_is_directory || is_file_access) {
450 			/*
451 			 * Checks inverted restrictions for RENAME_EXCHANGE:
452 			 * restrictions(parent1) >= restrictions(child2)
453 			 */
454 			if ((((*layer_masks_parent2)[access_bit] &
455 			      (*layer_masks_child2)[access_bit]) |
456 			     (*layer_masks_parent1)[access_bit]) !=
457 			    (*layer_masks_parent1)[access_bit])
458 				return false;
459 		}
460 	}
461 	return true;
462 }
463 
464 #define NMA_TRUE(...) KUNIT_EXPECT_TRUE(test, no_more_access(__VA_ARGS__))
465 #define NMA_FALSE(...) KUNIT_EXPECT_FALSE(test, no_more_access(__VA_ARGS__))
466 
467 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
468 
469 static void test_no_more_access(struct kunit *const test)
470 {
471 	const layer_mask_t rx0[LANDLOCK_NUM_ACCESS_FS] = {
472 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
473 		[BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT_ULL(0),
474 	};
475 	const layer_mask_t mx0[LANDLOCK_NUM_ACCESS_FS] = {
476 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
477 		[BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = BIT_ULL(0),
478 	};
479 	const layer_mask_t x0[LANDLOCK_NUM_ACCESS_FS] = {
480 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
481 	};
482 	const layer_mask_t x1[LANDLOCK_NUM_ACCESS_FS] = {
483 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(1),
484 	};
485 	const layer_mask_t x01[LANDLOCK_NUM_ACCESS_FS] = {
486 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
487 							  BIT_ULL(1),
488 	};
489 	const layer_mask_t allows_all[LANDLOCK_NUM_ACCESS_FS] = {};
490 
491 	/* Checks without restriction. */
492 	NMA_TRUE(&x0, &allows_all, false, &allows_all, NULL, false);
493 	NMA_TRUE(&allows_all, &x0, false, &allows_all, NULL, false);
494 	NMA_FALSE(&x0, &x0, false, &allows_all, NULL, false);
495 
496 	/*
497 	 * Checks that we can only refer a file if no more access could be
498 	 * inherited.
499 	 */
500 	NMA_TRUE(&x0, &x0, false, &rx0, NULL, false);
501 	NMA_TRUE(&rx0, &rx0, false, &rx0, NULL, false);
502 	NMA_FALSE(&rx0, &rx0, false, &x0, NULL, false);
503 	NMA_FALSE(&rx0, &rx0, false, &x1, NULL, false);
504 
505 	/* Checks allowed referring with different nested domains. */
506 	NMA_TRUE(&x0, &x1, false, &x0, NULL, false);
507 	NMA_TRUE(&x1, &x0, false, &x0, NULL, false);
508 	NMA_TRUE(&x0, &x01, false, &x0, NULL, false);
509 	NMA_TRUE(&x0, &x01, false, &rx0, NULL, false);
510 	NMA_TRUE(&x01, &x0, false, &x0, NULL, false);
511 	NMA_TRUE(&x01, &x0, false, &rx0, NULL, false);
512 	NMA_FALSE(&x01, &x01, false, &x0, NULL, false);
513 
514 	/* Checks that file access rights are also enforced for a directory. */
515 	NMA_FALSE(&rx0, &rx0, true, &x0, NULL, false);
516 
517 	/* Checks that directory access rights don't impact file referring... */
518 	NMA_TRUE(&mx0, &mx0, false, &x0, NULL, false);
519 	/* ...but only directory referring. */
520 	NMA_FALSE(&mx0, &mx0, true, &x0, NULL, false);
521 
522 	/* Checks directory exchange. */
523 	NMA_TRUE(&mx0, &mx0, true, &mx0, &mx0, true);
524 	NMA_TRUE(&mx0, &mx0, true, &mx0, &x0, true);
525 	NMA_FALSE(&mx0, &mx0, true, &x0, &mx0, true);
526 	NMA_FALSE(&mx0, &mx0, true, &x0, &x0, true);
527 	NMA_FALSE(&mx0, &mx0, true, &x1, &x1, true);
528 
529 	/* Checks file exchange with directory access rights... */
530 	NMA_TRUE(&mx0, &mx0, false, &mx0, &mx0, false);
531 	NMA_TRUE(&mx0, &mx0, false, &mx0, &x0, false);
532 	NMA_TRUE(&mx0, &mx0, false, &x0, &mx0, false);
533 	NMA_TRUE(&mx0, &mx0, false, &x0, &x0, false);
534 	/* ...and with file access rights. */
535 	NMA_TRUE(&rx0, &rx0, false, &rx0, &rx0, false);
536 	NMA_TRUE(&rx0, &rx0, false, &rx0, &x0, false);
537 	NMA_FALSE(&rx0, &rx0, false, &x0, &rx0, false);
538 	NMA_FALSE(&rx0, &rx0, false, &x0, &x0, false);
539 	NMA_FALSE(&rx0, &rx0, false, &x1, &x1, false);
540 
541 	/*
542 	 * Allowing the following requests should not be a security risk
543 	 * because domain 0 denies execute access, and domain 1 is always
544 	 * nested with domain 0.  However, adding an exception for this case
545 	 * would mean to check all nested domains to make sure none can get
546 	 * more privileges (e.g. processes only sandboxed by domain 0).
547 	 * Moreover, this behavior (i.e. composition of N domains) could then
548 	 * be inconsistent compared to domain 1's ruleset alone (e.g. it might
549 	 * be denied to link/rename with domain 1's ruleset, whereas it would
550 	 * be allowed if nested on top of domain 0).  Another drawback would be
551 	 * to create a cover channel that could enable sandboxed processes to
552 	 * infer most of the filesystem restrictions from their domain.  To
553 	 * make it simple, efficient, safe, and more consistent, this case is
554 	 * always denied.
555 	 */
556 	NMA_FALSE(&x1, &x1, false, &x0, NULL, false);
557 	NMA_FALSE(&x1, &x1, false, &rx0, NULL, false);
558 	NMA_FALSE(&x1, &x1, true, &x0, NULL, false);
559 	NMA_FALSE(&x1, &x1, true, &rx0, NULL, false);
560 
561 	/* Checks the same case of exclusive domains with a file... */
562 	NMA_TRUE(&x1, &x1, false, &x01, NULL, false);
563 	NMA_FALSE(&x1, &x1, false, &x01, &x0, false);
564 	NMA_FALSE(&x1, &x1, false, &x01, &x01, false);
565 	NMA_FALSE(&x1, &x1, false, &x0, &x0, false);
566 	/* ...and with a directory. */
567 	NMA_FALSE(&x1, &x1, false, &x0, &x0, true);
568 	NMA_FALSE(&x1, &x1, true, &x0, &x0, false);
569 	NMA_FALSE(&x1, &x1, true, &x0, &x0, true);
570 }
571 
572 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
573 
574 #undef NMA_TRUE
575 #undef NMA_FALSE
576 
577 /*
578  * Removes @layer_masks accesses that are not requested.
579  *
580  * Returns true if the request is allowed, false otherwise.
581  */
582 static bool
583 scope_to_request(const access_mask_t access_request,
584 		 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
585 {
586 	const unsigned long access_req = access_request;
587 	unsigned long access_bit;
588 
589 	if (WARN_ON_ONCE(!layer_masks))
590 		return true;
591 
592 	for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
593 		(*layer_masks)[access_bit] = 0;
594 	return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
595 }
596 
597 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
598 
599 static void test_scope_to_request_with_exec_none(struct kunit *const test)
600 {
601 	/* Allows everything. */
602 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
603 
604 	/* Checks and scopes with execute. */
605 	KUNIT_EXPECT_TRUE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
606 						 &layer_masks));
607 	KUNIT_EXPECT_EQ(test, 0,
608 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
609 	KUNIT_EXPECT_EQ(test, 0,
610 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
611 }
612 
613 static void test_scope_to_request_with_exec_some(struct kunit *const test)
614 {
615 	/* Denies execute and write. */
616 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
617 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
618 		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
619 	};
620 
621 	/* Checks and scopes with execute. */
622 	KUNIT_EXPECT_FALSE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
623 						  &layer_masks));
624 	KUNIT_EXPECT_EQ(test, BIT_ULL(0),
625 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
626 	KUNIT_EXPECT_EQ(test, 0,
627 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
628 }
629 
630 static void test_scope_to_request_without_access(struct kunit *const test)
631 {
632 	/* Denies execute and write. */
633 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
634 		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
635 		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
636 	};
637 
638 	/* Checks and scopes without access request. */
639 	KUNIT_EXPECT_TRUE(test, scope_to_request(0, &layer_masks));
640 	KUNIT_EXPECT_EQ(test, 0,
641 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
642 	KUNIT_EXPECT_EQ(test, 0,
643 			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
644 }
645 
646 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
647 
648 /*
649  * Returns true if there is at least one access right different than
650  * LANDLOCK_ACCESS_FS_REFER.
651  */
652 static bool
653 is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
654 	  const access_mask_t access_request)
655 {
656 	unsigned long access_bit;
657 	/* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
658 	const unsigned long access_check = access_request &
659 					   ~LANDLOCK_ACCESS_FS_REFER;
660 
661 	if (!layer_masks)
662 		return false;
663 
664 	for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
665 		if ((*layer_masks)[access_bit])
666 			return true;
667 	}
668 	return false;
669 }
670 
671 #define IE_TRUE(...) KUNIT_EXPECT_TRUE(test, is_eacces(__VA_ARGS__))
672 #define IE_FALSE(...) KUNIT_EXPECT_FALSE(test, is_eacces(__VA_ARGS__))
673 
674 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
675 
676 static void test_is_eacces_with_none(struct kunit *const test)
677 {
678 	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
679 
680 	IE_FALSE(&layer_masks, 0);
681 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
682 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
683 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
684 }
685 
686 static void test_is_eacces_with_refer(struct kunit *const test)
687 {
688 	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
689 		[BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = BIT_ULL(0),
690 	};
691 
692 	IE_FALSE(&layer_masks, 0);
693 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
694 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
695 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
696 }
697 
698 static void test_is_eacces_with_write(struct kunit *const test)
699 {
700 	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
701 		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(0),
702 	};
703 
704 	IE_FALSE(&layer_masks, 0);
705 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
706 	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
707 
708 	IE_TRUE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
709 }
710 
711 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
712 
713 #undef IE_TRUE
714 #undef IE_FALSE
715 
716 /**
717  * is_access_to_paths_allowed - Check accesses for requests with a common path
718  *
719  * @domain: Domain to check against.
720  * @path: File hierarchy to walk through.
721  * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
722  *     equal to @layer_masks_parent2 (if any).  This is tied to the unique
723  *     requested path for most actions, or the source in case of a refer action
724  *     (i.e. rename or link), or the source and destination in case of
725  *     RENAME_EXCHANGE.
726  * @layer_masks_parent1: Pointer to a matrix of layer masks per access
727  *     masks, identifying the layers that forbid a specific access.  Bits from
728  *     this matrix can be unset according to the @path walk.  An empty matrix
729  *     means that @domain allows all possible Landlock accesses (i.e. not only
730  *     those identified by @access_request_parent1).  This matrix can
731  *     initially refer to domain layer masks and, when the accesses for the
732  *     destination and source are the same, to requested layer masks.
733  * @dentry_child1: Dentry to the initial child of the parent1 path.  This
734  *     pointer must be NULL for non-refer actions (i.e. not link nor rename).
735  * @access_request_parent2: Similar to @access_request_parent1 but for a
736  *     request involving a source and a destination.  This refers to the
737  *     destination, except in case of RENAME_EXCHANGE where it also refers to
738  *     the source.  Must be set to 0 when using a simple path request.
739  * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
740  *     action.  This must be NULL otherwise.
741  * @dentry_child2: Dentry to the initial child of the parent2 path.  This
742  *     pointer is only set for RENAME_EXCHANGE actions and must be NULL
743  *     otherwise.
744  *
745  * This helper first checks that the destination has a superset of restrictions
746  * compared to the source (if any) for a common path.  Because of
747  * RENAME_EXCHANGE actions, source and destination may be swapped.  It then
748  * checks that the collected accesses and the remaining ones are enough to
749  * allow the request.
750  *
751  * Returns:
752  * - true if the access request is granted;
753  * - false otherwise.
754  */
755 static bool is_access_to_paths_allowed(
756 	const struct landlock_ruleset *const domain,
757 	const struct path *const path,
758 	const access_mask_t access_request_parent1,
759 	layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
760 	const struct dentry *const dentry_child1,
761 	const access_mask_t access_request_parent2,
762 	layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
763 	const struct dentry *const dentry_child2)
764 {
765 	bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
766 	     child1_is_directory = true, child2_is_directory = true;
767 	struct path walker_path;
768 	access_mask_t access_masked_parent1, access_masked_parent2;
769 	layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
770 		_layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
771 	layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
772 	(*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;
773 
774 	if (!access_request_parent1 && !access_request_parent2)
775 		return true;
776 	if (WARN_ON_ONCE(!domain || !path))
777 		return true;
778 	if (is_nouser_or_private(path->dentry))
779 		return true;
780 	if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
781 		return false;
782 
783 	if (unlikely(layer_masks_parent2)) {
784 		if (WARN_ON_ONCE(!dentry_child1))
785 			return false;
786 		/*
787 		 * For a double request, first check for potential privilege
788 		 * escalation by looking at domain handled accesses (which are
789 		 * a superset of the meaningful requested accesses).
790 		 */
791 		access_masked_parent1 = access_masked_parent2 =
792 			get_handled_fs_accesses(domain);
793 		is_dom_check = true;
794 	} else {
795 		if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
796 			return false;
797 		/* For a simple request, only check for requested accesses. */
798 		access_masked_parent1 = access_request_parent1;
799 		access_masked_parent2 = access_request_parent2;
800 		is_dom_check = false;
801 	}
802 
803 	if (unlikely(dentry_child1)) {
804 		landlock_unmask_layers(
805 			find_rule(domain, dentry_child1),
806 			landlock_init_layer_masks(
807 				domain, LANDLOCK_MASK_ACCESS_FS,
808 				&_layer_masks_child1, LANDLOCK_KEY_INODE),
809 			&_layer_masks_child1, ARRAY_SIZE(_layer_masks_child1));
810 		layer_masks_child1 = &_layer_masks_child1;
811 		child1_is_directory = d_is_dir(dentry_child1);
812 	}
813 	if (unlikely(dentry_child2)) {
814 		landlock_unmask_layers(
815 			find_rule(domain, dentry_child2),
816 			landlock_init_layer_masks(
817 				domain, LANDLOCK_MASK_ACCESS_FS,
818 				&_layer_masks_child2, LANDLOCK_KEY_INODE),
819 			&_layer_masks_child2, ARRAY_SIZE(_layer_masks_child2));
820 		layer_masks_child2 = &_layer_masks_child2;
821 		child2_is_directory = d_is_dir(dentry_child2);
822 	}
823 
824 	walker_path = *path;
825 	path_get(&walker_path);
826 	/*
827 	 * We need to walk through the whole hierarchy so as not to miss any
828 	 * relevant restriction.
829 	 */
830 	while (true) {
831 		struct dentry *parent_dentry;
832 		const struct landlock_rule *rule;
833 
834 		/*
835 		 * If at least all accesses allowed on the destination are
836 		 * already allowed on the source (i.e. if there are at least
837 		 * as many restrictions on the destination as on the source),
838 		 * then we can safely refer files from the source to
839 		 * the destination without risking a privilege escalation.
840 		 * This also applies in the case of RENAME_EXCHANGE, which
841 		 * implies checks in both directions.  This is crucial for
842 		 * standalone multilayered security policies.  Furthermore,
843 		 * this helps keep policy writers from shooting themselves in
844 		 * the foot.
845 		 */
846 		if (unlikely(is_dom_check &&
847 			     no_more_access(
848 				     layer_masks_parent1, layer_masks_child1,
849 				     child1_is_directory, layer_masks_parent2,
850 				     layer_masks_child2,
851 				     child2_is_directory))) {
852 			allowed_parent1 = scope_to_request(
853 				access_request_parent1, layer_masks_parent1);
854 			allowed_parent2 = scope_to_request(
855 				access_request_parent2, layer_masks_parent2);
856 
857 			/* Stops when all accesses are granted. */
858 			if (allowed_parent1 && allowed_parent2)
859 				break;
860 
861 			/*
862 			 * Now, downgrades the remaining checks from domain
863 			 * handled accesses to requested accesses.
864 			 */
865 			is_dom_check = false;
866 			access_masked_parent1 = access_request_parent1;
867 			access_masked_parent2 = access_request_parent2;
868 		}
869 
870 		rule = find_rule(domain, walker_path.dentry);
871 		allowed_parent1 = landlock_unmask_layers(
872 			rule, access_masked_parent1, layer_masks_parent1,
873 			ARRAY_SIZE(*layer_masks_parent1));
874 		allowed_parent2 = landlock_unmask_layers(
875 			rule, access_masked_parent2, layer_masks_parent2,
876 			ARRAY_SIZE(*layer_masks_parent2));
877 
878 		/* Stops when a rule from each layer grants access. */
879 		if (allowed_parent1 && allowed_parent2)
880 			break;
881 jump_up:
882 		if (walker_path.dentry == walker_path.mnt->mnt_root) {
883 			if (follow_up(&walker_path)) {
884 				/* Ignores hidden mount points. */
885 				goto jump_up;
886 			} else {
887 				/*
888 				 * Stops at the real root.  Denies access
889 				 * because not all layers have granted access.
890 				 */
891 				break;
892 			}
893 		}
894 		if (unlikely(IS_ROOT(walker_path.dentry))) {
895 			/*
896 			 * Stops at disconnected root directories.  Only allows
897 			 * access to internal filesystems (e.g. nsfs, which is
898 			 * reachable through /proc/<pid>/ns/<namespace>).
899 			 */
900 			allowed_parent1 = allowed_parent2 =
901 				!!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
902 			break;
903 		}
904 		parent_dentry = dget_parent(walker_path.dentry);
905 		dput(walker_path.dentry);
906 		walker_path.dentry = parent_dentry;
907 	}
908 	path_put(&walker_path);
909 
910 	return allowed_parent1 && allowed_parent2;
911 }
912 
913 static int check_access_path(const struct landlock_ruleset *const domain,
914 			     const struct path *const path,
915 			     access_mask_t access_request)
916 {
917 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
918 
919 	access_request = landlock_init_layer_masks(
920 		domain, access_request, &layer_masks, LANDLOCK_KEY_INODE);
921 	if (is_access_to_paths_allowed(domain, path, access_request,
922 				       &layer_masks, NULL, 0, NULL, NULL))
923 		return 0;
924 	return -EACCES;
925 }
926 
927 static int current_check_access_path(const struct path *const path,
928 				     const access_mask_t access_request)
929 {
930 	const struct landlock_ruleset *const dom = get_current_fs_domain();
931 
932 	if (!dom)
933 		return 0;
934 	return check_access_path(dom, path, access_request);
935 }
936 
937 static access_mask_t get_mode_access(const umode_t mode)
938 {
939 	switch (mode & S_IFMT) {
940 	case S_IFLNK:
941 		return LANDLOCK_ACCESS_FS_MAKE_SYM;
942 	case S_IFDIR:
943 		return LANDLOCK_ACCESS_FS_MAKE_DIR;
944 	case S_IFCHR:
945 		return LANDLOCK_ACCESS_FS_MAKE_CHAR;
946 	case S_IFBLK:
947 		return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
948 	case S_IFIFO:
949 		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
950 	case S_IFSOCK:
951 		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
952 	case S_IFREG:
953 	case 0:
954 		/* A zero mode translates to S_IFREG. */
955 	default:
956 		/* Treats weird files as regular files. */
957 		return LANDLOCK_ACCESS_FS_MAKE_REG;
958 	}
959 }
960 
961 static access_mask_t maybe_remove(const struct dentry *const dentry)
962 {
963 	if (d_is_negative(dentry))
964 		return 0;
965 	return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
966 				  LANDLOCK_ACCESS_FS_REMOVE_FILE;
967 }
968 
969 /**
970  * collect_domain_accesses - Walk through a file path and collect accesses
971  *
972  * @domain: Domain to check against.
973  * @mnt_root: Last directory to check.
974  * @dir: Directory to start the walk from.
975  * @layer_masks_dom: Where to store the collected accesses.
976  *
977  * This helper is useful to begin a path walk from the @dir directory to a
978  * @mnt_root directory used as a mount point.  This mount point is the common
979  * ancestor between the source and the destination of a renamed and linked
980  * file.  While walking from @dir to @mnt_root, we record all the domain's
981  * allowed accesses in @layer_masks_dom.
982  *
983  * This is similar to is_access_to_paths_allowed() but much simpler because it
984  * only handles walking on the same mount point and only checks one set of
985  * accesses.
986  *
987  * Returns:
988  * - true if all the domain access rights are allowed for @dir;
989  * - false if the walk reached @mnt_root.
990  */
991 static bool collect_domain_accesses(
992 	const struct landlock_ruleset *const domain,
993 	const struct dentry *const mnt_root, struct dentry *dir,
994 	layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
995 {
996 	unsigned long access_dom;
997 	bool ret = false;
998 
999 	if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
1000 		return true;
1001 	if (is_nouser_or_private(dir))
1002 		return true;
1003 
1004 	access_dom = landlock_init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
1005 					       layer_masks_dom,
1006 					       LANDLOCK_KEY_INODE);
1007 
1008 	dget(dir);
1009 	while (true) {
1010 		struct dentry *parent_dentry;
1011 
1012 		/* Gets all layers allowing all domain accesses. */
1013 		if (landlock_unmask_layers(find_rule(domain, dir), access_dom,
1014 					   layer_masks_dom,
1015 					   ARRAY_SIZE(*layer_masks_dom))) {
1016 			/*
1017 			 * Stops when all handled accesses are allowed by at
1018 			 * least one rule in each layer.
1019 			 */
1020 			ret = true;
1021 			break;
1022 		}
1023 
1024 		/* We should not reach a root other than @mnt_root. */
1025 		if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
1026 			break;
1027 
1028 		parent_dentry = dget_parent(dir);
1029 		dput(dir);
1030 		dir = parent_dentry;
1031 	}
1032 	dput(dir);
1033 	return ret;
1034 }
1035 
1036 /**
1037  * current_check_refer_path - Check if a rename or link action is allowed
1038  *
1039  * @old_dentry: File or directory requested to be moved or linked.
1040  * @new_dir: Destination parent directory.
1041  * @new_dentry: Destination file or directory.
1042  * @removable: Sets to true if it is a rename operation.
1043  * @exchange: Sets to true if it is a rename operation with RENAME_EXCHANGE.
1044  *
1045  * Because of its unprivileged constraints, Landlock relies on file hierarchies
1046  * (and not only inodes) to tie access rights to files.  Being able to link or
1047  * rename a file hierarchy brings some challenges.  Indeed, moving or linking a
1048  * file (i.e. creating a new reference to an inode) can have an impact on the
1049  * actions allowed for a set of files if it would change its parent directory
1050  * (i.e. reparenting).
1051  *
1052  * To avoid trivial access right bypasses, Landlock first checks if the file or
1053  * directory requested to be moved would gain new access rights inherited from
1054  * its new hierarchy.  Before returning any error, Landlock then checks that
1055  * the parent source hierarchy and the destination hierarchy would allow the
1056  * link or rename action.  If it is not the case, an error with EACCES is
1057  * returned to inform user space that there is no way to remove or create the
1058  * requested source file type.  If it should be allowed but the new inherited
1059  * access rights would be greater than the source access rights, then the
1060  * kernel returns an error with EXDEV.  Prioritizing EACCES over EXDEV enables
1061  * user space to abort the whole operation if there is no way to do it, or to
1062  * manually copy the source to the destination if this remains allowed, e.g.
1063  * because file creation is allowed on the destination directory but not direct
1064  * linking.
1065  *
1066  * To achieve this goal, the kernel needs to compare two file hierarchies: the
1067  * one identifying the source file or directory (including itself), and the
1068  * destination one.  This can be seen as a multilayer partial ordering problem.
1069  * The kernel walks through these paths and collects in a matrix the access
1070  * rights that are denied per layer.  These matrices are then compared to see
1071  * if the destination one has more (or the same) restrictions as the source
1072  * one.  If this is the case, the requested action will not return EXDEV, which
1073  * doesn't mean the action is allowed.  The parent hierarchy of the source
1074  * (i.e. parent directory), and the destination hierarchy must also be checked
1075  * to verify that they explicitly allow such action (i.e.  referencing,
1076  * creation and potentially removal rights).  The kernel implementation is then
1077  * required to rely on potentially four matrices of access rights: one for the
1078  * source file or directory (i.e. the child), a potentially other one for the
1079  * other source/destination (in case of RENAME_EXCHANGE), one for the source
1080  * parent hierarchy and a last one for the destination hierarchy.  These
1081  * ephemeral matrices take some space on the stack, which limits the number of
1082  * layers to a deemed reasonable number: 16.
1083  *
1084  * Returns:
1085  * - 0 if access is allowed;
1086  * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
1087  * - -EACCES if file removal or creation is denied.
1088  */
1089 static int current_check_refer_path(struct dentry *const old_dentry,
1090 				    const struct path *const new_dir,
1091 				    struct dentry *const new_dentry,
1092 				    const bool removable, const bool exchange)
1093 {
1094 	const struct landlock_ruleset *const dom = get_current_fs_domain();
1095 	bool allow_parent1, allow_parent2;
1096 	access_mask_t access_request_parent1, access_request_parent2;
1097 	struct path mnt_dir;
1098 	struct dentry *old_parent;
1099 	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
1100 		     layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
1101 
1102 	if (!dom)
1103 		return 0;
1104 	if (WARN_ON_ONCE(dom->num_layers < 1))
1105 		return -EACCES;
1106 	if (unlikely(d_is_negative(old_dentry)))
1107 		return -ENOENT;
1108 	if (exchange) {
1109 		if (unlikely(d_is_negative(new_dentry)))
1110 			return -ENOENT;
1111 		access_request_parent1 =
1112 			get_mode_access(d_backing_inode(new_dentry)->i_mode);
1113 	} else {
1114 		access_request_parent1 = 0;
1115 	}
1116 	access_request_parent2 =
1117 		get_mode_access(d_backing_inode(old_dentry)->i_mode);
1118 	if (removable) {
1119 		access_request_parent1 |= maybe_remove(old_dentry);
1120 		access_request_parent2 |= maybe_remove(new_dentry);
1121 	}
1122 
1123 	/* The mount points are the same for old and new paths, cf. EXDEV. */
1124 	if (old_dentry->d_parent == new_dir->dentry) {
1125 		/*
1126 		 * The LANDLOCK_ACCESS_FS_REFER access right is not required
1127 		 * for a same-directory rename or link (i.e. no reparenting).
1128 		 */
1129 		access_request_parent1 = landlock_init_layer_masks(
1130 			dom, access_request_parent1 | access_request_parent2,
1131 			&layer_masks_parent1, LANDLOCK_KEY_INODE);
1132 		if (is_access_to_paths_allowed(
1133 			    dom, new_dir, access_request_parent1,
1134 			    &layer_masks_parent1, NULL, 0, NULL, NULL))
1135 			return 0;
1136 		return -EACCES;
1137 	}
1138 
1139 	access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
1140 	access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;
1141 
1142 	/* Saves the common mount point. */
1143 	mnt_dir.mnt = new_dir->mnt;
1144 	mnt_dir.dentry = new_dir->mnt->mnt_root;
1145 
1146 	/*
1147 	 * old_dentry may be the root of the common mount point and
1148 	 * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
1149 	 * OPEN_TREE_CLONE).  We do not need to call dget(old_parent) because
1150 	 * we keep a reference to old_dentry.
1151 	 */
1152 	old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
1153 						      old_dentry->d_parent;
1154 
1155 	/* new_dir->dentry is equal to new_dentry->d_parent */
1156 	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
1157 						&layer_masks_parent1);
1158 	allow_parent2 = collect_domain_accesses(
1159 		dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
1160 
1161 	if (allow_parent1 && allow_parent2)
1162 		return 0;
1163 
1164 	/*
1165 	 * To be able to compare source and destination domain access rights,
1166 	 * take into account the @old_dentry access rights aggregated with its
1167 	 * parent access rights.  This will be useful to compare with the
1168 	 * destination parent access rights.
1169 	 */
1170 	if (is_access_to_paths_allowed(
1171 		    dom, &mnt_dir, access_request_parent1, &layer_masks_parent1,
1172 		    old_dentry, access_request_parent2, &layer_masks_parent2,
1173 		    exchange ? new_dentry : NULL))
1174 		return 0;
1175 
1176 	/*
1177 	 * This prioritizes EACCES over EXDEV for all actions, including
1178 	 * renames with RENAME_EXCHANGE.
1179 	 */
1180 	if (likely(is_eacces(&layer_masks_parent1, access_request_parent1) ||
1181 		   is_eacces(&layer_masks_parent2, access_request_parent2)))
1182 		return -EACCES;
1183 
1184 	/*
1185 	 * Gracefully forbids reparenting if the destination directory
1186 	 * hierarchy is not a superset of restrictions of the source directory
1187 	 * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
1188 	 * source or the destination.
1189 	 */
1190 	return -EXDEV;
1191 }
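/*
 * Minimal user-space sketch of the EACCES/EXDEV contract documented for
 * current_check_refer_path() above.  copy_then_unlink() is a hypothetical
 * helper standing in for a manual copy fallback; only the errno handling is
 * the point here.
 */
#if 0
#include <errno.h>
#include <stdio.h>

static int copy_then_unlink(const char *src, const char *dst);

static int move_path(const char *src, const char *dst)
{
	if (rename(src, dst) == 0)
		return 0;
	switch (errno) {
	case EXDEV:
		/*
		 * Reparenting was refused (or the paths live on different
		 * mounts): copying may still be allowed by the file-creation
		 * rights on the destination directory.
		 */
		return copy_then_unlink(src, dst);
	case EACCES:
		/* No way to remove or create the requested file type. */
		return -1;
	default:
		return -1;
	}
}
#endif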
1192 
1193 /* Inode hooks */
1194 
1195 static void hook_inode_free_security_rcu(void *inode_security)
1196 {
1197 	struct landlock_inode_security *inode_sec;
1198 
1199 	/*
1200 	 * All inodes must already have been untied from their object by
1201 	 * release_inode() or hook_sb_delete().
1202 	 */
1203 	inode_sec = inode_security + landlock_blob_sizes.lbs_inode;
1204 	WARN_ON_ONCE(inode_sec->object);
1205 }
1206 
1207 /* Super-block hooks */
1208 
1209 /*
1210  * Release the inodes used in a security policy.
1211  *
1212  * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
1213  */
1214 static void hook_sb_delete(struct super_block *const sb)
1215 {
1216 	struct inode *inode, *prev_inode = NULL;
1217 
1218 	if (!landlock_initialized)
1219 		return;
1220 
1221 	spin_lock(&sb->s_inode_list_lock);
1222 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1223 		struct landlock_object *object;
1224 
1225 		/* Only handles referenced inodes. */
1226 		if (!atomic_read(&inode->i_count))
1227 			continue;
1228 
1229 		/*
1230 		 * Protects against concurrent modification of inode (e.g.
1231 		 * from get_inode_object()).
1232 		 */
1233 		spin_lock(&inode->i_lock);
1234 		/*
1235 		 * Checks I_FREEING and I_WILL_FREE  to protect against a race
1236 		 * condition when release_inode() just called iput(), which
1237 		 * could lead to a NULL dereference of inode->security or a
1238 		 * second call to iput() for the same Landlock object.  Also
1239 		 * checks I_NEW because such inode cannot be tied to an object.
1240 		 */
1241 		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
1242 			spin_unlock(&inode->i_lock);
1243 			continue;
1244 		}
1245 
1246 		rcu_read_lock();
1247 		object = rcu_dereference(landlock_inode(inode)->object);
1248 		if (!object) {
1249 			rcu_read_unlock();
1250 			spin_unlock(&inode->i_lock);
1251 			continue;
1252 		}
1253 		/* Keeps a reference to this inode until the next loop walk. */
1254 		__iget(inode);
1255 		spin_unlock(&inode->i_lock);
1256 
1257 		/*
1258 		 * If there is no concurrent release_inode() ongoing, then we
1259 		 * are in charge of calling iput() on this inode, otherwise we
1260 		 * will just wait for it to finish.
1261 		 */
1262 		spin_lock(&object->lock);
1263 		if (object->underobj == inode) {
1264 			object->underobj = NULL;
1265 			spin_unlock(&object->lock);
1266 			rcu_read_unlock();
1267 
1268 			/*
1269 			 * Because object->underobj was not NULL,
1270 			 * release_inode() and get_inode_object() guarantee
1271 			 * that it is safe to reset
1272 			 * landlock_inode(inode)->object while it is not NULL.
1273 			 * It is therefore not necessary to lock inode->i_lock.
1274 			 */
1275 			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
1276 			/*
1277 			 * At this point, we own the ihold() reference that was
1278 			 * originally set up by get_inode_object() and the
1279 			 * __iget() reference that we just set in this loop
1280 			 * walk.  Therefore the following call to iput() will
1281 			 * not sleep nor drop the inode because there are now at
1282 			 * least two references to it.
1283 			 */
1284 			iput(inode);
1285 		} else {
1286 			spin_unlock(&object->lock);
1287 			rcu_read_unlock();
1288 		}
1289 
1290 		if (prev_inode) {
1291 			/*
1292 			 * At this point, we still own the __iget() reference
1293 			 * that we just set in this loop walk.  Therefore we
1294 			 * can drop the list lock and know that the inode won't
1295 			 * disappear from under us until the next loop walk.
1296 			 */
1297 			spin_unlock(&sb->s_inode_list_lock);
1298 			/*
1299 			 * We can now actually put the inode reference from the
1300 			 * previous loop walk, which is not needed anymore.
1301 			 */
1302 			iput(prev_inode);
1303 			cond_resched();
1304 			spin_lock(&sb->s_inode_list_lock);
1305 		}
1306 		prev_inode = inode;
1307 	}
1308 	spin_unlock(&sb->s_inode_list_lock);
1309 
1310 	/* Puts the inode reference from the last loop walk, if any. */
1311 	if (prev_inode)
1312 		iput(prev_inode);
1313 	/* Waits for pending iput() in release_inode(). */
1314 	wait_var_event(&landlock_superblock(sb)->inode_refs,
1315 		       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
1316 }
1317 
1318 /*
1319  * Because a Landlock security policy is defined according to the filesystem
1320  * topology (i.e. the mount namespace), changing it may grant access to files
1321  * not previously allowed.
1322  *
1323  * To make it simple, deny any filesystem topology modification by landlocked
1324  * processes.  Non-landlocked processes may still change the namespace of a
1325  * landlocked process, but this kind of threat must be handled by a system-wide
1326  * access-control security policy.
1327  *
1328  * This could be lifted in the future if Landlock can safely handle mount
1329  * namespace updates requested by a landlocked process.  Indeed, we could
1330  * update the current domain (which is currently read-only) by taking into
1331  * account the accesses of the source and the destination of a new mount point.
1332  * However, it would also require to make all the child domains dynamically
1333  * inherit these new constraints.  Anyway, for backward compatibility reasons,
1334  * a dedicated user space option would be required (e.g. as a ruleset flag).
1335  */
1336 static int hook_sb_mount(const char *const dev_name,
1337 			 const struct path *const path, const char *const type,
1338 			 const unsigned long flags, void *const data)
1339 {
1340 	if (!get_current_fs_domain())
1341 		return 0;
1342 	return -EPERM;
1343 }
1344 
1345 static int hook_move_mount(const struct path *const from_path,
1346 			   const struct path *const to_path)
1347 {
1348 	if (!get_current_fs_domain())
1349 		return 0;
1350 	return -EPERM;
1351 }
1352 
1353 /*
1354  * Removing a mount point may reveal a previously hidden file hierarchy, which
1355  * may then grant access to files that were previously forbidden.
1356  */
1357 static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
1358 {
1359 	if (!get_current_fs_domain())
1360 		return 0;
1361 	return -EPERM;
1362 }
1363 
1364 static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
1365 {
1366 	if (!get_current_fs_domain())
1367 		return 0;
1368 	return -EPERM;
1369 }
1370 
1371 /*
1372  * pivot_root(2), like mount(2), changes the current mount namespace.  It must
1373  * then be forbidden for a landlocked process.
1374  *
1375  * However, chroot(2) may be allowed because it only changes the relative root
1376  * directory of the current process.  Moreover, it can be used to restrict the
1377  * view of the filesystem.
1378  */
1379 static int hook_sb_pivotroot(const struct path *const old_path,
1380 			     const struct path *const new_path)
1381 {
1382 	if (!get_current_fs_domain())
1383 		return 0;
1384 	return -EPERM;
1385 }
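/*
 * Practical effect of the hooks above, seen from user space (a sketch): once
 * a thread is restricted by a Landlock domain that handles any filesystem
 * access, mount(2), move_mount(2), umount2(2), remounting and pivot_root(2)
 * all fail with EPERM, whatever the ruleset contains, whereas chroot(2)
 * remains usable.
 */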
1386 
1387 /* Path hooks */
1388 
1389 static int hook_path_link(struct dentry *const old_dentry,
1390 			  const struct path *const new_dir,
1391 			  struct dentry *const new_dentry)
1392 {
1393 	return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
1394 					false);
1395 }
1396 
1397 static int hook_path_rename(const struct path *const old_dir,
1398 			    struct dentry *const old_dentry,
1399 			    const struct path *const new_dir,
1400 			    struct dentry *const new_dentry,
1401 			    const unsigned int flags)
1402 {
1403 	/* old_dir refers to old_dentry->d_parent and new_dir->mnt */
1404 	return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
1405 					!!(flags & RENAME_EXCHANGE));
1406 }
1407 
1408 static int hook_path_mkdir(const struct path *const dir,
1409 			   struct dentry *const dentry, const umode_t mode)
1410 {
1411 	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
1412 }
1413 
1414 static int hook_path_mknod(const struct path *const dir,
1415 			   struct dentry *const dentry, const umode_t mode,
1416 			   const unsigned int dev)
1417 {
1418 	const struct landlock_ruleset *const dom = get_current_fs_domain();
1419 
1420 	if (!dom)
1421 		return 0;
1422 	return check_access_path(dom, dir, get_mode_access(mode));
1423 }
1424 
1425 static int hook_path_symlink(const struct path *const dir,
1426 			     struct dentry *const dentry,
1427 			     const char *const old_name)
1428 {
1429 	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
1430 }
1431 
1432 static int hook_path_unlink(const struct path *const dir,
1433 			    struct dentry *const dentry)
1434 {
1435 	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
1436 }
1437 
1438 static int hook_path_rmdir(const struct path *const dir,
1439 			   struct dentry *const dentry)
1440 {
1441 	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
1442 }
1443 
1444 static int hook_path_truncate(const struct path *const path)
1445 {
1446 	return current_check_access_path(path, LANDLOCK_ACCESS_FS_TRUNCATE);
1447 }
1448 
1449 /* File hooks */
1450 
1451 /**
1452  * get_required_file_open_access - Get access needed to open a file
1453  *
1454  * @file: File being opened.
1455  *
1456  * Returns the access rights that are required for opening the given file,
1457  * depending on the file type and open mode.
1458  */
1459 static access_mask_t
1460 get_required_file_open_access(const struct file *const file)
1461 {
1462 	access_mask_t access = 0;
1463 
1464 	if (file->f_mode & FMODE_READ) {
1465 		/* A directory can only be opened in read mode. */
1466 		if (S_ISDIR(file_inode(file)->i_mode))
1467 			return LANDLOCK_ACCESS_FS_READ_DIR;
1468 		access = LANDLOCK_ACCESS_FS_READ_FILE;
1469 	}
1470 	if (file->f_mode & FMODE_WRITE)
1471 		access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
1472 	/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
1473 	if (file->f_flags & __FMODE_EXEC)
1474 		access |= LANDLOCK_ACCESS_FS_EXECUTE;
1475 	return access;
1476 }
1477 
1478 static int hook_file_alloc_security(struct file *const file)
1479 {
1480 	/*
1481 	 * Grants all access rights, even if most of them are not checked later
1482 	 * on. It is more consistent.
1483 	 *
1484 	 * Notably, file descriptors for regular files can also be acquired
1485 	 * without going through the file_open hook, for example when using
1486 	 * memfd_create(2).
1487 	 */
1488 	landlock_file(file)->allowed_access = LANDLOCK_MASK_ACCESS_FS;
1489 	return 0;
1490 }
1491 
1492 static bool is_device(const struct file *const file)
1493 {
1494 	const struct inode *inode = file_inode(file);
1495 
1496 	return S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode);
1497 }
1498 
1499 static int hook_file_open(struct file *const file)
1500 {
1501 	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
1502 	access_mask_t open_access_request, full_access_request, allowed_access,
1503 		optional_access;
1504 	const struct landlock_ruleset *const dom =
1505 		landlock_get_applicable_domain(
1506 			landlock_cred(file->f_cred)->domain, any_fs);
1507 
1508 	if (!dom)
1509 		return 0;
1510 
1511 	/*
1512 	 * Because a file may be opened with O_PATH, get_required_file_open_access()
1513 	 * may return 0.  This case will be handled with a future Landlock
1514 	 * evolution.
1515 	 */
1516 	open_access_request = get_required_file_open_access(file);
1517 
1518 	/*
1519 	 * We look up more access than what we immediately need for open(), so
1520 	 * that we can later authorize operations on opened files.
1521 	 */
1522 	optional_access = LANDLOCK_ACCESS_FS_TRUNCATE;
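	/* The IOCTL_DEV right only applies to character and block devices. */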
1523 	if (is_device(file))
1524 		optional_access |= LANDLOCK_ACCESS_FS_IOCTL_DEV;
1525 
1526 	full_access_request = open_access_request | optional_access;
1527 
1528 	if (is_access_to_paths_allowed(
1529 		    dom, &file->f_path,
1530 		    landlock_init_layer_masks(dom, full_access_request,
1531 					      &layer_masks, LANDLOCK_KEY_INODE),
1532 		    &layer_masks, NULL, 0, NULL, NULL)) {
1533 		allowed_access = full_access_request;
1534 	} else {
1535 		unsigned long access_bit;
1536 		const unsigned long access_req = full_access_request;
1537 
1538 		/*
1539 		 * Calculate the actual allowed access rights from layer_masks.
1540 		 * Add to allowed_access each access right that has not been
1541 		 * vetoed by any layer.
1542 		 */
1543 		allowed_access = 0;
1544 		for_each_set_bit(access_bit, &access_req,
1545 				 ARRAY_SIZE(layer_masks)) {
1546 			if (!layer_masks[access_bit])
1547 				allowed_access |= BIT_ULL(access_bit);
1548 		}
1549 	}
1550 
1551 	/*
1552 	 * For operations on already opened files (e.g. ftruncate()), it is the
1553 	 * access rights at the time of open() which decide whether the
1554 	 * operation is permitted. Therefore, we record the relevant subset of
1555 	 * file access rights in the opened struct file.
1556 	 */
1557 	landlock_file(file)->allowed_access = allowed_access;
1558 
1559 	if ((open_access_request & allowed_access) == open_access_request)
1560 		return 0;
1561 
1562 	return -EACCES;
1563 }
1564 
1565 static int hook_file_truncate(struct file *const file)
1566 {
1567 	/*
1568 	 * Allows truncation if the truncate right was available at the time of
1569 	 * opening the file, to get a consistent access check as for read, write
1570 	 * and execute operations.
1571 	 *
1572 	 * Note: For checks done based on the file's Landlock allowed access, we
1573 	 * enforce them independently of whether the current thread is in a
1574 	 * Landlock domain, so that open files passed between independent
1575 	 * processes retain their behaviour.
1576 	 */
1577 	if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE)
1578 		return 0;
1579 	return -EACCES;
1580 }
1581 
1582 static int hook_file_ioctl(struct file *file, unsigned int cmd,
1583 			   unsigned long arg)
1584 {
1585 	access_mask_t allowed_access = landlock_file(file)->allowed_access;
1586 
1587 	/*
1588 	 * It is the access rights at the time of opening the file which
1589 	 * determine whether IOCTL can be used on the opened file later.
1590 	 *
1591 	 * The access right is attached to the opened file in hook_file_open().
1592 	 */
1593 	if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
1594 		return 0;
1595 
1596 	if (!is_device(file))
1597 		return 0;
1598 
1599 	if (is_masked_device_ioctl(cmd))
1600 		return 0;
1601 
1602 	return -EACCES;
1603 }
1604 
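/*
 * Same checks as hook_file_ioctl(), but for the 32-bit compatibility IOCTL
 * path, which has its own list of always-allowed commands (see
 * is_masked_device_ioctl_compat()).
 */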
1605 static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
1606 				  unsigned long arg)
1607 {
1608 	access_mask_t allowed_access = landlock_file(file)->allowed_access;
1609 
1610 	/*
1611 	 * It is the access rights at the time of opening the file which
1612 	 * determine whether IOCTL can be used on the opened file later.
1613 	 *
1614 	 * The access right is attached to the opened file in hook_file_open().
1615 	 */
1616 	if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
1617 		return 0;
1618 
1619 	if (!is_device(file))
1620 		return 0;
1621 
1622 	if (is_masked_device_ioctl_compat(cmd))
1623 		return 0;
1624 
1625 	return -EACCES;
1626 }
1627 
1628 /*
1629  * Always allow sending signals between threads of the same process.  This
1630  * ensures consistency with hook_task_kill().
1631  */
1632 static bool control_current_fowner(struct fown_struct *const fown)
1633 {
1634 	struct task_struct *p;
1635 
1636 	/*
1637 	 * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
1638 	 * file_set_fowner LSM hook inconsistencies").
1639 	 */
1640 	lockdep_assert_held(&fown->lock);
1641 
1642 	/*
1643 	 * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side
1644 	 * critical section.
1645 	 */
1646 	guard(rcu)();
1647 	p = pid_task(fown->pid, fown->pid_type);
1648 	if (!p)
1649 		return true;
1650 
1651 	return !same_thread_group(p, current);
1652 }
1653 
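/*
 * Saves the domain of the task that sets the file owner, so that signal
 * scoping checks (LANDLOCK_SCOPE_SIGNAL) can later compare it with the domain
 * of the task receiving SIGIO/SIGURG (see task.c).
 */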
1654 static void hook_file_set_fowner(struct file *file)
1655 {
1656 	struct landlock_ruleset *prev_dom;
1657 	struct landlock_ruleset *new_dom = NULL;
1658 
1659 	if (control_current_fowner(file_f_owner(file))) {
1660 		new_dom = landlock_get_current_domain();
1661 		landlock_get_ruleset(new_dom);
1662 	}
1663 
1664 	prev_dom = landlock_file(file)->fown_domain;
1665 	landlock_file(file)->fown_domain = new_dom;
1666 
1667 	/* May be called in an RCU read-side critical section. */
1668 	landlock_put_ruleset_deferred(prev_dom);
1669 }
1670 
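/*
 * Releases the domain reference that hook_file_set_fowner() may have attached
 * to this file.  landlock_put_ruleset_deferred() handles a NULL pointer.
 */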
1671 static void hook_file_free_security(struct file *file)
1672 {
1673 	landlock_put_ruleset_deferred(landlock_file(file)->fown_domain);
1674 }
1675 
1676 static struct security_hook_list landlock_hooks[] __ro_after_init = {
1677 	LSM_HOOK_INIT(inode_free_security_rcu, hook_inode_free_security_rcu),
1678 
1679 	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
1680 	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
1681 	LSM_HOOK_INIT(move_mount, hook_move_mount),
1682 	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
1683 	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
1684 	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),
1685 
1686 	LSM_HOOK_INIT(path_link, hook_path_link),
1687 	LSM_HOOK_INIT(path_rename, hook_path_rename),
1688 	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
1689 	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
1690 	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
1691 	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
1692 	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
1693 	LSM_HOOK_INIT(path_truncate, hook_path_truncate),
1694 
1695 	LSM_HOOK_INIT(file_alloc_security, hook_file_alloc_security),
1696 	LSM_HOOK_INIT(file_open, hook_file_open),
1697 	LSM_HOOK_INIT(file_truncate, hook_file_truncate),
1698 	LSM_HOOK_INIT(file_ioctl, hook_file_ioctl),
1699 	LSM_HOOK_INIT(file_ioctl_compat, hook_file_ioctl_compat),
1700 	LSM_HOOK_INIT(file_set_fowner, hook_file_set_fowner),
1701 	LSM_HOOK_INIT(file_free_security, hook_file_free_security),
1702 };
1703 
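/*
 * Registers all the filesystem-related hooks above.  Called once at boot from
 * the main Landlock LSM initialization (see setup.c).
 */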
1704 __init void landlock_add_fs_hooks(void)
1705 {
1706 	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
1707 			   &landlock_lsmid);
1708 }
1709 
1710 #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
1711 
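/*
 * The KUnit test functions listed below are defined earlier in this file,
 * next to the helpers they exercise.
 */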
1712 /* clang-format off */
1713 static struct kunit_case test_cases[] = {
1714 	KUNIT_CASE(test_no_more_access),
1715 	KUNIT_CASE(test_scope_to_request_with_exec_none),
1716 	KUNIT_CASE(test_scope_to_request_with_exec_some),
1717 	KUNIT_CASE(test_scope_to_request_without_access),
1718 	KUNIT_CASE(test_is_eacces_with_none),
1719 	KUNIT_CASE(test_is_eacces_with_refer),
1720 	KUNIT_CASE(test_is_eacces_with_write),
1721 	{}
1722 };
1723 /* clang-format on */
1724 
1725 static struct kunit_suite test_suite = {
1726 	.name = "landlock_fs",
1727 	.test_cases = test_cases,
1728 };
1729 
1730 kunit_test_suite(test_suite);
1731 
1732 #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
1733