// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_fs.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
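/*
 * Each map above holds a single identity extent: ID n inside the init
 * namespace is kernel ID n, for n in [0, 4294967294].  The count of
 * 4294967295U is UINT_MAX, which leaves (u32)-1 itself unmapped; that
 * value is reserved as the invalid ID (INVALID_UID and friends).
 */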

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
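/*
 * A worked example of the hash, assuming the non-small configuration
 * (UIDHASH_BITS == 7, so UIDHASH_SZ == 128 and UIDHASH_MASK == 127):
 *
 *	__uidhashfn(1000) == ((1000 >> 7) + 1000) & 127
 *			  == (7 + 1000) & 127
 *			  == 111
 *
 * so uid 1000 hashes to uidhash_table[111].  Folding the bits above the
 * mask back in spreads UIDs that differ only in their high bits (e.g.
 * 1000 and 1128, which share their low seven bits) across different
 * buckets instead of letting them all collide.
 */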

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
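/*
 * Hence the convention visible below: lookups that may run with IRQs in
 * an unknown state use the _irqsave/_irqrestore pair, e.g.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... hash table access ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * while spin_lock_bh() is avoided entirely, since local_bh_enable()
 * cannot be called with local interrupts disabled (see above).
 */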

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
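/*
 * Sketch of a hypothetical caller, pairing the lookup with the ref drop
 * that the comment above requires:
 *
 *	struct user_struct *user = find_user(current_uid());
 *	if (user) {
 *		... read fields such as user->processes ...
 *		free_uid(user);		(drops find_user()'s reference)
 *	}
 */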

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}
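/*
 * refcount_dec_and_lock_irqsave() takes uidhash_lock (saving IRQ state
 * into flags) only when the count drops to zero, so the common case of
 * releasing a still-shared user_struct never touches the lock.  Only
 * the final put proceeds into free_user(), which is entered with the
 * lock held, matching its __releases() annotation.
 */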

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	proc_register_uid(uid);

	return up;
}
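/*
 * Note the optimistic allocation pattern above: kmem_cache_zalloc() may
 * sleep (GFP_KERNEL), so it runs outside uidhash_lock.  Two tasks can
 * therefore both miss the first lookup and both allocate; the second
 * lookup under the lock spots the race, and the loser frees its copy
 * and reuses the winner's entry, with the reference uid_hash_find()
 * already took on its behalf.
 */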

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);
	proc_register_uid(GLOBAL_ROOT_UID);

	return 0;
}
subsys_initcall(uid_cache_init);
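/*
 * subsys_initcall() runs uid_cache_init() during early boot, after core
 * kernel setup but before device drivers and long before userspace, so
 * the cache and root_user's hash entry exist before the first setuid().
 */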