// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/proc_ns.h>

#include <trace/hooks/user.h>
#include <linux/android_kabi.h>

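/*
 * Struct types recorded for the Android KABI (kernel ABI stability)
 * tooling. (Editor's reading of ANDROID_KABI_DECLONLY(); not documented
 * in this file itself.)
 */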
ANDROID_KABI_DECLONLY(address_space);
ANDROID_KABI_DECLONLY(cred);
ANDROID_KABI_DECLONLY(dentry);
ANDROID_KABI_DECLONLY(file);
ANDROID_KABI_DECLONLY(io_context);
ANDROID_KABI_DECLONLY(module);
ANDROID_KABI_DECLONLY(pid);
ANDROID_KABI_DECLONLY(sighand_struct);
ANDROID_KABI_DECLONLY(signal_struct);

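/*
 * The binfmt_misc instance belonging to the initial user namespace,
 * wired up via .binfmt_misc in init_user_ns below.
 */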
#if IS_ENABLED(CONFIG_BINFMT_MISC)
struct binfmt_misc init_binfmt_misc = {
	.entries = LIST_HEAD_INIT(init_binfmt_misc.entries),
	.enabled = true,
	.entries_lock = __RW_LOCK_UNLOCKED(init_binfmt_misc.entries_lock),
};
EXPORT_SYMBOL_GPL(init_binfmt_misc);
#endif

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
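/*
 * Each of the three ID maps below is a single identity extent covering
 * the whole 32-bit range bar one: the count of 4294967295 (UINT_MAX)
 * leaves out (uid_t)-1, which the kernel reserves as the invalid ID.
 */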
struct user_namespace init_user_ns = {
	.uid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.gid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.projid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.ns.count = REFCOUNT_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
#if IS_ENABLED(CONFIG_BINFMT_MISC)
	.binfmt_misc = &init_binfmt_misc,
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

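/*
 * The hash folds the bits above UIDHASH_BITS back into the low-order
 * index, so both consecutive UIDs and UIDs differing only in their high
 * bits spread across different buckets.
 */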
#define UIDHASH_BITS	(IS_ENABLED(CONFIG_BASE_SMALL) ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

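/*
 * Walk the bucket looking for @uid; on a hit, take a reference and
 * return the entry, otherwise return NULL.
 */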
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

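/*
 * Set up the per-user epoll watch counter. Without CONFIG_EPOLL there is
 * nothing to count, so this degenerates to a successful no-op.
 */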
static int user_epoll_alloc(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
#else
	return 0;
#endif
}

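/* Tear down the per-user epoll watch counter set up by user_epoll_alloc(). */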
static void user_epoll_free(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	percpu_counter_destroy(&up->epoll_watches);
#endif
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	trace_android_vh_free_user(up);
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	user_epoll_free(up);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(find_user);

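/*
 * Drop a reference taken by find_user() or alloc_uid(). The final
 * decrement happens with uidhash_lock held (and IRQs disabled), so a
 * concurrent uid_hash_find() cannot take a new reference on an entry
 * that is about to be freed.
 */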
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}
EXPORT_SYMBOL_GPL(free_uid);

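/*
 * Return the user_struct for @uid with a reference held, creating and
 * hashing a new one if none exists. Returns NULL only on allocation
 * failure. The hash lock is dropped around the GFP_KERNEL allocation, so
 * the bucket is re-checked before insertion in case another task raced in.
 */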
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		trace_android_vh_alloc_uid(new);
		if (user_epoll_alloc(new)) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			user_epoll_free(new);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}

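/*
 * Boot-time setup: create the user_struct slab cache, initialise the
 * hash buckets and pre-hash root_user, since the init task already holds
 * a root credential.
 */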
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	if (user_epoll_alloc(&root_user))
		panic("root_user epoll percpu counter alloc failed");

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);