/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.projid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.proc_inum = PROC_USER_INIT_INO,
	.may_mount_sysfs = true,
	.may_mount_proc = true,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
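/*
 * Worked example: with UIDHASH_BITS == 7 (so UIDHASH_MASK == 127),
 * UID 1000 hashes to ((1000 >> 7) + 1000) & 127 == (7 + 1000) & 127
 * == 111.  Folding the high bits in keeps UIDs that share their low
 * bits (0, 128, 256, ...) from all piling into a single bucket.
 */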

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed.  Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

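/*
 * Scan one hash bucket for a UID match.  On a hit, take a reference on
 * behalf of the caller; the uidhash_lock held by the caller keeps the
 * entry stable while we do so.
 */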
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/*
 * IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
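
/*
 * A hypothetical caller sketch (not from this file): look up a user,
 * read a per-user counter, then drop the reference that find_user()
 * took for us:
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		int nprocs = atomic_read(&user->processes);
 *		free_uid(user);
 *	}
 */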

/*
 * Drop one reference to @up.  atomic_dec_and_lock() takes uidhash_lock
 * only on the final put, so the common case stays lock-free; interrupts
 * are disabled first because of the locking rules described above.
 */
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

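/*
 * Return the user_struct for @uid, creating one and inserting it into
 * the hash if none exists yet.  On success the caller owns a reference
 * and must drop it with free_uid(); NULL means the allocation failed.
 */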
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced with another
		 * task adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}
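
/*
 * A hypothetical sketch of a setuid()-style caller (cred and kuid are
 * illustrative names, not part of this file):
 *
 *	struct user_struct *new_user = alloc_uid(kuid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	free_uid(cred->user);
 *	cred->user = new_user;
 */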

static int __init uid_cache_init(void)
{
	int n;

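	/*
	 * SLAB_PANIC: the kernel cannot operate without this cache, so
	 * kmem_cache_create() panics on failure and no NULL check is
	 * needed here.
	 */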
	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);