/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>

struct pid_cache {
        int nr_ids;
        char name[16];
        struct kmem_cache *cachep;
        struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * Create (or reuse) the kmem cache that pids for namespaces at this depth
 * are allocated from.
 * @nr_ids: the number of numerical ids (one per namespace level) each pid
 *          allocated from this cache has to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
        struct pid_cache *pcache;
        struct kmem_cache *cachep;

        mutex_lock(&pid_caches_mutex);
        list_for_each_entry(pcache, &pid_caches_lh, list)
                if (pcache->nr_ids == nr_ids)
                        goto out;

        pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
        if (pcache == NULL)
                goto err_alloc;

        snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
        cachep = kmem_cache_create(pcache->name,
                        sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (cachep == NULL)
                goto err_cachep;

        pcache->nr_ids = nr_ids;
        pcache->cachep = cachep;
        list_add(&pcache->list, &pid_caches_lh);
out:
        mutex_unlock(&pid_caches_mutex);
        return pcache->cachep;

err_cachep:
        kfree(pcache);
err_alloc:
        mutex_unlock(&pid_caches_mutex);
        return NULL;
}

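/*
 * Deferred (workqueue) release of the namespace's proc mount; the actual
 * work is done by pid_ns_release_proc().
 */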
static void proc_cleanup_work(struct work_struct *work)
{
        struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
        pid_ns_release_proc(ns);
}

/* MAX_PID_NS_LEVEL is needed to limit the size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32

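/*
 * Allocate and set up a new pid namespace one level below @parent_pid_ns,
 * owned by @user_ns.  Returns the new namespace or an ERR_PTR() on failure.
 */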
static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
        struct pid_namespace *parent_pid_ns)
{
        struct pid_namespace *ns;
        unsigned int level = parent_pid_ns->level + 1;
        int i;
        int err;

        if (level > MAX_PID_NS_LEVEL) {
                err = -EINVAL;
                goto out;
        }

        err = -ENOMEM;
        ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
        if (ns == NULL)
                goto out;

        ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!ns->pidmap[0].page)
                goto out_free;

        ns->pid_cachep = create_pid_cachep(level + 1);
        if (ns->pid_cachep == NULL)
                goto out_free_map;

        err = proc_alloc_inum(&ns->proc_inum);
        if (err)
                goto out_free_map;

        kref_init(&ns->kref);
        ns->level = level;
        ns->parent = get_pid_ns(parent_pid_ns);
        ns->user_ns = get_user_ns(user_ns);
        ns->nr_hashed = PIDNS_HASH_ADDING;
        INIT_WORK(&ns->proc_work, proc_cleanup_work);

        set_bit(0, ns->pidmap[0].page);
        atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

        for (i = 1; i < PIDMAP_ENTRIES; i++)
                atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

        return ns;

out_free_map:
        kfree(ns->pidmap[0].page);
out_free:
        kmem_cache_free(pid_ns_cachep, ns);
out:
        return ERR_PTR(err);
}

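/* RCU callback that finally frees the pid_namespace structure. */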
static void delayed_free_pidns(struct rcu_head *p)
{
        kmem_cache_free(pid_ns_cachep,
                        container_of(p, struct pid_namespace, rcu));
}

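/*
 * Tear down a dead pid namespace: release its proc inode and pidmap pages,
 * drop the owning user namespace and free the structure after an RCU grace
 * period.
 */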
static void destroy_pid_namespace(struct pid_namespace *ns)
{
        int i;

        proc_free_inum(ns->proc_inum);
        for (i = 0; i < PIDMAP_ENTRIES; i++)
                kfree(ns->pidmap[i].page);
        put_user_ns(ns->user_ns);
        call_rcu(&ns->rcu, delayed_free_pidns);
}

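/*
 * Called on clone()/unshare().  Without CLONE_NEWPID the caller keeps a
 * reference to the old namespace; with it, a fresh child namespace is
 * created, and only as a child of the caller's currently active pid
 * namespace.
 */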
struct pid_namespace *copy_pid_ns(unsigned long flags,
        struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
        if (!(flags & CLONE_NEWPID))
                return get_pid_ns(old_ns);
        if (task_active_pid_ns(current) != old_ns)
                return ERR_PTR(-EINVAL);
        return create_pid_namespace(user_ns, old_ns);
}

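/* kref release callback: runs when the last reference to a namespace is dropped. */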
static void free_pid_ns(struct kref *kref)
{
        struct pid_namespace *ns;

        ns = container_of(kref, struct pid_namespace, kref);
        destroy_pid_namespace(ns);
}

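/*
 * Drop a reference on @ns.  Destroying a namespace also drops the reference
 * it holds on its parent, so keep walking up the hierarchy until a namespace
 * survives the put.  init_pid_ns is never freed.
 */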
void put_pid_ns(struct pid_namespace *ns)
{
        struct pid_namespace *parent;

        while (ns != &init_pid_ns) {
                parent = ns->parent;
                if (!kref_put(&ns->kref, free_pid_ns))
                        break;
                ns = parent;
        }
}
EXPORT_SYMBOL_GPL(put_pid_ns);

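/*
 * Called when the namespace's init (child reaper) exits: prevent new pid
 * allocations, kill every remaining task in the namespace and wait until
 * all of them have been reaped, so no pid outlives the namespace's init.
 */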
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
        int nr;
        int rc;
        struct task_struct *task, *me = current;
        int init_pids = thread_group_leader(me) ? 1 : 2;

        /* Don't allow any more processes into the pid namespace */
        disable_pid_allocation(pid_ns);

        /* Ignore SIGCHLD so that any terminated children are autoreaped */
        spin_lock_irq(&me->sighand->siglock);
        me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
        spin_unlock_irq(&me->sighand->siglock);

        /*
         * The last thread in the cgroup-init thread group is terminating.
         * Find the remaining pids in the namespace, signal them and wait
         * for them to exit.
         *
         * Note:  This signals each thread in the namespace - even those
         *        that belong to the same thread group.  To avoid this we
         *        would have to walk the entire tasklist looking for
         *        processes in this namespace, but that could be
         *        unnecessarily expensive if the pid namespace has just a
         *        few processes.  Or we would need to maintain a tasklist
         *        for each pid namespace.
         */
        read_lock(&tasklist_lock);
        nr = next_pidmap(pid_ns, 1);
        while (nr > 0) {
                rcu_read_lock();

                task = pid_task(find_vpid(nr), PIDTYPE_PID);
                if (task && !__fatal_signal_pending(task))
                        send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

                rcu_read_unlock();

                nr = next_pidmap(pid_ns, nr);
        }
        read_unlock(&tasklist_lock);

        /* First, reap any EXIT_ZOMBIE children we may have. */
        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);

        /*
         * sys_wait4() above can't reap the TASK_DEAD children.
         * Make sure they all go away, see free_pid().
         */
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (pid_ns->nr_hashed == init_pids)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (pid_ns->reboot)
                current->signal->group_exit_code = pid_ns->reboot;

        acct_exit_ns(pid_ns);
        return;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
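/*
 * Handler for the kernel.ns_last_pid sysctl.  Reading returns the last pid
 * allocated in the caller's pid namespace; writing (which requires
 * CAP_SYS_ADMIN in the namespace's owning user namespace) sets it, so the
 * next pid handed out is the written value + 1.  Checkpoint/restore tools
 * can use this to recreate tasks with specific pids, e.g. writing 9999
 * makes the next fork() in the namespace return pid 10000 (if it is free).
 */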
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(current);
        struct ctl_table tmp = *table;

        if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        /*
         * Writing directly to the namespace's last_pid field is OK: the
         * field is volatile in a live namespace anyway, and any code
         * writing to it has to synchronize its use by external means.
         */

        tmp.data = &pid_ns->last_pid;
        return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}

extern int pid_max;
static int zero = 0;
static struct ctl_table pid_ns_ctl_table[] = {
        {
                .procname = "ns_last_pid",
                .maxlen = sizeof(int),
                .mode = 0666, /* permissions are checked in the handler */
                .proc_handler = pid_ns_ctl_handler,
                .extra1 = &zero,
                .extra2 = &pid_max,
        },
        { }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

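/*
 * reboot(2) from a pid namespace's init must not reboot the host.  Instead,
 * record how the shutdown should look to the parent namespace (SIGHUP for
 * restart, SIGINT for halt/power-off), kill the namespace's init and exit.
 */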
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
        if (pid_ns == &init_pid_ns)
                return 0;

        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART2:
        case LINUX_REBOOT_CMD_RESTART:
                pid_ns->reboot = SIGHUP;
                break;

        case LINUX_REBOOT_CMD_POWER_OFF:
        case LINUX_REBOOT_CMD_HALT:
                pid_ns->reboot = SIGINT;
                break;
        default:
                return -EINVAL;
        }

        read_lock(&tasklist_lock);
        force_sig(SIGKILL, pid_ns->child_reaper);
        read_unlock(&tasklist_lock);

        do_exit(0);

        /* Not reached */
        return 0;
}

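/*
 * Callbacks behind /proc/<pid>/ns/pid, used by setns(2) and the nsfs
 * inodes: grab/drop a reference on a task's pid namespace, install one as
 * the namespace for the caller's future children, and report its inode
 * number.
 */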
static void *pidns_get(struct task_struct *task)
{
        struct pid_namespace *ns;

        rcu_read_lock();
        ns = task_active_pid_ns(task);
        if (ns)
                get_pid_ns(ns);
        rcu_read_unlock();

        return ns;
}

static void pidns_put(void *ns)
{
        put_pid_ns(ns);
}

static int pidns_install(struct nsproxy *nsproxy, void *ns)
{
        struct pid_namespace *active = task_active_pid_ns(current);
        struct pid_namespace *ancestor, *new = ns;

        if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;

        /*
         * Only allow entering the current active pid namespace
         * or a child of the current active pid namespace.
         *
         * This is required for fork() to return a usable pid value and
         * maintains the property that processes and their children
         * cannot escape their current pid namespace.
         */
        if (new->level < active->level)
                return -EINVAL;

        ancestor = new;
        while (ancestor->level > active->level)
                ancestor = ancestor->parent;
        if (ancestor != active)
                return -EINVAL;

        put_pid_ns(nsproxy->pid_ns_for_children);
        nsproxy->pid_ns_for_children = get_pid_ns(new);
        return 0;
}

static unsigned int pidns_inum(void *ns)
{
        struct pid_namespace *pid_ns = ns;
        return pid_ns->proc_inum;
}

const struct proc_ns_operations pidns_operations = {
        .name           = "pid",
        .type           = CLONE_NEWPID,
        .get            = pidns_get,
        .put            = pidns_put,
        .install        = pidns_install,
        .inum           = pidns_inum,
};

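/*
 * Boot-time setup: create the slab cache for struct pid_namespace and, when
 * checkpoint/restore support is configured, register the kernel.ns_last_pid
 * sysctl.
 */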
static __init int pid_namespaces_init(void)
{
        pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
        register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
        return 0;
}

__initcall(pid_namespaces_init);