/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, we scan 32 list entries and at most PAGE_SIZE bytes.
 * The typical fastpath is a single successful set_bit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>

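/*
 * Hash the pid number together with its namespace pointer, so that the
 * same numeric pid in two different namespaces lands in different hash
 * buckets.
 */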
#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
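
/*
 * Worked example (illustrative, assuming 4K pages): BITS_PER_PAGE is
 * then 32768, so bit 70 of the second bitmap page (map index 1) maps to
 * pid 1 * 32768 + 70 = 32838.
 */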

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.nr_hashed = PIDNS_HASH_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.proc_inum = PROC_PID_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
	/*
	 * This is the same as saying
	 *
	 * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
	 * and that mapping orders 'a' and 'b' with respect to 'base'.
	 */
	return (unsigned)(a - base) < (unsigned)(b - base);
}
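
/*
 * Worked example (illustrative): with base == 100, a == 150, b == 50,
 * 'b' has wrapped around pid_max.  (unsigned)(150 - 100) == 50 is less
 * than (unsigned)(50 - 100) == UINT_MAX - 49, so pid_before() returns
 * true: walking upwards from 100, we reach 150 before the wrapped 50.
 */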

/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids roll over, it is not sufficient to just pick the bigger
 * value.  We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
	int prev;
	int last_write = base;
	do {
		prev = last_write;
		last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
	} while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
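
/*
 * Illustrative race (not from the original source): both CPUs start with
 * base == 100.  CPU A allocates pid 101 and stores it.  CPU B allocates
 * pid 102; its first cmpxchg() fails because last_pid is now 101, but
 * pid_before(100, 101, 102) is true, so B retries and installs 102, the
 * "later" of the two values.
 */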

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	/*
	 * If last_pid points into the middle of the map->page we
	 * want to scan this bitmap block twice, the second time
	 * we start with offset == 0 (or RESERVED_PIDS).
	 */
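	/*
	 * Worked example (illustrative, assuming 4K pages so that
	 * BITS_PER_PAGE == 32768): with pid_max == 32768 there is a
	 * single bitmap page and DIV_ROUND_UP() yields 1, so max_scan
	 * is 1 when we start mid-page (offset != 0) and the page is
	 * scanned twice, but 0 when we start at offset 0 and a single
	 * pass suffices.
	 */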
	max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			for ( ; ; ) {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					set_last_pid(pid_ns, last, pid);
					return pid;
				}
				offset = find_next_offset(map, offset);
				if (offset >= BITS_PER_PAGE)
					break;
				pid = mk_pid(pid_ns, map, offset);
				if (pid >= pid_max)
					break;
			}
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

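/*
 * Find the first allocated pid greater than 'last' in 'pid_ns': scan the
 * pidmap for the next set bit, or return -1 if there is none.  Used by
 * find_ge_pid() below.
 */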
int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
	int offset;
	struct pidmap *map, *end;

	if (last >= PID_MAX_LIMIT)
		return -1;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
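	/*
	 * Fast path: if we hold the only remaining reference, skip the
	 * atomic decrement and free right away; otherwise drop our
	 * reference and free only if it was the last one.
	 */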
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		hlist_del_rcu(&upid->pid_chain);
		switch (--ns->nr_hashed) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper, wake up the reaper.  The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_HASH_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->nr_hashed = 0;
			/* fall through */
		case 0:
			schedule_work(&ns->proc_work);
			break;
		}
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

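/*
 * Allocate a struct pid with one upid per namespace level: a task in a
 * namespace at level N is also visible in every ancestor namespace, so
 * N + 1 pid numbers are allocated, numbers[0] being the pid in the init
 * namespace.
 */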
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

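/*
 * Look up a struct pid by its number as seen from 'ns'.  The caller must
 * hold rcu_read_lock(); the result is only guaranteed to stay alive for
 * the duration of that critical section unless a reference is taken.
 */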
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link = &task->pids[type];
	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

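/*
 * Unhook 'task' from the pid it currently uses for 'type' and point the
 * link at 'new' (which may be NULL).  If no task of any type still hangs
 * off the old pid, free it.
 */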
static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	rcu_lockdep_assert(rcu_read_lock_held(),
			   "find_task_by_pid_ns() needs rcu_read_lock()"
			   " protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

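/*
 * Return the pid number of 'pid' as seen from namespace 'ns', or 0 when
 * the pid is not visible there (i.e. 'ns' sits deeper than, or off the
 * ancestor chain of, the namespace the pid was allocated in).
 */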
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

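/*
 * Return the pid number of 'task' for 'type' as seen from 'ns', falling
 * back to the caller's active namespace when 'ns' is NULL.  The special
 * __PIDTYPE_TGID is resolved as the group leader's PIDTYPE_PID, which is
 * how the tgid is derived.
 */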
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID) {
			if (type == __PIDTYPE_TGID)
				type = PIDTYPE_PID;
			task = task->group_leader;
		}
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	unsigned int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL,
					   0, 4096);
	pidhash_size = 1U << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* Verify no one has done anything silly */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
603