/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, we scan 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */
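
/*
 * A quick check of the arithmetic above (illustrative, assuming 4 KiB
 * pages): one bitmap page holds PAGE_SIZE * 8 = 32768 bits, so 1
 * million PIDs fit in 1048576 / 32768 = 32 bitmap pages, which is
 * where the "32 list entries" worst case comes from; the 4-million-PID
 * ceiling (PID_MAX_LIMIT) fits in 128 such pages.
 */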

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
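
/*
 * The namespace pointer is folded into the hash together with the
 * numeric pid, so the same pid number in two different namespaces
 * usually lands in a different hash bucket.
 */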
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
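
/*
 * For example, assuming 4 KiB pages (BITS_PER_PAGE == 32768): bit 5
 * of the second bitmap page, i.e. map == &pid_ns->pidmap[1] and
 * off == 5, yields pid 1*32768 + 5 == 32773. free_pidmap() below
 * performs the inverse split using / BITS_PER_PAGE and
 * & BITS_PER_PAGE_MASK.
 */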

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

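/*
 * A task is a "container init" if its pid number is 1 in its own,
 * deepest namespace (numbers[pid->level]); the same task typically
 * has a different, larger pid number in each ancestor namespace.
 */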
int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
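/*
 * One interleaving that shows the deadlock (a sketch, not an observed
 * trace):
 *
 *	CPU0					CPU1
 *	----					----
 *	write_lock_irq(&tasklist_lock);
 *						spin_lock(&pidmap_lock);
 *	free_pid()
 *	  spin_lock(&pidmap_lock);  <- spins
 *						<interrupt>
 *						read_lock(&tasklist_lock);  <- spins
 *
 * Neither cpu can make progress. Disabling interrupts while holding
 * pidmap_lock keeps the interrupt on CPU1 from arriving inside the
 * critical section.
 */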

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
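
/*
 * In words, the loop above: start just past last_pid, wrapping to
 * RESERVED_PIDS rather than 0, so that pids below 300 are handed out
 * only once during early boot and never recycled. Each pass allocates
 * the bitmap page on demand, claims a free bit with an atomic
 * test_and_set_bit() (hence no lock on the fast path), and otherwise
 * moves on to the next bitmap page, wrapping around the whole pid
 * space once before giving up and returning -1.
 */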

int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

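	/*
	 * Fast path: if we hold the only reference, skip the atomic
	 * read-modify-write; otherwise drop one reference and free
	 * only if it was the last.
	 */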
	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
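	/*
	 * alloc_pidmap() failed at level i; levels i+1 .. ns->level
	 * were already allocated above, so release only those.
	 */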
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}

EXPORT_SYMBOL(find_task_by_pid_type_ns);

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
			current->nsproxy->pid_ns);
}
EXPORT_SYMBOL(find_task_by_vpid);

struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
}
EXPORT_SYMBOL(find_task_by_pid_ns);

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
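
/*
 * A typical caller pattern (an illustrative sketch, not code from this
 * file): resolve a user-supplied pid number to a task while holding
 * references, and drop them when done. Both helpers tolerate NULL.
 *
 *	struct pid *pid = find_get_pid(nr);
 *	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
 *
 *	if (task) {
 *		... use task ...
 *		put_task_struct(task);
 *	}
 *	put_pid(pid);
 */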

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
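
/*
 * A pid is visible in a namespace only if that namespace is the pid's
 * own namespace or an ancestor of it, hence the level check above;
 * pid_nr_ns() returns 0 when the pid has no number in the queried
 * namespace.
 */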

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pid(tsk), ns);
}
EXPORT_SYMBOL(task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pgrp(tsk), ns);
}
EXPORT_SYMBOL(task_pgrp_nr_ns);

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_session(tsk), ns);
}
EXPORT_SYMBOL(task_session_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
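/*
 * Worked example of the sizing below: with 1 GiB of kernel pages,
 * megabytes == 1024 and fls(1024 * 4) == 13, which is clamped to the
 * maximum shift of 12, i.e. 4096 slots; a machine small enough that
 * fls(megabytes * 4) <= 4 gets the minimum of 2^4 == 16 slots.
 */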
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}