1 /*
2  *  linux/kernel/timer.c
3  *
4  *  Kernel internal timers, basic process system calls
5  *
6  *  Copyright (C) 1991, 1992  Linus Torvalds
7  *
8  *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
9  *
10  *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
11  *              "A Kernel Model for Precision Timekeeping" by Dave Mills
12  *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13  *              serialize accesses to xtime/lost_ticks).
14  *                              Copyright (C) 1998  Andrea Arcangeli
15  *  1999-03-10  Improved NTP compatibility by Ulrich Windl
16  *  2002-05-31	Move sys_sysinfo here and make its locking sane, Robert Love
17  *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
18  *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
19  *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
20  */
21 
22 #include <linux/kernel_stat.h>
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
27 #include <linux/mm.h>
28 #include <linux/swap.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/notifier.h>
31 #include <linux/thread_info.h>
32 #include <linux/time.h>
33 #include <linux/jiffies.h>
34 #include <linux/posix-timers.h>
35 #include <linux/cpu.h>
36 #include <linux/syscalls.h>
37 #include <linux/delay.h>
38 #include <linux/tick.h>
39 #include <linux/kallsyms.h>
40 
41 #include <asm/uaccess.h>
42 #include <asm/unistd.h>
43 #include <asm/div64.h>
44 #include <asm/timex.h>
45 #include <asm/io.h>
46 
47 u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
48 
49 EXPORT_SYMBOL(jiffies_64);
50 
51 /*
52  * per-CPU timer vector definitions:
53  */
54 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
55 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
56 #define TVN_SIZE (1 << TVN_BITS)
57 #define TVR_SIZE (1 << TVR_BITS)
58 #define TVN_MASK (TVN_SIZE - 1)
59 #define TVR_MASK (TVR_SIZE - 1)
60 
61 struct tvec {
62 	struct list_head vec[TVN_SIZE];
63 };
64 
65 struct tvec_root {
66 	struct list_head vec[TVR_SIZE];
67 };
68 
69 struct tvec_base {
70 	spinlock_t lock;
71 	struct timer_list *running_timer;
72 	unsigned long timer_jiffies;
73 	struct tvec_root tv1;
74 	struct tvec tv2;
75 	struct tvec tv3;
76 	struct tvec tv4;
77 	struct tvec tv5;
78 } ____cacheline_aligned;
79 
80 struct tvec_base boot_tvec_bases;
81 EXPORT_SYMBOL(boot_tvec_bases);
82 static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
83 
84 /*
85  * Note that all tvec_bases are 2 byte aligned and the lower bit of
86  * base in timer_list is guaranteed to be zero. Use the LSB for
87  * the new flag to indicate whether the timer is deferrable.
88  */
89 #define TBASE_DEFERRABLE_FLAG		(0x1)
90 
91 /* Functions below help us manage 'deferrable' flag */
92 static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
93 {
94 	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
95 }
96 
97 static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
98 {
99 	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
100 }
101 
102 static inline void timer_set_deferrable(struct timer_list *timer)
103 {
104 	timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
105 				       TBASE_DEFERRABLE_FLAG));
106 }
107 
108 static inline void
109 timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
110 {
111 	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
112 				      tbase_get_deferrable(timer->base));
113 }
114 
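/*
 * Illustrative sketch (not part of the original file): because every
 * struct tvec_base is at least 2-byte aligned, bit 0 of timer->base is
 * free to carry the "deferrable" flag. The helpers above round-trip it
 * roughly like this, for a hypothetical timer 't' and base 'new_base':
 *
 *	timer_set_deferrable(&t);		sets the 0x1 bit in t.base
 *	tbase_get_deferrable(t.base);		reads back 1
 *	tbase_get_base(t.base);			yields the real base pointer
 *	timer_set_base(&t, new_base);		swaps the base, keeps the bit
 */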
115 static unsigned long round_jiffies_common(unsigned long j, int cpu,
116 		bool force_up)
117 {
118 	int rem;
119 	unsigned long original = j;
120 
121 	/*
122 	 * We don't want all cpus firing their timers at once hitting the
123 	 * same lock or cachelines, so we skew each extra cpu with an extra
124 	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
125 	 * already did this.
126 	 * The skew is done by adding 3*cpunr, then rounding, then subtracting this
127 	 * extra offset again.
128 	 */
129 	j += cpu * 3;
130 
131 	rem = j % HZ;
132 
133 	/*
134 	 * If the target jiffy is just after a whole second (which can happen
135 	 * due to delays of the timer irq, long irq off times etc etc) then
136 	 * we should round down to the whole second, not up. Use 1/4th second
137 	 * as cutoff for this rounding as an extreme upper bound for this.
138 	 * But never round down if @force_up is set.
139 	 */
140 	if (rem < HZ/4 && !force_up) /* round down */
141 		j = j - rem;
142 	else /* round up */
143 		j = j - rem + HZ;
144 
145 	/* now that we have rounded, subtract the extra skew again */
146 	j -= cpu * 3;
147 
148 	if (j <= jiffies) /* rounding ate our timeout entirely; */
149 		return original;
150 	return j;
151 }
152 
153 /**
154  * __round_jiffies - function to round jiffies to a full second
155  * @j: the time in (absolute) jiffies that should be rounded
156  * @cpu: the processor number on which the timeout will happen
157  *
158  * __round_jiffies() rounds an absolute time in the future (in jiffies)
159  * up or down to (approximately) full seconds. This is useful for timers
160  * for which the exact time they fire does not matter too much, as long as
161  * they fire approximately every X seconds.
162  *
163  * By rounding these timers to whole seconds, all such timers will fire
164  * at the same time, rather than at various times spread out. The goal
165  * of this is to have the CPU wake up less, which saves power.
166  *
167  * The exact rounding is skewed for each processor to avoid all
168  * processors firing at the exact same time, which could lead
169  * to lock contention or spurious cache line bouncing.
170  *
171  * The return value is the rounded version of the @j parameter.
172  */
173 unsigned long __round_jiffies(unsigned long j, int cpu)
174 {
175 	return round_jiffies_common(j, cpu, false);
176 }
177 EXPORT_SYMBOL_GPL(__round_jiffies);
178 
179 /**
180  * __round_jiffies_relative - function to round jiffies to a full second
181  * @j: the time in (relative) jiffies that should be rounded
182  * @cpu: the processor number on which the timeout will happen
183  *
184  * __round_jiffies_relative() rounds a time delta  in the future (in jiffies)
185  * up or down to (approximately) full seconds. This is useful for timers
186  * for which the exact time they fire does not matter too much, as long as
187  * they fire approximately every X seconds.
188  *
189  * By rounding these timers to whole seconds, all such timers will fire
190  * at the same time, rather than at various times spread out. The goal
191  * of this is to have the CPU wake up less, which saves power.
192  *
193  * The exact rounding is skewed for each processor to avoid all
194  * processors firing at the exact same time, which could lead
195  * to lock contention or spurious cache line bouncing.
196  *
197  * The return value is the rounded version of the @j parameter.
198  */
199 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
200 {
201 	unsigned long j0 = jiffies;
202 
203 	/* Use j0 because jiffies might change while we run */
204 	return round_jiffies_common(j + j0, cpu, false) - j0;
205 }
206 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
207 
208 /**
209  * round_jiffies - function to round jiffies to a full second
210  * @j: the time in (absolute) jiffies that should be rounded
211  *
212  * round_jiffies() rounds an absolute time in the future (in jiffies)
213  * up or down to (approximately) full seconds. This is useful for timers
214  * for which the exact time they fire does not matter too much, as long as
215  * they fire approximately every X seconds.
216  *
217  * By rounding these timers to whole seconds, all such timers will fire
218  * at the same time, rather than at various times spread out. The goal
219  * of this is to have the CPU wake up less, which saves power.
220  *
221  * The return value is the rounded version of the @j parameter.
222  */
223 unsigned long round_jiffies(unsigned long j)
224 {
225 	return round_jiffies_common(j, raw_smp_processor_id(), false);
226 }
227 EXPORT_SYMBOL_GPL(round_jiffies);
228 
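/*
 * Example (hedged sketch, not in the original source): a driver that only
 * needs second-level granularity can batch its wakeup with other such
 * timers by rounding the expiry before arming, e.g.:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * where 'my_timer' is a hypothetical, already initialized timer_list.
 * Use the _up variants below when firing early would be a problem.
 */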
229 /**
230  * round_jiffies_relative - function to round jiffies to a full second
231  * @j: the time in (relative) jiffies that should be rounded
232  *
233  * round_jiffies_relative() rounds a time delta  in the future (in jiffies)
234  * up or down to (approximately) full seconds. This is useful for timers
235  * for which the exact time they fire does not matter too much, as long as
236  * they fire approximately every X seconds.
237  *
238  * By rounding these timers to whole seconds, all such timers will fire
239  * at the same time, rather than at various times spread out. The goal
240  * of this is to have the CPU wake up less, which saves power.
241  *
242  * The return value is the rounded version of the @j parameter.
243  */
244 unsigned long round_jiffies_relative(unsigned long j)
245 {
246 	return __round_jiffies_relative(j, raw_smp_processor_id());
247 }
248 EXPORT_SYMBOL_GPL(round_jiffies_relative);
249 
250 /**
251  * __round_jiffies_up - function to round jiffies up to a full second
252  * @j: the time in (absolute) jiffies that should be rounded
253  * @cpu: the processor number on which the timeout will happen
254  *
255  * This is the same as __round_jiffies() except that it will never
256  * round down.  This is useful for timeouts for which the exact time
257  * of firing does not matter too much, as long as they don't fire too
258  * early.
259  */
260 unsigned long __round_jiffies_up(unsigned long j, int cpu)
261 {
262 	return round_jiffies_common(j, cpu, true);
263 }
264 EXPORT_SYMBOL_GPL(__round_jiffies_up);
265 
266 /**
267  * __round_jiffies_up_relative - function to round jiffies up to a full second
268  * @j: the time in (relative) jiffies that should be rounded
269  * @cpu: the processor number on which the timeout will happen
270  *
271  * This is the same as __round_jiffies_relative() except that it will never
272  * round down.  This is useful for timeouts for which the exact time
273  * of firing does not matter too much, as long as they don't fire too
274  * early.
275  */
276 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
277 {
278 	unsigned long j0 = jiffies;
279 
280 	/* Use j0 because jiffies might change while we run */
281 	return round_jiffies_common(j + j0, cpu, true) - j0;
282 }
283 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
284 
285 /**
286  * round_jiffies_up - function to round jiffies up to a full second
287  * @j: the time in (absolute) jiffies that should be rounded
288  *
289  * This is the same as round_jiffies() except that it will never
290  * round down.  This is useful for timeouts for which the exact time
291  * of firing does not matter too much, as long as they don't fire too
292  * early.
293  */
294 unsigned long round_jiffies_up(unsigned long j)
295 {
296 	return round_jiffies_common(j, raw_smp_processor_id(), true);
297 }
298 EXPORT_SYMBOL_GPL(round_jiffies_up);
299 
300 /**
301  * round_jiffies_up_relative - function to round jiffies up to a full second
302  * @j: the time in (relative) jiffies that should be rounded
303  *
304  * This is the same as round_jiffies_relative() except that it will never
305  * round down.  This is useful for timeouts for which the exact time
306  * of firing does not matter too much, as long as they don't fire too
307  * early.
308  */
309 unsigned long round_jiffies_up_relative(unsigned long j)
310 {
311 	return __round_jiffies_up_relative(j, raw_smp_processor_id());
312 }
313 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
314 
315 
316 static inline void set_running_timer(struct tvec_base *base,
317 					struct timer_list *timer)
318 {
319 #ifdef CONFIG_SMP
320 	base->running_timer = timer;
321 #endif
322 }
323 
324 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
325 {
326 	unsigned long expires = timer->expires;
327 	unsigned long idx = expires - base->timer_jiffies;
328 	struct list_head *vec;
329 
330 	if (idx < TVR_SIZE) {
331 		int i = expires & TVR_MASK;
332 		vec = base->tv1.vec + i;
333 	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
334 		int i = (expires >> TVR_BITS) & TVN_MASK;
335 		vec = base->tv2.vec + i;
336 	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
337 		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
338 		vec = base->tv3.vec + i;
339 	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
340 		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
341 		vec = base->tv4.vec + i;
342 	} else if ((signed long) idx < 0) {
343 		/*
344 		 * Can happen if you add a timer with expires == jiffies,
345 		 * or you set a timer to go off in the past
346 		 */
347 		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
348 	} else {
349 		int i;
350 		/* If the timeout is larger than 0xffffffff on 64-bit
351 		 * architectures then we use the maximum timeout:
352 		 */
353 		if (idx > 0xffffffffUL) {
354 			idx = 0xffffffffUL;
355 			expires = idx + base->timer_jiffies;
356 		}
357 		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
358 		vec = base->tv5.vec + i;
359 	}
360 	/*
361 	 * Timers are FIFO:
362 	 */
363 	list_add_tail(&timer->entry, vec);
364 }
365 
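/*
 * Illustrative note (assuming CONFIG_BASE_SMALL=0, i.e. TVR_BITS=8 and
 * TVN_BITS=6): internal_add_timer() picks the wheel level from
 * idx = expires - base->timer_jiffies as follows:
 *
 *	idx < 2^8		tv1, slot = expires & 0xff
 *	idx < 2^14		tv2, slot = (expires >> 8) & 0x3f
 *	idx < 2^20		tv3, slot = (expires >> 14) & 0x3f
 *	idx < 2^26		tv4, slot = (expires >> 20) & 0x3f
 *	otherwise (idx >= 0)	tv5, slot = (expires >> 26) & 0x3f
 *
 * e.g. with timer_jiffies == 0 and expires == 1000, idx >= 256, so the
 * timer lands in tv2 at slot (1000 >> 8) & 0x3f == 3.
 */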
366 #ifdef CONFIG_TIMER_STATS
367 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
368 {
369 	if (timer->start_site)
370 		return;
371 
372 	timer->start_site = addr;
373 	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
374 	timer->start_pid = current->pid;
375 }
376 
377 static void timer_stats_account_timer(struct timer_list *timer)
378 {
379 	unsigned int flag = 0;
380 
381 	if (unlikely(tbase_get_deferrable(timer->base)))
382 		flag |= TIMER_STATS_FLAG_DEFERRABLE;
383 
384 	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
385 				 timer->function, timer->start_comm, flag);
386 }
387 
388 #else
389 static void timer_stats_account_timer(struct timer_list *timer) {}
390 #endif
391 
392 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
393 
394 static struct debug_obj_descr timer_debug_descr;
395 
396 /*
397  * fixup_init is called when:
398  * - an active object is initialized
399  */
400 static int timer_fixup_init(void *addr, enum debug_obj_state state)
401 {
402 	struct timer_list *timer = addr;
403 
404 	switch (state) {
405 	case ODEBUG_STATE_ACTIVE:
406 		del_timer_sync(timer);
407 		debug_object_init(timer, &timer_debug_descr);
408 		return 1;
409 	default:
410 		return 0;
411 	}
412 }
413 
414 /*
415  * fixup_activate is called when:
416  * - an active object is activated
417  * - an unknown object is activated (might be a statically initialized object)
418  */
419 static int timer_fixup_activate(void *addr, enum debug_obj_state state)
420 {
421 	struct timer_list *timer = addr;
422 
423 	switch (state) {
424 
425 	case ODEBUG_STATE_NOTAVAILABLE:
426 		/*
427 		 * This is not really a fixup. The timer was
428 		 * statically initialized. We just make sure that it
429 		 * is tracked in the object tracker.
430 		 */
431 		if (timer->entry.next == NULL &&
432 		    timer->entry.prev == TIMER_ENTRY_STATIC) {
433 			debug_object_init(timer, &timer_debug_descr);
434 			debug_object_activate(timer, &timer_debug_descr);
435 			return 0;
436 		} else {
437 			WARN_ON_ONCE(1);
438 		}
439 		return 0;
440 
441 	case ODEBUG_STATE_ACTIVE:
442 		WARN_ON(1);
443 
444 	default:
445 		return 0;
446 	}
447 }
448 
449 /*
450  * fixup_free is called when:
451  * - an active object is freed
452  */
453 static int timer_fixup_free(void *addr, enum debug_obj_state state)
454 {
455 	struct timer_list *timer = addr;
456 
457 	switch (state) {
458 	case ODEBUG_STATE_ACTIVE:
459 		del_timer_sync(timer);
460 		debug_object_free(timer, &timer_debug_descr);
461 		return 1;
462 	default:
463 		return 0;
464 	}
465 }
466 
467 static struct debug_obj_descr timer_debug_descr = {
468 	.name		= "timer_list",
469 	.fixup_init	= timer_fixup_init,
470 	.fixup_activate	= timer_fixup_activate,
471 	.fixup_free	= timer_fixup_free,
472 };
473 
474 static inline void debug_timer_init(struct timer_list *timer)
475 {
476 	debug_object_init(timer, &timer_debug_descr);
477 }
478 
479 static inline void debug_timer_activate(struct timer_list *timer)
480 {
481 	debug_object_activate(timer, &timer_debug_descr);
482 }
483 
484 static inline void debug_timer_deactivate(struct timer_list *timer)
485 {
486 	debug_object_deactivate(timer, &timer_debug_descr);
487 }
488 
489 static inline void debug_timer_free(struct timer_list *timer)
490 {
491 	debug_object_free(timer, &timer_debug_descr);
492 }
493 
494 static void __init_timer(struct timer_list *timer);
495 
496 void init_timer_on_stack(struct timer_list *timer)
497 {
498 	debug_object_init_on_stack(timer, &timer_debug_descr);
499 	__init_timer(timer);
500 }
501 EXPORT_SYMBOL_GPL(init_timer_on_stack);
502 
503 void destroy_timer_on_stack(struct timer_list *timer)
504 {
505 	debug_object_free(timer, &timer_debug_descr);
506 }
507 EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
508 
509 #else
510 static inline void debug_timer_init(struct timer_list *timer) { }
511 static inline void debug_timer_activate(struct timer_list *timer) { }
512 static inline void debug_timer_deactivate(struct timer_list *timer) { }
513 #endif
514 
515 static void __init_timer(struct timer_list *timer)
516 {
517 	timer->entry.next = NULL;
518 	timer->base = __raw_get_cpu_var(tvec_bases);
519 #ifdef CONFIG_TIMER_STATS
520 	timer->start_site = NULL;
521 	timer->start_pid = -1;
522 	memset(timer->start_comm, 0, TASK_COMM_LEN);
523 #endif
524 }
525 
526 /**
527  * init_timer - initialize a timer.
528  * @timer: the timer to be initialized
529  *
530  * init_timer() must be done to a timer prior to calling *any* of the
531  * other timer functions.
532  */
533 void init_timer(struct timer_list *timer)
534 {
535 	debug_timer_init(timer);
536 	__init_timer(timer);
537 }
538 EXPORT_SYMBOL(init_timer);
539 
540 void init_timer_deferrable(struct timer_list *timer)
541 {
542 	init_timer(timer);
543 	timer_set_deferrable(timer);
544 }
545 EXPORT_SYMBOL(init_timer_deferrable);
546 
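/*
 * Example (sketch under assumptions, not part of the original file):
 * open-coded setup of a timer with this API typically looks like the
 * following, where 'my_dev', 'dev' and 'my_timeout' are hypothetical:
 *
 *	void my_timeout(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		...
 *	}
 *
 *	init_timer(&dev->timer);
 *	dev->timer.function = my_timeout;
 *	dev->timer.data = (unsigned long)dev;
 *	dev->timer.expires = jiffies + HZ;
 *	add_timer(&dev->timer);
 *
 * The setup_timer() helper from <linux/timer.h> collapses the first
 * three steps into one call.
 */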
547 static inline void detach_timer(struct timer_list *timer,
548 				int clear_pending)
549 {
550 	struct list_head *entry = &timer->entry;
551 
552 	debug_timer_deactivate(timer);
553 
554 	__list_del(entry->prev, entry->next);
555 	if (clear_pending)
556 		entry->next = NULL;
557 	entry->prev = LIST_POISON2;
558 }
559 
560 /*
561  * We are using hashed locking: holding per_cpu(tvec_bases).lock
562  * means that all timers which are tied to this base via timer->base are
563  * locked, and the base itself is locked too.
564  *
565  * So __run_timers/migrate_timers can safely modify all timers which could
566  * be found on ->tvX lists.
567  *
568  * When the timer's base is locked, and the timer removed from list, it is
569  * possible to set timer->base = NULL and drop the lock: the timer remains
570  * locked.
571  */
572 static struct tvec_base *lock_timer_base(struct timer_list *timer,
573 					unsigned long *flags)
574 	__acquires(timer->base->lock)
575 {
576 	struct tvec_base *base;
577 
578 	for (;;) {
579 		struct tvec_base *prelock_base = timer->base;
580 		base = tbase_get_base(prelock_base);
581 		if (likely(base != NULL)) {
582 			spin_lock_irqsave(&base->lock, *flags);
583 			if (likely(prelock_base == timer->base))
584 				return base;
585 			/* The timer has migrated to another CPU */
586 			spin_unlock_irqrestore(&base->lock, *flags);
587 		}
588 		cpu_relax();
589 	}
590 }
591 
592 int __mod_timer(struct timer_list *timer, unsigned long expires)
593 {
594 	struct tvec_base *base, *new_base;
595 	unsigned long flags;
596 	int ret = 0;
597 
598 	timer_stats_timer_set_start_info(timer);
599 	BUG_ON(!timer->function);
600 
601 	base = lock_timer_base(timer, &flags);
602 
603 	if (timer_pending(timer)) {
604 		detach_timer(timer, 0);
605 		ret = 1;
606 	}
607 
608 	debug_timer_activate(timer);
609 
610 	new_base = __get_cpu_var(tvec_bases);
611 
612 	if (base != new_base) {
613 		/*
614 		 * We are trying to schedule the timer on the local CPU.
615 		 * However we can't change timer's base while it is running,
616 		 * otherwise del_timer_sync() can't detect that the timer's
617 		 * handler has not yet finished. This also guarantees that
618 		 * the timer is serialized wrt itself.
619 		 */
620 		if (likely(base->running_timer != timer)) {
621 			/* See the comment in lock_timer_base() */
622 			timer_set_base(timer, NULL);
623 			spin_unlock(&base->lock);
624 			base = new_base;
625 			spin_lock(&base->lock);
626 			timer_set_base(timer, base);
627 		}
628 	}
629 
630 	timer->expires = expires;
631 	internal_add_timer(base, timer);
632 	spin_unlock_irqrestore(&base->lock, flags);
633 
634 	return ret;
635 }
636 
637 EXPORT_SYMBOL(__mod_timer);
638 
639 /**
640  * add_timer_on - start a timer on a particular CPU
641  * @timer: the timer to be added
642  * @cpu: the CPU to start it on
643  *
644  * This is not very scalable on SMP. Double adds are not possible.
645  */
646 void add_timer_on(struct timer_list *timer, int cpu)
647 {
648 	struct tvec_base *base = per_cpu(tvec_bases, cpu);
649 	unsigned long flags;
650 
651 	timer_stats_timer_set_start_info(timer);
652 	BUG_ON(timer_pending(timer) || !timer->function);
653 	spin_lock_irqsave(&base->lock, flags);
654 	timer_set_base(timer, base);
655 	debug_timer_activate(timer);
656 	internal_add_timer(base, timer);
657 	/*
658 	 * Check whether the other CPU is idle and needs to be
659 	 * triggered to reevaluate the timer wheel when nohz is
660 	 * active. We are protected against the other CPU fiddling
661 	 * with the timer by holding the timer base lock. This also
662 	 * makes sure that a CPU on the way to idle can not evaluate
663 	 * the timer wheel.
664 	 */
665 	wake_up_idle_cpu(cpu);
666 	spin_unlock_irqrestore(&base->lock, flags);
667 }
668 
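/*
 * Example (illustrative, hypothetical names): per-CPU housekeeping code
 * can pin a timer to a specific CPU instead of the local one:
 *
 *	dev->watchdog.expires = jiffies + HZ;
 *	add_timer_on(&dev->watchdog, cpu);
 *
 * Because of the BUG_ON above, the timer must not already be pending;
 * rearming is typically done from the handler itself.
 */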
669 /**
670  * mod_timer - modify a timer's timeout
671  * @timer: the timer to be modified
672  * @expires: new timeout in jiffies
673  *
674  * mod_timer() is a more efficient way to update the expire field of an
675  * active timer (if the timer is inactive it will be activated)
676  *
677  * mod_timer(timer, expires) is equivalent to:
678  *
679  *     del_timer(timer); timer->expires = expires; add_timer(timer);
680  *
681  * Note that if there are multiple unserialized concurrent users of the
682  * same timer, then mod_timer() is the only safe way to modify the timeout,
683  * since add_timer() cannot modify an already running timer.
684  *
685  * The function returns whether it has modified a pending timer or not.
686  * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
687  * active timer returns 1.)
688  */
689 int mod_timer(struct timer_list *timer, unsigned long expires)
690 {
691 	BUG_ON(!timer->function);
692 
693 	timer_stats_timer_set_start_info(timer);
694 	/*
695 	 * This is a common optimization triggered by the
696 	 * networking code - if the timer is re-modified
697 	 * to be the same thing then just return:
698 	 */
699 	if (timer->expires == expires && timer_pending(timer))
700 		return 1;
701 
702 	return __mod_timer(timer, expires);
703 }
704 
705 EXPORT_SYMBOL(mod_timer);
706 
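/*
 * Example (sketch, not in the original): pushing out an inactivity
 * timeout each time activity is seen is the canonical mod_timer()
 * pattern, and is safe whether the timer is currently pending or not:
 *
 *	mod_timer(&dev->idle_timer, jiffies + msecs_to_jiffies(500));
 *
 * 'dev->idle_timer' is a hypothetical timer that has already been
 * initialized with a valid ->function.
 */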
707 /**
708  * del_timer - deactivate a timer.
709  * @timer: the timer to be deactivated
710  *
711  * del_timer() deactivates a timer - this works on both active and inactive
712  * timers.
713  *
714  * The function returns whether it has deactivated a pending timer or not.
715  * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
716  * active timer returns 1.)
717  */
718 int del_timer(struct timer_list *timer)
719 {
720 	struct tvec_base *base;
721 	unsigned long flags;
722 	int ret = 0;
723 
724 	timer_stats_timer_clear_start_info(timer);
725 	if (timer_pending(timer)) {
726 		base = lock_timer_base(timer, &flags);
727 		if (timer_pending(timer)) {
728 			detach_timer(timer, 1);
729 			ret = 1;
730 		}
731 		spin_unlock_irqrestore(&base->lock, flags);
732 	}
733 
734 	return ret;
735 }
736 
737 EXPORT_SYMBOL(del_timer);
738 
739 #ifdef CONFIG_SMP
740 /**
741  * try_to_del_timer_sync - Try to deactivate a timer
742  * @timer: timer to deactivate
743  *
744  * This function tries to deactivate a timer. Upon successful (ret >= 0)
745  * exit the timer is not queued and the handler is not running on any CPU.
746  *
747  * It must not be called from interrupt contexts.
748  */
749 int try_to_del_timer_sync(struct timer_list *timer)
750 {
751 	struct tvec_base *base;
752 	unsigned long flags;
753 	int ret = -1;
754 
755 	base = lock_timer_base(timer, &flags);
756 
757 	if (base->running_timer == timer)
758 		goto out;
759 
760 	ret = 0;
761 	if (timer_pending(timer)) {
762 		detach_timer(timer, 1);
763 		ret = 1;
764 	}
765 out:
766 	spin_unlock_irqrestore(&base->lock, flags);
767 
768 	return ret;
769 }
770 
771 EXPORT_SYMBOL(try_to_del_timer_sync);
772 
773 /**
774  * del_timer_sync - deactivate a timer and wait for the handler to finish.
775  * @timer: the timer to be deactivated
776  *
777  * This function only differs from del_timer() on SMP: besides deactivating
778  * the timer it also makes sure the handler has finished executing on other
779  * CPUs.
780  *
781  * Synchronization rules: Callers must prevent restarting of the timer,
782  * otherwise this function is meaningless. It must not be called from
783  * interrupt contexts. The caller must not hold locks which would prevent
784  * completion of the timer's handler. The timer's handler must not call
785  * add_timer_on(). Upon exit the timer is not queued and the handler is
786  * not running on any CPU.
787  *
788  * The function returns whether it has deactivated a pending timer or not.
789  */
790 int del_timer_sync(struct timer_list *timer)
791 {
792 	for (;;) {
793 		int ret = try_to_del_timer_sync(timer);
794 		if (ret >= 0)
795 			return ret;
796 		cpu_relax();
797 	}
798 }
799 
800 EXPORT_SYMBOL(del_timer_sync);
801 #endif
802 
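/*
 * Example (hedged sketch): the usual teardown order for a timer whose
 * handler must not run after this point is to stop whatever rearms it
 * first, then kill the timer synchronously:
 *
 *	dev->shutting_down = 1;			hypothetical flag the handler checks
 *	del_timer_sync(&dev->timer);		waits for a running handler
 *	kfree(dev);				now safe: the handler cannot run again
 *
 * Per the synchronization rules documented above, calling this while
 * holding a lock that the handler also takes would deadlock.
 */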
803 static int cascade(struct tvec_base *base, struct tvec *tv, int index)
804 {
805 	/* cascade all the timers from tv up one level */
806 	struct timer_list *timer, *tmp;
807 	struct list_head tv_list;
808 
809 	list_replace_init(tv->vec + index, &tv_list);
810 
811 	/*
812 	 * We are removing _all_ timers from the list, so we
813 	 * don't have to detach them individually.
814 	 */
815 	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
816 		BUG_ON(tbase_get_base(timer->base) != base);
817 		internal_add_timer(base, timer);
818 	}
819 
820 	return index;
821 }
822 
823 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
824 
825 /**
826  * __run_timers - run all expired timers (if any) on this CPU.
827  * @base: the timer vector to be processed.
828  *
829  * This function cascades all vectors and executes all expired timer
830  * vectors.
831  */
832 static inline void __run_timers(struct tvec_base *base)
833 {
834 	struct timer_list *timer;
835 
836 	spin_lock_irq(&base->lock);
837 	while (time_after_eq(jiffies, base->timer_jiffies)) {
838 		struct list_head work_list;
839 		struct list_head *head = &work_list;
840 		int index = base->timer_jiffies & TVR_MASK;
841 
842 		/*
843 		 * Cascade timers:
844 		 */
845 		if (!index &&
846 			(!cascade(base, &base->tv2, INDEX(0))) &&
847 				(!cascade(base, &base->tv3, INDEX(1))) &&
848 					!cascade(base, &base->tv4, INDEX(2)))
849 			cascade(base, &base->tv5, INDEX(3));
850 		++base->timer_jiffies;
851 		list_replace_init(base->tv1.vec + index, &work_list);
852 		while (!list_empty(head)) {
853 			void (*fn)(unsigned long);
854 			unsigned long data;
855 
856 			timer = list_first_entry(head, struct timer_list,entry);
857 			fn = timer->function;
858 			data = timer->data;
859 
860 			timer_stats_account_timer(timer);
861 
862 			set_running_timer(base, timer);
863 			detach_timer(timer, 1);
864 			spin_unlock_irq(&base->lock);
865 			{
866 				int preempt_count = preempt_count();
867 				fn(data);
868 				if (preempt_count != preempt_count()) {
869 					printk(KERN_ERR "huh, entered %p "
870 					       "with preempt_count %08x, exited"
871 					       " with %08x?\n",
872 					       fn, preempt_count,
873 					       preempt_count());
874 					BUG();
875 				}
876 			}
877 			spin_lock_irq(&base->lock);
878 		}
879 	}
880 	set_running_timer(base, NULL);
881 	spin_unlock_irq(&base->lock);
882 }
883 
884 #ifdef CONFIG_NO_HZ
885 /*
886  * Find out when the next timer event is due to happen. This
887  * is used on S/390 to stop all activity when a cpu is idle.
888  * This function needs to be called with interrupts disabled.
889  */
890 static unsigned long __next_timer_interrupt(struct tvec_base *base)
891 {
892 	unsigned long timer_jiffies = base->timer_jiffies;
893 	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
894 	int index, slot, array, found = 0;
895 	struct timer_list *nte;
896 	struct tvec *varray[4];
897 
898 	/* Look for timer events in tv1. */
899 	index = slot = timer_jiffies & TVR_MASK;
900 	do {
901 		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
902 			if (tbase_get_deferrable(nte->base))
903 				continue;
904 
905 			found = 1;
906 			expires = nte->expires;
907 			/* Look at the cascade bucket(s)? */
908 			if (!index || slot < index)
909 				goto cascade;
910 			return expires;
911 		}
912 		slot = (slot + 1) & TVR_MASK;
913 	} while (slot != index);
914 
915 cascade:
916 	/* Calculate the next cascade event */
917 	if (index)
918 		timer_jiffies += TVR_SIZE - index;
919 	timer_jiffies >>= TVR_BITS;
920 
921 	/* Check tv2-tv5. */
922 	varray[0] = &base->tv2;
923 	varray[1] = &base->tv3;
924 	varray[2] = &base->tv4;
925 	varray[3] = &base->tv5;
926 
927 	for (array = 0; array < 4; array++) {
928 		struct tvec *varp = varray[array];
929 
930 		index = slot = timer_jiffies & TVN_MASK;
931 		do {
932 			list_for_each_entry(nte, varp->vec + slot, entry) {
933 				found = 1;
934 				if (time_before(nte->expires, expires))
935 					expires = nte->expires;
936 			}
937 			/*
938 			 * Are we still searching for the first timer or are
939 			 * we looking up the cascade buckets?
940 			 */
941 			if (found) {
942 				/* Look at the cascade bucket(s)? */
943 				if (!index || slot < index)
944 					break;
945 				return expires;
946 			}
947 			slot = (slot + 1) & TVN_MASK;
948 		} while (slot != index);
949 
950 		if (index)
951 			timer_jiffies += TVN_SIZE - index;
952 		timer_jiffies >>= TVN_BITS;
953 	}
954 	return expires;
955 }
956 
957 /*
958  * Check, if the next hrtimer event is before the next timer wheel
959  * event:
960  */
961 static unsigned long cmp_next_hrtimer_event(unsigned long now,
962 					    unsigned long expires)
963 {
964 	ktime_t hr_delta = hrtimer_get_next_event();
965 	struct timespec tsdelta;
966 	unsigned long delta;
967 
968 	if (hr_delta.tv64 == KTIME_MAX)
969 		return expires;
970 
971 	/*
972 	 * Expired timer available, let it expire in the next tick
973 	 */
974 	if (hr_delta.tv64 <= 0)
975 		return now + 1;
976 
977 	tsdelta = ktime_to_timespec(hr_delta);
978 	delta = timespec_to_jiffies(&tsdelta);
979 
980 	/*
981 	 * Limit the delta to the max value, which is checked in
982 	 * tick_nohz_stop_sched_tick():
983 	 */
984 	if (delta > NEXT_TIMER_MAX_DELTA)
985 		delta = NEXT_TIMER_MAX_DELTA;
986 
987 	/*
988 	 * Take rounding errors in to account and make sure, that it
989 	 * expires in the next tick. Otherwise we go into an endless
990 	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
991 	 * the timer softirq
992 	 */
993 	if (delta < 1)
994 		delta = 1;
995 	now += delta;
996 	if (time_before(now, expires))
997 		return now;
998 	return expires;
999 }
1000 
1001 /**
1002  * get_next_timer_interrupt - return the jiffy of the next pending timer
1003  * @now: current time (in jiffies)
1004  */
1005 unsigned long get_next_timer_interrupt(unsigned long now)
1006 {
1007 	struct tvec_base *base = __get_cpu_var(tvec_bases);
1008 	unsigned long expires;
1009 
1010 	spin_lock(&base->lock);
1011 	expires = __next_timer_interrupt(base);
1012 	spin_unlock(&base->lock);
1013 
1014 	if (time_before_eq(expires, now))
1015 		return now;
1016 
1017 	return cmp_next_hrtimer_event(now, expires);
1018 }
1019 #endif
1020 
1021 /*
1022  * Called from the timer interrupt handler to charge one tick to the current
1023  * process.  user_tick is 1 if the tick is user time, 0 for system.
1024  */
1025 void update_process_times(int user_tick)
1026 {
1027 	struct task_struct *p = current;
1028 	int cpu = smp_processor_id();
1029 
1030 	/* Note: this timer irq context must be accounted for as well. */
1031 	account_process_tick(p, user_tick);
1032 	run_local_timers();
1033 	if (rcu_pending(cpu))
1034 		rcu_check_callbacks(cpu, user_tick);
1035 	printk_tick();
1036 	scheduler_tick();
1037 	run_posix_cpu_timers(p);
1038 }
1039 
1040 /*
1041  * Nr of active tasks - counted in fixed-point numbers
1042  */
1043 static unsigned long count_active_tasks(void)
1044 {
1045 	return nr_active() * FIXED_1;
1046 }
1047 
1048 /*
1049  * Hmm.. Changed this, as the GNU make sources (load.c) seems to
1050  * imply that avenrun[] is the standard name for this kind of thing.
1051  * Nothing else seems to be standardized: the fractional size etc
1052  * all seem to differ on different machines.
1053  *
1054  * Requires xtime_lock to access.
1055  */
1056 unsigned long avenrun[3];
1057 
1058 EXPORT_SYMBOL(avenrun);
1059 
1060 /*
1061  * calc_load - given tick count, update the avenrun load estimates.
1062  * This is called while holding a write_lock on xtime_lock.
1063  */
1064 static inline void calc_load(unsigned long ticks)
1065 {
1066 	unsigned long active_tasks; /* fixed-point */
1067 	static int count = LOAD_FREQ;
1068 
1069 	count -= ticks;
1070 	if (unlikely(count < 0)) {
1071 		active_tasks = count_active_tasks();
1072 		do {
1073 			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
1074 			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
1075 			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
1076 			count += LOAD_FREQ;
1077 		} while (count < 0);
1078 	}
1079 }
1080 
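/*
 * Worked example (illustrative; FSHIFT=11 and the EXP_* constants come
 * from <linux/sched.h>): CALC_LOAD(load, exp, n) computes
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT
 *
 * i.e. an exponentially decaying average refreshed every LOAD_FREQ
 * (5 seconds worth of ticks). Starting from avenrun[0] == 0 with one
 * task permanently runnable (n == FIXED_1 == 2048, EXP_1 == 1884), the
 * 1-minute load climbs toward 1.0 as roughly 0.08, 0.15, 0.22, ...
 * per update.
 */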
1081 /*
1082  * This function runs timers and the timer-tq in bottom half context.
1083  */
1084 static void run_timer_softirq(struct softirq_action *h)
1085 {
1086 	struct tvec_base *base = __get_cpu_var(tvec_bases);
1087 
1088 	hrtimer_run_pending();
1089 
1090 	if (time_after_eq(jiffies, base->timer_jiffies))
1091 		__run_timers(base);
1092 }
1093 
1094 /*
1095  * Called by the local, per-CPU timer interrupt on SMP.
1096  */
1097 void run_local_timers(void)
1098 {
1099 	hrtimer_run_queues();
1100 	raise_softirq(TIMER_SOFTIRQ);
1101 	softlockup_tick();
1102 }
1103 
1104 /*
1105  * Called by the timer interrupt. xtime_lock must already be taken
1106  * by the timer IRQ!
1107  */
1108 static inline void update_times(unsigned long ticks)
1109 {
1110 	update_wall_time();
1111 	calc_load(ticks);
1112 }
1113 
1114 /*
1115  * The 64-bit jiffies value is not atomic - you MUST NOT read it
1116  * without sampling the sequence number in xtime_lock.
1117  * jiffies is defined in the linker script...
1118  */
1119 
1120 void do_timer(unsigned long ticks)
1121 {
1122 	jiffies_64 += ticks;
1123 	update_times(ticks);
1124 }
1125 
1126 #ifdef __ARCH_WANT_SYS_ALARM
1127 
1128 /*
1129  * For backwards compatibility?  This can be done in libc so Alpha
1130  * and all newer ports shouldn't need it.
1131  */
1132 SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1133 {
1134 	return alarm_setitimer(seconds);
1135 }
1136 
1137 #endif
1138 
1139 #ifndef __alpha__
1140 
1141 /*
1142  * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
1143  * should be moved into arch/i386 instead?
1144  */
1145 
1146 /**
1147  * sys_getpid - return the thread group id of the current process
1148  *
1149  * Note, despite the name, this returns the tgid not the pid.  The tgid and
1150  * the pid are identical unless CLONE_THREAD was specified on clone() in
1151  * which case the tgid is the same in all threads of the same group.
1152  *
1153  * This is SMP safe as current->tgid does not change.
1154  */
1155 SYSCALL_DEFINE0(getpid)
1156 {
1157 	return task_tgid_vnr(current);
1158 }
1159 
1160 /*
1161  * Accessing ->real_parent is not SMP-safe, it could
1162  * change from under us. However, we can use a stale
1163  * value of ->real_parent under rcu_read_lock(), see
1164  * release_task()->call_rcu(delayed_put_task_struct).
1165  */
1166 SYSCALL_DEFINE0(getppid)
1167 {
1168 	int pid;
1169 
1170 	rcu_read_lock();
1171 	pid = task_tgid_vnr(current->real_parent);
1172 	rcu_read_unlock();
1173 
1174 	return pid;
1175 }
1176 
1177 SYSCALL_DEFINE0(getuid)
1178 {
1179 	/* Only we change this so SMP safe */
1180 	return current_uid();
1181 }
1182 
1183 SYSCALL_DEFINE0(geteuid)
1184 {
1185 	/* Only we change this so SMP safe */
1186 	return current_euid();
1187 }
1188 
1189 SYSCALL_DEFINE0(getgid)
1190 {
1191 	/* Only we change this so SMP safe */
1192 	return current_gid();
1193 }
1194 
1195 SYSCALL_DEFINE0(getegid)
1196 {
1197 	/* Only we change this so SMP safe */
1198 	return  current_egid();
1199 }
1200 
1201 #endif
1202 
1203 static void process_timeout(unsigned long __data)
1204 {
1205 	wake_up_process((struct task_struct *)__data);
1206 }
1207 
1208 /**
1209  * schedule_timeout - sleep until timeout
1210  * @timeout: timeout value in jiffies
1211  *
1212  * Make the current task sleep until @timeout jiffies have
1213  * elapsed. The routine will return immediately unless
1214  * the current task state has been set (see set_current_state()).
1215  *
1216  * You can set the task state as follows -
1217  *
1218  * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1219  * pass before the routine returns. The routine will return 0
1220  *
1221  * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1222  * delivered to the current task. In this case the remaining time
1223  * in jiffies will be returned, or 0 if the timer expired in time
1224  *
1225  * The current task state is guaranteed to be TASK_RUNNING when this
1226  * routine returns.
1227  *
1228  * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1229  * the CPU away without a bound on the timeout. In this case the return
1230  * value will be %MAX_SCHEDULE_TIMEOUT.
1231  *
1232  * In all cases the return value is guaranteed to be non-negative.
1233  */
1234 signed long __sched schedule_timeout(signed long timeout)
1235 {
1236 	struct timer_list timer;
1237 	unsigned long expire;
1238 
1239 	switch (timeout)
1240 	{
1241 	case MAX_SCHEDULE_TIMEOUT:
1242 		/*
1243 		 * These two special cases are useful to be comfortable
1244 		 * in the caller. Nothing more. We could take
1245 		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
1246 		 * but I'd like to return a valid offset (>=0) to allow
1247 		 * the caller to do everything it wants with the retval.
1248 		 */
1249 		schedule();
1250 		goto out;
1251 	default:
1252 		/*
1253 		 * Another bit of PARANOID. Note that the retval will be
1254 		 * 0 since no piece of kernel is supposed to do a check
1255 		 * for a negative retval of schedule_timeout() (since it
1256 		 * should never happen anyway). You just have the printk()
1257 		 * that will tell you if something has gone wrong and where.
1258 		 */
1259 		if (timeout < 0) {
1260 			printk(KERN_ERR "schedule_timeout: wrong timeout "
1261 				"value %lx\n", timeout);
1262 			dump_stack();
1263 			current->state = TASK_RUNNING;
1264 			goto out;
1265 		}
1266 	}
1267 
1268 	expire = timeout + jiffies;
1269 
1270 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1271 	__mod_timer(&timer, expire);
1272 	schedule();
1273 	del_singleshot_timer_sync(&timer);
1274 
1275 	/* Remove the timer from the object tracker */
1276 	destroy_timer_on_stack(&timer);
1277 
1278 	timeout = expire - jiffies;
1279 
1280  out:
1281 	return timeout < 0 ? 0 : timeout;
1282 }
1283 EXPORT_SYMBOL(schedule_timeout);
1284 
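/*
 * Example (sketch, not part of the original file): callers must set the
 * task state first, otherwise schedule_timeout() returns immediately:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	sleeps ~1s or until a signal
 *
 * 'remaining' is a hypothetical signed long holding the unexpired part
 * of the timeout. The schedule_timeout_interruptible()/_killable()/
 * _uninterruptible() wrappers below do this pairing for you.
 */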
1285 /*
1286  * We can use __set_current_state() here because schedule_timeout() calls
1287  * schedule() unconditionally.
1288  */
1289 signed long __sched schedule_timeout_interruptible(signed long timeout)
1290 {
1291 	__set_current_state(TASK_INTERRUPTIBLE);
1292 	return schedule_timeout(timeout);
1293 }
1294 EXPORT_SYMBOL(schedule_timeout_interruptible);
1295 
1296 signed long __sched schedule_timeout_killable(signed long timeout)
1297 {
1298 	__set_current_state(TASK_KILLABLE);
1299 	return schedule_timeout(timeout);
1300 }
1301 EXPORT_SYMBOL(schedule_timeout_killable);
1302 
1303 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1304 {
1305 	__set_current_state(TASK_UNINTERRUPTIBLE);
1306 	return schedule_timeout(timeout);
1307 }
1308 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1309 
1310 /* Thread ID - the internal kernel "pid" */
1311 SYSCALL_DEFINE0(gettid)
1312 {
1313 	return task_pid_vnr(current);
1314 }
1315 
1316 /**
1317  * do_sysinfo - fill in sysinfo struct
1318  * @info: pointer to buffer to fill
1319  */
1320 int do_sysinfo(struct sysinfo *info)
1321 {
1322 	unsigned long mem_total, sav_total;
1323 	unsigned int mem_unit, bitcount;
1324 	unsigned long seq;
1325 
1326 	memset(info, 0, sizeof(struct sysinfo));
1327 
1328 	do {
1329 		struct timespec tp;
1330 		seq = read_seqbegin(&xtime_lock);
1331 
1332 		/*
1333 		 * This is annoying.  The below is the same thing
1334 		 * posix_get_clock_monotonic() does, but it wants to
1335 		 * take the lock which we want to cover the loads stuff
1336 		 * too.
1337 		 */
1338 
1339 		getnstimeofday(&tp);
1340 		tp.tv_sec += wall_to_monotonic.tv_sec;
1341 		tp.tv_nsec += wall_to_monotonic.tv_nsec;
1342 		monotonic_to_bootbased(&tp);
1343 		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
1344 			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
1345 			tp.tv_sec++;
1346 		}
1347 		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1348 
1349 		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
1350 		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
1351 		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
1352 
1353 		info->procs = nr_threads;
1354 	} while (read_seqretry(&xtime_lock, seq));
1355 
1356 	si_meminfo(info);
1357 	si_swapinfo(info);
1358 
1359 	/*
1360 	 * If the sum of all the available memory (i.e. ram + swap)
1361 	 * is less than can be stored in a 32 bit unsigned long then
1362 	 * we can be binary compatible with 2.2.x kernels.  If not,
1363 	 * well, in that case 2.2.x was broken anyways...
1364 	 *
1365 	 *  -Erik Andersen <andersee@debian.org>
1366 	 */
1367 
1368 	mem_total = info->totalram + info->totalswap;
1369 	if (mem_total < info->totalram || mem_total < info->totalswap)
1370 		goto out;
1371 	bitcount = 0;
1372 	mem_unit = info->mem_unit;
1373 	while (mem_unit > 1) {
1374 		bitcount++;
1375 		mem_unit >>= 1;
1376 		sav_total = mem_total;
1377 		mem_total <<= 1;
1378 		if (mem_total < sav_total)
1379 			goto out;
1380 	}
1381 
1382 	/*
1383 	 * If mem_total did not overflow, multiply all memory values by
1384 	 * info->mem_unit and set it to 1.  This leaves things compatible
1385 	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
1386 	 * kernels...
1387 	 */
1388 
1389 	info->mem_unit = 1;
1390 	info->totalram <<= bitcount;
1391 	info->freeram <<= bitcount;
1392 	info->sharedram <<= bitcount;
1393 	info->bufferram <<= bitcount;
1394 	info->totalswap <<= bitcount;
1395 	info->freeswap <<= bitcount;
1396 	info->totalhigh <<= bitcount;
1397 	info->freehigh <<= bitcount;
1398 
1399 out:
1400 	return 0;
1401 }
1402 
1403 SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
1404 {
1405 	struct sysinfo val;
1406 
1407 	do_sysinfo(&val);
1408 
1409 	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1410 		return -EFAULT;
1411 
1412 	return 0;
1413 }
1414 
1415 static int __cpuinit init_timers_cpu(int cpu)
1416 {
1417 	int j;
1418 	struct tvec_base *base;
1419 	static char __cpuinitdata tvec_base_done[NR_CPUS];
1420 
1421 	if (!tvec_base_done[cpu]) {
1422 		static char boot_done;
1423 
1424 		if (boot_done) {
1425 			/*
1426 			 * The APs use this path later in boot
1427 			 */
1428 			base = kmalloc_node(sizeof(*base),
1429 						GFP_KERNEL | __GFP_ZERO,
1430 						cpu_to_node(cpu));
1431 			if (!base)
1432 				return -ENOMEM;
1433 
1434 			/* Make sure that tvec_base is 2 byte aligned */
1435 			if (tbase_get_deferrable(base)) {
1436 				WARN_ON(1);
1437 				kfree(base);
1438 				return -ENOMEM;
1439 			}
1440 			per_cpu(tvec_bases, cpu) = base;
1441 		} else {
1442 			/*
1443 			 * This is for the boot CPU - we use compile-time
1444 			 * static initialisation because per-cpu memory isn't
1445 			 * ready yet and because the memory allocators are not
1446 			 * initialised either.
1447 			 */
1448 			boot_done = 1;
1449 			base = &boot_tvec_bases;
1450 		}
1451 		tvec_base_done[cpu] = 1;
1452 	} else {
1453 		base = per_cpu(tvec_bases, cpu);
1454 	}
1455 
1456 	spin_lock_init(&base->lock);
1457 
1458 	for (j = 0; j < TVN_SIZE; j++) {
1459 		INIT_LIST_HEAD(base->tv5.vec + j);
1460 		INIT_LIST_HEAD(base->tv4.vec + j);
1461 		INIT_LIST_HEAD(base->tv3.vec + j);
1462 		INIT_LIST_HEAD(base->tv2.vec + j);
1463 	}
1464 	for (j = 0; j < TVR_SIZE; j++)
1465 		INIT_LIST_HEAD(base->tv1.vec + j);
1466 
1467 	base->timer_jiffies = jiffies;
1468 	return 0;
1469 }
1470 
1471 #ifdef CONFIG_HOTPLUG_CPU
1472 static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
1473 {
1474 	struct timer_list *timer;
1475 
1476 	while (!list_empty(head)) {
1477 		timer = list_first_entry(head, struct timer_list, entry);
1478 		detach_timer(timer, 0);
1479 		timer_set_base(timer, new_base);
1480 		internal_add_timer(new_base, timer);
1481 	}
1482 }
1483 
1484 static void __cpuinit migrate_timers(int cpu)
1485 {
1486 	struct tvec_base *old_base;
1487 	struct tvec_base *new_base;
1488 	int i;
1489 
1490 	BUG_ON(cpu_online(cpu));
1491 	old_base = per_cpu(tvec_bases, cpu);
1492 	new_base = get_cpu_var(tvec_bases);
1493 	/*
1494 	 * The caller is globally serialized and nobody else
1495 	 * takes two locks at once, deadlock is not possible.
1496 	 */
1497 	spin_lock_irq(&new_base->lock);
1498 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1499 
1500 	BUG_ON(old_base->running_timer);
1501 
1502 	for (i = 0; i < TVR_SIZE; i++)
1503 		migrate_timer_list(new_base, old_base->tv1.vec + i);
1504 	for (i = 0; i < TVN_SIZE; i++) {
1505 		migrate_timer_list(new_base, old_base->tv2.vec + i);
1506 		migrate_timer_list(new_base, old_base->tv3.vec + i);
1507 		migrate_timer_list(new_base, old_base->tv4.vec + i);
1508 		migrate_timer_list(new_base, old_base->tv5.vec + i);
1509 	}
1510 
1511 	spin_unlock(&old_base->lock);
1512 	spin_unlock_irq(&new_base->lock);
1513 	put_cpu_var(tvec_bases);
1514 }
1515 #endif /* CONFIG_HOTPLUG_CPU */
1516 
1517 static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1518 				unsigned long action, void *hcpu)
1519 {
1520 	long cpu = (long)hcpu;
1521 	switch(action) {
1522 	case CPU_UP_PREPARE:
1523 	case CPU_UP_PREPARE_FROZEN:
1524 		if (init_timers_cpu(cpu) < 0)
1525 			return NOTIFY_BAD;
1526 		break;
1527 #ifdef CONFIG_HOTPLUG_CPU
1528 	case CPU_DEAD:
1529 	case CPU_DEAD_FROZEN:
1530 		migrate_timers(cpu);
1531 		break;
1532 #endif
1533 	default:
1534 		break;
1535 	}
1536 	return NOTIFY_OK;
1537 }
1538 
1539 static struct notifier_block __cpuinitdata timers_nb = {
1540 	.notifier_call	= timer_cpu_notify,
1541 };
1542 
1543 
1544 void __init init_timers(void)
1545 {
1546 	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1547 				(void *)(long)smp_processor_id());
1548 
1549 	init_timer_stats();
1550 
1551 	BUG_ON(err == NOTIFY_BAD);
1552 	register_cpu_notifier(&timers_nb);
1553 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1554 }
1555 
1556 /**
1557  * msleep - sleep safely even with waitqueue interruptions
1558  * @msecs: Time in milliseconds to sleep for
1559  */
1560 void msleep(unsigned int msecs)
1561 {
1562 	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1563 
1564 	while (timeout)
1565 		timeout = schedule_timeout_uninterruptible(timeout);
1566 }
1567 
1568 EXPORT_SYMBOL(msleep);
1569 
1570 /**
1571  * msleep_interruptible - sleep waiting for signals
1572  * @msecs: Time in milliseconds to sleep for
1573  */
1574 unsigned long msleep_interruptible(unsigned int msecs)
1575 {
1576 	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1577 
1578 	while (timeout && !signal_pending(current))
1579 		timeout = schedule_timeout_interruptible(timeout);
1580 	return jiffies_to_msecs(timeout);
1581 }
1582 
1583 EXPORT_SYMBOL(msleep_interruptible);
1584
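/*
 * Usage note (illustrative): msleep() is the simple choice for delays in
 * sleepable context; msleep_interruptible() additionally bails out on
 * signals and reports how much of the requested time was left:
 *
 *	msleep(50);
 *	left = msleep_interruptible(1000);	0 unless a signal cut it short
 *
 * 'left' is a hypothetical unsigned long in milliseconds. For very short
 * delays in atomic context, the busy-waiting udelay()/ndelay() helpers
 * from <linux/delay.h> are the usual alternative.
 */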