
Lines Matching +full:timeout +full:- +full:ms (all matches are from kernel/time/timer.c)

1 // SPDX-License-Identifier: GPL-2.0
7 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
14 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
15 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
16 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
33 #include <linux/posix-timers.h>
54 #include "tick-internal.h"
84 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
85 * the timeout expires it indicates that normal operation is disturbed, so it
86 * does not matter much whether the timeout comes with a slight delay.
93 * capacity of the last wheel level are force expired at the maximum timeout
 * HZ 1000 steps  (columns: Level, Offset, Granularity, Range)
105 * 0 0 1 ms 0 ms - 63 ms
106 * 1 64 8 ms 64 ms - 511 ms
107 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
108 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
109 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
110 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
111 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
112 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
113 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
 * HZ  300  (same columns)
117 * 0 0 3 ms 0 ms - 210 ms
118 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
119 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
120 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
121 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
122 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
123 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
124 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
125 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 * HZ  250
129 * 0 0 4 ms 0 ms - 255 ms
130 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
131 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
132 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
133 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
134 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
135 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
136 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
137 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 * HZ  100
141 * 0 0 10 ms 0 ms - 630 ms
142 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
143 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
144 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
145 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
146 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
147 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
148 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
154 #define LVL_CLK_MASK (LVL_CLK_DIV - 1)
163 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
168 #define LVL_MASK (LVL_SIZE - 1)
180 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
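The wheel-geometry macros above fully determine the four tables: each level has LVL_SIZE buckets and every level is coarser than the previous one by a factor of LVL_CLK_DIV. A standalone sketch (plain userspace C, not kernel code) that regenerates a table from those constants; LVL_CLK_SHIFT = 3, LVL_BITS = 6 and LVL_DEPTH = 9 are the values this file defines for HZ > 100, and the top level is additionally capped at WHEEL_TIMEOUT_MAX, so its real upper bound sits slightly below what this naive formula prints:

    #include <stdio.h>

    #define LVL_CLK_SHIFT   3
    #define LVL_BITS        6
    #define LVL_SIZE        (1ULL << LVL_BITS)              /* 64 buckets per level */
    #define LVL_GRAN(n)     (1ULL << ((n) * LVL_CLK_SHIFT)) /* bucket width, jiffies */
    #define LVL_DEPTH       9

    int main(void)
    {
        unsigned long long hz = 1000;   /* pick the HZ value to tabulate */

        for (int n = 0; n < LVL_DEPTH; n++) {
            unsigned long long gran  = LVL_GRAN(n);
            unsigned long long start = n ? LVL_SIZE << ((n - 1) * LVL_CLK_SHIFT) : 0;
            unsigned long long end   = (LVL_SIZE << (n * LVL_CLK_SHIFT)) - 1;

            printf("level %d: offset %3llu  gran %9llu ms  range %llu - %llu ms\n",
                   n, n * LVL_SIZE, gran * 1000 / hz,
                   start * 1000 / hz, end * 1000 / hz);
        }
        return 0;
    }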
301 j = j - rem; in round_jiffies_common()
303 j = j - rem + HZ; in round_jiffies_common()
306 j -= cpu * 3; in round_jiffies_common()
316 * __round_jiffies - function to round jiffies to a full second
318 * @cpu: the processor number on which the timeout will happen
342 * __round_jiffies_relative - function to round jiffies to a full second
344 * @cpu: the processor number on which the timeout will happen
366 return round_jiffies_common(j + j0, cpu, false) - j0; in __round_jiffies_relative()
371 * round_jiffies - function to round jiffies to a full second
392 * round_jiffies_relative - function to round jiffies to a full second
413 * __round_jiffies_up - function to round jiffies up to a full second
415 * @cpu: the processor number on which the timeout will happen
429 * __round_jiffies_up_relative - function to round jiffies up to a full second
431 * @cpu: the processor number on which the timeout will happen
443 return round_jiffies_common(j + j0, cpu, true) - j0; in __round_jiffies_up_relative()
448 * round_jiffies_up - function to round jiffies up to a full second
463 * round_jiffies_up_relative - function to round jiffies up to a full second
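All of the rounding helpers above funnel into round_jiffies_common(), whose j -= cpu * 3 skew spreads wakeups across CPUs. A minimal usage sketch; housekeeping_fn() and its five-second period are hypothetical, round_jiffies() and mod_timer() are the interfaces shown in this file:

    /* Hypothetical self-rearming housekeeping timer: rounding the next
     * expiry to a full second lets unrelated low-resolution timers fire
     * in the same tick, and round_jiffies_common()'s per-CPU skew keeps
     * all CPUs from waking at once. */
    static void housekeeping_fn(struct timer_list *t)
    {
        /* ... periodic work ... */
        mod_timer(t, round_jiffies(jiffies + 5 * HZ));
    }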
480 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; in timer_get_idx()
485 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | in timer_set_idx()
500 * - Timer is armed at the edge of a tick in calc_index()
501 * - Truncation of the expiry time in the outer wheel levels in calc_index()
513 unsigned long delta = expires - clk; in calc_wheel_index()
543 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry); in calc_wheel_index()
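calc_wheel_index() picks the level from the delta between the expiry and the wheel clock, then delegates the bucket choice to calc_index(). A condensed sketch of that final step, with the kernel's LVL_SHIFT()/LVL_OFFS() helpers expanded inline (same arithmetic, presented as one function under that assumption):

    /* Round the expiry *up* to the next bucket boundary of level 'lvl',
     * report when that bucket actually fires, and return its index in
     * base->vectors. Rounding up means a timer never fires early, only
     * up to one bucket granularity late. */
    static unsigned calc_index_sketch(unsigned long expires, unsigned lvl,
                                      unsigned long *bucket_expiry)
    {
        unsigned shift = lvl * LVL_CLK_SHIFT;

        expires = (expires >> shift) + 1;   /* next bucket boundary */
        *bucket_expiry = expires << shift;  /* real firing time */
        return lvl * LVL_SIZE + (expires & LVL_MASK);
    }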
558 if (timer->flags & TIMER_DEFERRABLE) { in trigger_dyntick_cpu()
559 if (tick_nohz_full_cpu(base->cpu)) in trigger_dyntick_cpu()
560 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
567 * then it can't set base->is_idle as we hold the base lock: in trigger_dyntick_cpu()
569 if (base->is_idle) in trigger_dyntick_cpu()
570 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
582 hlist_add_head(&timer->entry, base->vectors + idx); in enqueue_timer()
583 __set_bit(idx, base->pending_map); in enqueue_timer()
586 trace_timer_start(timer, timer->expires, timer->flags); in enqueue_timer()
591 * (bucket_expiry) instead of timer->expires. in enqueue_timer()
593 if (time_before(bucket_expiry, base->next_expiry)) { in enqueue_timer()
598 base->next_expiry = bucket_expiry; in enqueue_timer()
599 base->timers_pending = true; in enqueue_timer()
600 base->next_expiry_recalc = false; in enqueue_timer()
610 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry); in internal_add_timer()
620 return ((struct timer_list *) addr)->function; in timer_debug_hint()
627 return (timer->entry.pprev == NULL && in timer_is_static_object()
628 timer->entry.next == TIMER_ENTRY_STATIC); in timer_is_static_object()
633 * - an active object is initialized
657 * - an active object is activated
658 * - an unknown non-static object is activated
679 * - an active object is freed
697 * - an untracked/uninit-ed object is found
792 timer->entry.pprev = NULL; in do_init_timer()
793 timer->function = func; in do_init_timer()
796 timer->flags = flags | raw_smp_processor_id(); in do_init_timer()
797 lockdep_init_map(&timer->lockdep_map, name, key, 0); in do_init_timer()
801 * init_timer_key - initialize a timer
823 struct hlist_node *entry = &timer->entry; in detach_timer()
829 entry->pprev = NULL; in detach_timer()
830 entry->next = LIST_POISON2; in detach_timer()
841 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) { in detach_if_pending()
842 __clear_bit(idx, base->pending_map); in detach_if_pending()
843 base->next_expiry_recalc = true; in detach_if_pending()
898 * Also while executing timers, base->clk is 1 offset ahead in forward_timer_base()
901 if ((long)(jnow - base->clk) < 1) in forward_timer_base()
908 if (time_after(base->next_expiry, jnow)) { in forward_timer_base()
909 base->clk = jnow; in forward_timer_base()
911 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) in forward_timer_base()
913 base->clk = base->next_expiry; in forward_timer_base()
924 * be found in the base->vectors array.
931 __acquires(timer->base->lock) in lock_timer_base()
939 * might re-read @tf between the check for TIMER_MIGRATING in lock_timer_base()
942 tf = READ_ONCE(timer->flags); in lock_timer_base()
946 raw_spin_lock_irqsave(&base->lock, *flags); in lock_timer_base()
947 if (timer->flags == tf) in lock_timer_base()
949 raw_spin_unlock_irqrestore(&base->lock, *flags); in lock_timer_base()
967 BUG_ON(!timer->function); in __mod_timer()
970 * This is a common optimization triggered by the networking code - if in __mod_timer()
971 * the timer is re-modified to have the same timeout or ends up in the in __mod_timer()
980 long diff = timer->expires - expires; in __mod_timer()
997 time_before_eq(timer->expires, expires)) { in __mod_timer()
1002 clk = base->clk; in __mod_timer()
1012 timer->expires = expires; in __mod_timer()
1013 else if (time_after(timer->expires, expires)) in __mod_timer()
1014 timer->expires = expires; in __mod_timer()
1027 new_base = get_target_base(base, timer->flags); in __mod_timer()
1037 if (likely(base->running_timer != timer)) { in __mod_timer()
1039 timer->flags |= TIMER_MIGRATING; in __mod_timer()
1041 raw_spin_unlock(&base->lock); in __mod_timer()
1043 raw_spin_lock(&base->lock); in __mod_timer()
1044 WRITE_ONCE(timer->flags, in __mod_timer()
1045 (timer->flags & ~TIMER_BASEMASK) | base->cpu); in __mod_timer()
1052 timer->expires = expires; in __mod_timer()
1059 if (idx != UINT_MAX && clk == base->clk) in __mod_timer()
1065 raw_spin_unlock_irqrestore(&base->lock, flags); in __mod_timer()
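A worked instance of the fast path described above (illustrative numbers, HZ = 1000): a pending timer with expires = 20000 that is far enough out to sit in level 3 lives in a 512 ms bucket, since 20000 >> 9 == 39. Re-arming it to expires = 20100 gives 20100 >> 9 == 39 as well, so calc_wheel_index() returns the index already stored in the timer's flags and __mod_timer() just writes the new expires value instead of detaching and re-enqueueing the timer.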
1071 * mod_timer_pending - modify a pending timer's timeout
1073 * @expires: new timeout in jiffies
1076 * but will not re-activate and modify already deleted timers.
1087 * mod_timer - modify a timer's timeout
1089 * @expires: new timeout in jiffies
1096 * del_timer(timer); timer->expires = expires; add_timer(timer);
1099 * same timer, then mod_timer() is the only safe way to modify the timeout,
1113 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1115 * @expires: New timeout in jiffies
1128 * add_timer - start a timer
1131 * The kernel will do a ->function(@timer) callback from the
1132 * timer interrupt at the ->expires point in the future. The
1135 * The timer's ->expires, ->function fields must be set prior calling this
1138 * Timers with an ->expires field in the past will be executed in the next
1144 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); in add_timer()
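Putting initialization and arming together, a minimal lifecycle sketch: 'struct mydev', its 'watchdog' field and mydev_watchdog_fn() are hypothetical names, while timer_setup() (the usual wrapper around the init path above), from_timer() and add_timer() are the interfaces this file implements. Needs <linux/timer.h>:

    struct mydev {
        struct timer_list watchdog;
    };

    static void mydev_watchdog_fn(struct timer_list *t)
    {
        struct mydev *dev = from_timer(dev, t, watchdog);
        /* ... handle the expired watchdog for 'dev' ... */
    }

    static void mydev_start(struct mydev *dev)
    {
        timer_setup(&dev->watchdog, mydev_watchdog_fn, 0);
        dev->watchdog.expires = jiffies + HZ;   /* one second out */
        add_timer(&dev->watchdog);
    }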
1149 * add_timer_on - start a timer on a particular CPU
1160 BUG_ON(timer_pending(timer) || !timer->function); in add_timer_on()
1162 new_base = get_timer_cpu_base(timer->flags, cpu); in add_timer_on()
1171 timer->flags |= TIMER_MIGRATING; in add_timer_on()
1173 raw_spin_unlock(&base->lock); in add_timer_on()
1175 raw_spin_lock(&base->lock); in add_timer_on()
1176 WRITE_ONCE(timer->flags, in add_timer_on()
1177 (timer->flags & ~TIMER_BASEMASK) | cpu); in add_timer_on()
1183 raw_spin_unlock_irqrestore(&base->lock, flags); in add_timer_on()
1188 * del_timer - deactivate a timer.
1191 * del_timer() deactivates a timer - this works on both active and inactive
1209 raw_spin_unlock_irqrestore(&base->lock, flags); in del_timer()
1217 * try_to_del_timer_sync - Try to deactivate a timer
1227 int ret = -1; in try_to_del_timer_sync()
1233 if (base->running_timer != timer) in try_to_del_timer_sync()
1236 raw_spin_unlock_irqrestore(&base->lock, flags); in try_to_del_timer_sync()
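try_to_del_timer_sync() returns -1 while the callback is running elsewhere, 0 if the timer was not pending, and 1 if a pending timer was deactivated. A hedged sketch of its use (hypothetical dev->watchdog); this loop is essentially what del_timer_sync() does internally:

    /* Spin, without sleeping, until the timer is gone and its callback
     * is no longer running on any CPU. */
    while (try_to_del_timer_sync(&dev->watchdog) < 0)
        cpu_relax();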
1245 spin_lock_init(&base->expiry_lock); in timer_base_init_expiry_lock()
1250 spin_lock(&base->expiry_lock); in timer_base_lock_expiry()
1255 spin_unlock(&base->expiry_lock); in timer_base_unlock_expiry()
1261 * If there is a waiter for base->expiry_lock, then it was waiting for the
1267 if (atomic_read(&base->timer_waiters)) { in timer_sync_wait_running()
1268 raw_spin_unlock_irq(&base->lock); in timer_sync_wait_running()
1269 spin_unlock(&base->expiry_lock); in timer_sync_wait_running()
1270 spin_lock(&base->expiry_lock); in timer_sync_wait_running()
1271 raw_spin_lock_irq(&base->lock); in timer_sync_wait_running()
1289 tf = READ_ONCE(timer->flags); in del_timer_wait_running()
1301 atomic_inc(&base->timer_waiters); in del_timer_wait_running()
1302 spin_lock_bh(&base->expiry_lock); in del_timer_wait_running()
1303 atomic_dec(&base->timer_waiters); in del_timer_wait_running()
1304 spin_unlock_bh(&base->expiry_lock); in del_timer_wait_running()
1317 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1336 * ---- ----
1339 * base->running_timer = mytimer;
1344 * while (base->running_timer == mytimer);
1364 lock_map_acquire(&timer->lockdep_map); in del_timer_sync()
1365 lock_map_release(&timer->lockdep_map); in del_timer_sync()
1372 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); in del_timer_sync()
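The two-column trace excerpted above (file lines 1336-1344) shows CPU0 spinning in del_timer_sync() while CPU1's interrupted callback spins on a lock CPU0 holds. A sketch of the wrong and right teardown order under that rule; dev->lock and dev->shutting_down are hypothetical:

    static void bad_teardown(struct mydev *dev)
    {
        spin_lock_irq(&dev->lock);
        del_timer_sync(&dev->watchdog); /* BAD: callback may spin on dev->lock */
        spin_unlock_irq(&dev->lock);
    }

    static void good_teardown(struct mydev *dev)
    {
        spin_lock_irq(&dev->lock);
        dev->shutting_down = true;      /* stop the callback re-arming itself */
        spin_unlock_irq(&dev->lock);
        del_timer_sync(&dev->watchdog); /* safe: no shared lock held */
    }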
1400 * timer->lockdep_map, make a copy and use that here. in call_timer_fn()
1404 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); in call_timer_fn()
1420 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", in call_timer_fn()
1435 * This value is required only for tracing. base->clk was in expire_timers()
1437 * is related to the old base->clk value. in expire_timers()
1439 unsigned long baseclk = base->clk - 1; in expire_timers()
1445 timer = hlist_entry(head->first, struct timer_list, entry); in expire_timers()
1447 base->running_timer = timer; in expire_timers()
1450 fn = timer->function; in expire_timers()
1452 if (timer->flags & TIMER_IRQSAFE) { in expire_timers()
1453 raw_spin_unlock(&base->lock); in expire_timers()
1455 raw_spin_lock(&base->lock); in expire_timers()
1456 base->running_timer = NULL; in expire_timers()
1458 raw_spin_unlock_irq(&base->lock); in expire_timers()
1460 raw_spin_lock_irq(&base->lock); in expire_timers()
1461 base->running_timer = NULL; in expire_timers()
1470 unsigned long clk = base->clk = base->next_expiry; in collect_expired_timers()
1478 if (__test_and_clear_bit(idx, base->pending_map)) { in collect_expired_timers()
1479 vec = base->vectors + idx; in collect_expired_timers()
1503 pos = find_next_bit(base->pending_map, end, start); in next_pending_bucket()
1505 return pos - start; in next_pending_bucket()
1507 pos = find_next_bit(base->pending_map, start, offset); in next_pending_bucket()
1508 return pos < start ? pos + LVL_SIZE - start : -1; in next_pending_bucket()
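next_pending_bucket() scans one 64-bucket level circularly, starting at the current clock position, and returns the distance to the next pending bucket. A self-contained sketch of the same scan, using a byte-per-bucket array instead of the kernel's pending_map bitmap and its two-segment find_next_bit() search:

    /* Distance (in buckets) from position 'clk' to the next pending
     * bucket within one 64-bucket level, or -1 if the level is empty. */
    static int next_pending_bucket_sketch(const unsigned char *pending,
                                          unsigned offset, unsigned clk)
    {
        for (unsigned i = 0; i < 64; i++) {
            if (pending[offset + ((clk + i) & 63)])
                return i;
        }
        return -1;
    }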
1513 * hold base->lock.
1520 next = base->clk + NEXT_TIMER_MAX_DELTA; in __next_timer_interrupt()
1521 clk = base->clk; in __next_timer_interrupt()
1537 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) in __next_timer_interrupt()
1544 * next expiring bucket in that level. base->clk is the next in __next_timer_interrupt()
1581 base->next_expiry_recalc = false; in __next_timer_interrupt()
1582 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); in __next_timer_interrupt()
1622 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1642 raw_spin_lock(&base->lock); in get_next_timer_interrupt()
1643 if (base->next_expiry_recalc) in get_next_timer_interrupt()
1644 base->next_expiry = __next_timer_interrupt(base); in get_next_timer_interrupt()
1645 nextevt = base->next_expiry; in get_next_timer_interrupt()
1649 * base. We can only do that when @basej is past base->clk in get_next_timer_interrupt()
1650 * otherwise we might rewind base->clk. in get_next_timer_interrupt()
1652 if (time_after(basej, base->clk)) { in get_next_timer_interrupt()
1654 base->clk = basej; in get_next_timer_interrupt()
1655 else if (time_after(nextevt, base->clk)) in get_next_timer_interrupt()
1656 base->clk = nextevt; in get_next_timer_interrupt()
1661 base->is_idle = false; in get_next_timer_interrupt()
1663 if (base->timers_pending) in get_next_timer_interrupt()
1664 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; in get_next_timer_interrupt()
1672 if ((expires - basem) > TICK_NSEC) in get_next_timer_interrupt()
1673 base->is_idle = true; in get_next_timer_interrupt()
1675 raw_spin_unlock(&base->lock); in get_next_timer_interrupt()
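Worked example of the conversion above (illustrative, HZ = 250 so TICK_NSEC = 4,000,000 ns): if the next event is nextevt - basej = 10 jiffies away, the returned expiry is basem + 10 * 4,000,000 ns = basem + 40 ms on the monotonic clock.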
1681 * timer_clear_idle - Clear the idle state of the timer base
1695 base->is_idle = false; in timer_clear_idle()
1723 * __run_timers - run all expired timers (if any) on this CPU.
1731 if (time_before(jiffies, base->next_expiry)) in __run_timers()
1735 raw_spin_lock_irq(&base->lock); in __run_timers()
1737 while (time_after_eq(jiffies, base->clk) && in __run_timers()
1738 time_after_eq(jiffies, base->next_expiry)) { in __run_timers()
1745 WARN_ON_ONCE(!levels && !base->next_expiry_recalc); in __run_timers()
1746 base->clk++; in __run_timers()
1747 base->next_expiry = __next_timer_interrupt(base); in __run_timers()
1749 while (levels--) in __run_timers()
1752 raw_spin_unlock_irq(&base->lock); in __run_timers()
1757 * This function runs timers and the timer-tq in bottom half context.
1769 * Called by the local, per-CPU timer interrupt on SMP.
1777 if (time_before(jiffies, base->next_expiry)) { in run_local_timers()
1782 if (time_before(jiffies, base->next_expiry)) in run_local_timers()
1799 struct process_timer *timeout = from_timer(timeout, t, timer); in process_timeout() local
1801 wake_up_process(timeout->task); in process_timeout()
1805 * schedule_timeout - sleep until timeout
1806 * @timeout: timeout value in jiffies
1808 * Make the current task sleep until @timeout jiffies have elapsed.
1812 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
1816 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1820 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1827 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1828 * the CPU away without a bound on the timeout. In this case the return
1833 * to be non-negative.
1835 signed long __sched schedule_timeout(signed long timeout) in schedule_timeout() argument
1840 switch (timeout) in schedule_timeout()
1860 if (timeout < 0) { in schedule_timeout()
1861 printk(KERN_ERR "schedule_timeout: wrong timeout " in schedule_timeout()
1862 "value %lx\n", timeout); in schedule_timeout()
1864 current->state = TASK_RUNNING; in schedule_timeout()
1869 expire = timeout + jiffies; in schedule_timeout()
1880 timeout = expire - jiffies; in schedule_timeout()
1883 return timeout < 0 ? 0 : timeout; in schedule_timeout()
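A canonical usage sketch for schedule_timeout() (the wrapper function is hypothetical): the task state must be set before the call, since with TASK_RUNNING the scheduler is invoked but the task does not sleep:

    static void wait_up_to_two_seconds(void)
    {
        signed long remaining;

        set_current_state(TASK_INTERRUPTIBLE);
        remaining = schedule_timeout(2 * HZ);
        if (remaining)
            ;   /* woken early (e.g. by a signal); that many jiffies were left */
    }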
1891 signed long __sched schedule_timeout_interruptible(signed long timeout) in schedule_timeout_interruptible() argument
1894 return schedule_timeout(timeout); in schedule_timeout_interruptible()
1898 signed long __sched schedule_timeout_killable(signed long timeout) in schedule_timeout_killable() argument
1901 return schedule_timeout(timeout); in schedule_timeout_killable()
1905 signed long __sched schedule_timeout_uninterruptible(signed long timeout) in schedule_timeout_uninterruptible() argument
1908 return schedule_timeout(timeout); in schedule_timeout_uninterruptible()
1916 signed long __sched schedule_timeout_idle(signed long timeout) in schedule_timeout_idle() argument
1919 return schedule_timeout(timeout); in schedule_timeout_idle()
1930 int cpu = new_base->cpu; in migrate_timer_list()
1935 is_pinned = timer->flags & TIMER_PINNED; in migrate_timer_list()
1939 detach_if_pending(timer, get_timer_base(timer->flags), false); in migrate_timer_list()
1940 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; in migrate_timer_list()
1959 raw_spin_lock_irqsave(&new_base->lock, flags); in __migrate_timers()
1960 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); in __migrate_timers()
1969 BUG_ON(old_base->running_timer); in __migrate_timers()
1972 migrate_timer_list(new_base, old_base->vectors + i, in __migrate_timers()
1975 raw_spin_unlock(&old_base->lock); in __migrate_timers()
1976 raw_spin_unlock_irqrestore(&new_base->lock, flags); in __migrate_timers()
1986 int cpu = new_base->cpu; in migrate_timer_list()
1989 timer = hlist_entry(head->first, struct timer_list, entry); in migrate_timer_list()
1991 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; in migrate_timer_list()
2005 base->clk = jiffies; in timers_prepare_cpu()
2006 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in timers_prepare_cpu()
2007 base->timers_pending = false; in timers_prepare_cpu()
2008 base->is_idle = false; in timers_prepare_cpu()
2043 raw_spin_lock_irq(&new_base->lock); in timers_dead_cpu()
2044 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); in timers_dead_cpu()
2052 BUG_ON(old_base->running_timer); in timers_dead_cpu()
2055 migrate_timer_list(new_base, old_base->vectors + i); in timers_dead_cpu()
2057 raw_spin_unlock(&old_base->lock); in timers_dead_cpu()
2058 raw_spin_unlock_irq(&new_base->lock); in timers_dead_cpu()
2075 base->cpu = cpu; in init_timer_cpu()
2076 raw_spin_lock_init(&base->lock); in init_timer_cpu()
2077 base->clk = jiffies; in init_timer_cpu()
2078 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in init_timer_cpu()
2099 * msleep - sleep safely even with waitqueue interruptions
2104 unsigned long timeout = msecs_to_jiffies(msecs) + 1; in msleep() local
2106 while (timeout) in msleep()
2107 timeout = schedule_timeout_uninterruptible(timeout); in msleep()
2113 * msleep_interruptible - sleep waiting for signals
2118 unsigned long timeout = msecs_to_jiffies(msecs) + 1; in msleep_interruptible() local
2120 while (timeout && !signal_pending(current)) in msleep_interruptible()
2121 timeout = schedule_timeout_interruptible(timeout); in msleep_interruptible()
2122 return jiffies_to_msecs(timeout); in msleep_interruptible()
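A hedged usage sketch for msleep_interruptible() (hypothetical backoff helper, needs <linux/delay.h> and <linux/errno.h>): the return value is the remaining time in milliseconds, so non-zero means a signal cut the sleep short:

    static int mydev_backoff(void)
    {
        unsigned long left = msleep_interruptible(100);

        return left ? -EINTR : 0;
    }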
2128 * usleep_range - Sleep for an approximate time
2132 * In non-atomic context where the exact wakeup time is flexible, use
2134 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2135 * power usage by allowing hrtimers to take advantage of an already-
2141 u64 delta = (u64)(max - min) * NSEC_PER_USEC; in usleep_range()
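The min/max slack computed above (delta, in nanoseconds) is what lets the hrtimer code coalesce the wakeup with others. A one-line usage sketch:

    /* Flexible 100-200 us delay in process context; the 100 us of slack
     * lets the hrtimer subsystem merge this wakeup with a nearby one,
     * unlike a busy-waiting udelay(100). */
    usleep_range(100, 200);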