
Lines Matching refs:base

578 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)  in trigger_dyntick_cpu()  argument
588 if (tick_nohz_full_cpu(base->cpu)) in trigger_dyntick_cpu()
589 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
598 if (base->is_idle) in trigger_dyntick_cpu()
599 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
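
The references above and below come from the Linux kernel's timer-wheel code (kernel/time/timer.c). trigger_dyntick_cpu() runs when a newly enqueued timer becomes the earliest event on a base whose CPU may have stopped its tick. The following is a simplified userspace sketch of that decision; cpu_is_nohz_full() and kick_cpu() are placeholder names standing in for tick_nohz_full_cpu() and wake_up_nohz_cpu(), and the timer argument shown in the listing is dropped.

#include <stdbool.h>

struct timer_base {
	int  cpu;	/* CPU this base belongs to */
	bool is_idle;	/* that CPU has stopped its periodic tick */
};

/* Placeholder stubs for the kernel helpers referenced in the listing. */
static bool cpu_is_nohz_full(int cpu) { (void)cpu; return false; }
static void kick_cpu(int cpu) { (void)cpu; /* would send a wake-up / IPI */ }

static void trigger_dyntick_cpu(struct timer_base *base)
{
	/*
	 * A nohz_full CPU relies on remote wake-ups to notice new timers,
	 * so it is always kicked.
	 */
	if (cpu_is_nohz_full(base->cpu)) {
		kick_cpu(base->cpu);
		return;
	}

	/*
	 * An idle CPU may have programmed its next wake-up far in the
	 * future; kick it so it re-evaluates base->next_expiry.
	 */
	if (base->is_idle)
		kick_cpu(base->cpu);
}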
607 static void enqueue_timer(struct timer_base *base, struct timer_list *timer, in enqueue_timer() argument
611 hlist_add_head(&timer->entry, base->vectors + idx); in enqueue_timer()
612 __set_bit(idx, base->pending_map); in enqueue_timer()
622 if (time_before(bucket_expiry, base->next_expiry)) { in enqueue_timer()
627 base->next_expiry = bucket_expiry; in enqueue_timer()
628 base->timers_pending = true; in enqueue_timer()
629 base->next_expiry_recalc = false; in enqueue_timer()
630 trigger_dyntick_cpu(base, timer); in enqueue_timer()
634 static void internal_add_timer(struct timer_base *base, struct timer_list *timer) in internal_add_timer() argument
639 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry); in internal_add_timer()
640 enqueue_timer(base, timer, idx, bucket_expiry); in internal_add_timer()
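
enqueue_timer() and internal_add_timer() place a timer into a wheel bucket computed from its expiry, mark that bucket in pending_map, and pull base->next_expiry forward when the new bucket expires earlier. Below is a minimal single-level model of that path (the real wheel has several levels with coarser granularity per level); calc_index() is a stand-in for calc_wheel_index().

#include <stdbool.h>
#include <stdint.h>

#define WHEEL_SIZE 64

struct timer_list {
	struct timer_list *next;	/* models the hlist linkage */
	unsigned long	   expires;
};

struct timer_base {
	unsigned long	   clk;		/* wheel time (last jiffy processed) */
	unsigned long	   next_expiry;	/* earliest pending bucket expiry */
	bool		   timers_pending;
	bool		   next_expiry_recalc;
	uint64_t	   pending_map;	/* one bit per bucket */
	struct timer_list *vectors[WHEEL_SIZE];
};

/* Single level only: the bucket expires exactly at 'expires'. */
static unsigned calc_index(unsigned long expires, unsigned long *bucket_expiry)
{
	*bucket_expiry = expires;
	return expires % WHEEL_SIZE;
}

static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned idx, unsigned long bucket_expiry)
{
	timer->next = base->vectors[idx];		/* hlist_add_head() */
	base->vectors[idx] = timer;
	base->pending_map |= (uint64_t)1 << idx;	/* __set_bit() */

	/* time_before(bucket_expiry, base->next_expiry) */
	if ((long)(bucket_expiry - base->next_expiry) < 0) {
		base->next_expiry = bucket_expiry;
		base->timers_pending = true;
		base->next_expiry_recalc = false;
		/* the kernel calls trigger_dyntick_cpu(base, timer) here */
	}
}

static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned long bucket_expiry;
	unsigned idx = calc_index(timer->expires, &bucket_expiry);

	enqueue_timer(base, timer, idx, bucket_expiry);
}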
892 static int detach_if_pending(struct timer_list *timer, struct timer_base *base, in detach_if_pending() argument
900 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) { in detach_if_pending()
901 __clear_bit(idx, base->pending_map); in detach_if_pending()
902 base->next_expiry_recalc = true; in detach_if_pending()
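
detach_if_pending() is the removal counterpart: when the last timer leaves a bucket, the bucket's bit in pending_map is cleared and next_expiry is merely flagged as stale (next_expiry_recalc) instead of being rescanned immediately. A compact model of that bookkeeping, with the list manipulation reduced to a per-bucket counter:

#include <stdbool.h>
#include <stdint.h>

#define WHEEL_SIZE 64

struct timer_base {
	uint64_t pending_map;
	unsigned bucket_count[WHEEL_SIZE];	/* entries per bucket (model only) */
	bool	 next_expiry_recalc;
};

struct timer_list {
	bool	 pending;
	unsigned idx;		/* bucket the timer currently sits in */
};

static bool detach_if_pending(struct timer_list *timer, struct timer_base *base)
{
	unsigned idx = timer->idx;

	if (!timer->pending)
		return false;

	timer->pending = false;		/* the unlinking itself is elided here */

	/*
	 * Last entry in its bucket: clear the bit so the next-expiry scan
	 * skips it, and recompute next_expiry lazily rather than right now.
	 */
	if (--base->bucket_count[idx] == 0) {
		base->pending_map &= ~((uint64_t)1 << idx);
		base->next_expiry_recalc = true;
	}
	return true;
}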
911 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); in get_timer_cpu_base() local
918 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); in get_timer_cpu_base()
919 return base; in get_timer_cpu_base()
924 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in get_timer_this_cpu_base() local
931 base = this_cpu_ptr(&timer_bases[BASE_DEF]); in get_timer_this_cpu_base()
932 return base; in get_timer_this_cpu_base()
941 get_target_base(struct timer_base *base, unsigned tflags) in get_target_base() argument
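
get_timer_cpu_base() and get_timer_this_cpu_base() pick between the standard and the deferrable per-CPU base: with NO_HZ_COMMON a TIMER_DEFERRABLE timer goes to BASE_DEF so it cannot wake an idle CPU, and get_target_base() then decides whether the local CPU's base or another CPU's should receive the timer. A rough model of the selection; the timer_bases layout and the flag value are illustrative, not the kernel's exact definitions.

#define NR_CPUS			8
enum { BASE_STD, BASE_DEF, NR_BASES };
#define TIMER_DEFERRABLE	0x00080000u	/* illustrative flag bit */

struct timer_base { int cpu; };

/* The kernel uses per-CPU data; a plain 2-D array stands in for it here. */
static struct timer_base timer_bases[NR_CPUS][NR_BASES];

static struct timer_base *get_timer_cpu_base(unsigned int tflags, int cpu)
{
	struct timer_base *base = &timer_bases[cpu][BASE_STD];

	/* Deferrable timers may wait until the CPU wakes up for other reasons. */
	if (tflags & TIMER_DEFERRABLE)
		base = &timer_bases[cpu][BASE_DEF];
	return base;
}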
951 static inline void forward_timer_base(struct timer_base *base) in forward_timer_base() argument
960 if ((long)(jnow - base->clk) < 1) in forward_timer_base()
967 if (time_after(base->next_expiry, jnow)) { in forward_timer_base()
968 base->clk = jnow; in forward_timer_base()
970 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) in forward_timer_base()
972 base->clk = base->next_expiry; in forward_timer_base()
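
forward_timer_base() lazily advances the wheel clock: it never moves clk backwards and never past next_expiry, so timers that are already queued keep valid bucket positions. A sketch with jiffies modelled as a plain global:

struct timer_base {
	unsigned long clk;		/* current wheel time */
	unsigned long next_expiry;	/* earliest queued bucket expiry */
};

static unsigned long jiffies;	/* stand-in for the kernel's jiffies counter */

static void forward_timer_base(struct timer_base *base)
{
	unsigned long jnow = jiffies;

	/* Only act if the wheel has fallen behind real time. */
	if ((long)(jnow - base->clk) < 1)
		return;

	if ((long)(base->next_expiry - jnow) > 0) {
		/* Nothing due before now: jump the wheel straight to now. */
		base->clk = jnow;
	} else {
		/*
		 * A timer is already due: advance only up to its expiry so it
		 * still lands in a valid bucket.  (The kernel warns here if
		 * next_expiry has somehow fallen behind clk.)
		 */
		base->clk = base->next_expiry;
	}
}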
990 __acquires(timer->base->lock) in lock_timer_base()
993 struct timer_base *base; in lock_timer_base() local
1004 base = get_timer_base(tf); in lock_timer_base()
1005 raw_spin_lock_irqsave(&base->lock, *flags); in lock_timer_base()
1007 return base; in lock_timer_base()
1008 raw_spin_unlock_irqrestore(&base->lock, *flags); in lock_timer_base()
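
lock_timer_base() must lock the base a timer currently lives on, but that base is derived from timer->flags, which a concurrent migration can change. The pattern is lock-and-recheck: read the flags, lock the derived base, and retry if the flags moved or TIMER_MIGRATING is set. A userspace model using C11 atomics and a pthread mutex; base_from_flags() and the single shared base are stand-ins for the per-CPU lookup.

#include <pthread.h>
#include <stdatomic.h>

#define TIMER_MIGRATING	0x00040000u

struct timer_base {
	pthread_mutex_t lock;
};

struct timer_list {
	_Atomic unsigned int flags;	/* encodes the owning base/CPU */
};

/* Single-base model; the kernel maps the flags to a per-CPU base instead. */
static struct timer_base the_base = { .lock = PTHREAD_MUTEX_INITIALIZER };

static struct timer_base *base_from_flags(unsigned int tf)
{
	(void)tf;
	return &the_base;
}

static struct timer_base *lock_timer_base(struct timer_list *timer)
{
	for (;;) {
		unsigned int tf = atomic_load(&timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			struct timer_base *base = base_from_flags(tf);

			pthread_mutex_lock(&base->lock);
			/* Still on the same base?  Then this lock is the right one. */
			if (atomic_load(&timer->flags) == tf)
				return base;
			pthread_mutex_unlock(&base->lock);
		}
		/* migration in flight: retry (the kernel does cpu_relax() here) */
	}
}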
1022 struct timer_base *base, *new_base; in __mod_timer() local
1052 base = lock_timer_base(timer, &flags); in __mod_timer()
1053 forward_timer_base(base); in __mod_timer()
1061 clk = base->clk; in __mod_timer()
1078 base = lock_timer_base(timer, &flags); in __mod_timer()
1079 forward_timer_base(base); in __mod_timer()
1082 ret = detach_if_pending(timer, base, false); in __mod_timer()
1086 new_base = get_target_base(base, timer->flags); in __mod_timer()
1088 if (base != new_base) { in __mod_timer()
1096 if (likely(base->running_timer != timer)) { in __mod_timer()
1100 raw_spin_unlock(&base->lock); in __mod_timer()
1101 base = new_base; in __mod_timer()
1102 raw_spin_lock(&base->lock); in __mod_timer()
1104 (timer->flags & ~TIMER_BASEMASK) | base->cpu); in __mod_timer()
1105 forward_timer_base(base); in __mod_timer()
1118 if (idx != UINT_MAX && clk == base->clk) in __mod_timer()
1119 enqueue_timer(base, timer, idx, bucket_expiry); in __mod_timer()
1121 internal_add_timer(base, timer); in __mod_timer()
1124 raw_spin_unlock_irqrestore(&base->lock, flags); in __mod_timer()
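
The interesting part of __mod_timer() is moving a timer to a different CPU's base: that is only allowed when the callback is not currently running on the old base, and while the locks are swapped the TIMER_MIGRATING flag keeps lock_timer_base() callers spinning. A sketch of just that hand-over; the caller is assumed to hold base->lock, the flag values are model constants, and the detach/enqueue steps around it are omitted.

#include <pthread.h>
#include <stdatomic.h>

#define TIMER_MIGRATING	0x00040000u
#define TIMER_CPUMASK	0x0003ffffu	/* low bits hold the owning CPU */

struct timer_list {
	_Atomic unsigned int flags;
	unsigned long	     expires;
};

struct timer_base {
	pthread_mutex_t	   lock;
	unsigned int	   cpu;
	struct timer_list *running_timer;	/* callback currently executing */
};

/* Called with base->lock held; returns whichever base is locked afterwards. */
static struct timer_base *
switch_timer_base(struct timer_list *timer, struct timer_base *base,
		  struct timer_base *new_base)
{
	/*
	 * If the callback is running on the old base, the timer stays there
	 * so serialization against the running callback keeps working.
	 */
	if (base == new_base || base->running_timer == timer)
		return base;

	/* Park concurrent lock_timer_base() callers while the timer moves. */
	atomic_fetch_or(&timer->flags, TIMER_MIGRATING);

	pthread_mutex_unlock(&base->lock);
	pthread_mutex_lock(&new_base->lock);

	/* Point the timer at its new home CPU and drop MIGRATING again. */
	atomic_store(&timer->flags,
		     (atomic_load(&timer->flags) &
		      ~(TIMER_MIGRATING | TIMER_CPUMASK)) | new_base->cpu);
	return new_base;
}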
1216 struct timer_base *new_base, *base; in add_timer_on() local
1228 base = lock_timer_base(timer, &flags); in add_timer_on()
1229 if (base != new_base) { in add_timer_on()
1232 raw_spin_unlock(&base->lock); in add_timer_on()
1233 base = new_base; in add_timer_on()
1234 raw_spin_lock(&base->lock); in add_timer_on()
1238 forward_timer_base(base); in add_timer_on()
1241 internal_add_timer(base, timer); in add_timer_on()
1242 raw_spin_unlock_irqrestore(&base->lock, flags); in add_timer_on()
1259 struct timer_base *base; in del_timer() local
1266 base = lock_timer_base(timer, &flags); in del_timer()
1267 ret = detach_if_pending(timer, base, true); in del_timer()
1268 raw_spin_unlock_irqrestore(&base->lock, flags); in del_timer()
1284 struct timer_base *base; in try_to_del_timer_sync() local
1290 base = lock_timer_base(timer, &flags); in try_to_del_timer_sync()
1292 if (base->running_timer != timer) in try_to_del_timer_sync()
1293 ret = detach_if_pending(timer, base, true); in try_to_del_timer_sync()
1295 raw_spin_unlock_irqrestore(&base->lock, flags); in try_to_del_timer_sync()
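
del_timer() only dequeues a pending timer; try_to_del_timer_sync() additionally refuses to succeed while the callback is executing, which is what base->running_timer tracks. A sketch of that check, with single-base stand-ins for lock_timer_base() and detach_if_pending():

#include <pthread.h>
#include <stdbool.h>

struct timer_list;

struct timer_base {
	pthread_mutex_t	   lock;
	struct timer_list *running_timer;
};

/* Single-base stand-ins for the helpers sketched above. */
static struct timer_base one_base = { .lock = PTHREAD_MUTEX_INITIALIZER };

static struct timer_base *lock_timer_base(struct timer_list *timer)
{
	(void)timer;
	pthread_mutex_lock(&one_base.lock);
	return &one_base;
}

static bool detach_if_pending(struct timer_list *timer, struct timer_base *base)
{
	(void)timer; (void)base;
	return true;		/* pretend the timer was pending and got removed */
}

static int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base = lock_timer_base(timer);
	int ret = -1;		/* callback running right now: cannot delete */

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base) ? 1 : 0;

	pthread_mutex_unlock(&base->lock);
	return ret;
}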
1302 static __init void timer_base_init_expiry_lock(struct timer_base *base) in timer_base_init_expiry_lock() argument
1304 spin_lock_init(&base->expiry_lock); in timer_base_init_expiry_lock()
1307 static inline void timer_base_lock_expiry(struct timer_base *base) in timer_base_lock_expiry() argument
1309 spin_lock(&base->expiry_lock); in timer_base_lock_expiry()
1312 static inline void timer_base_unlock_expiry(struct timer_base *base) in timer_base_unlock_expiry() argument
1314 spin_unlock(&base->expiry_lock); in timer_base_unlock_expiry()
1324 static void timer_sync_wait_running(struct timer_base *base) in timer_sync_wait_running() argument
1326 if (atomic_read(&base->timer_waiters)) { in timer_sync_wait_running()
1327 raw_spin_unlock_irq(&base->lock); in timer_sync_wait_running()
1328 spin_unlock(&base->expiry_lock); in timer_sync_wait_running()
1329 spin_lock(&base->expiry_lock); in timer_sync_wait_running()
1330 raw_spin_lock_irq(&base->lock); in timer_sync_wait_running()
1350 struct timer_base *base = get_timer_base(tf); in del_timer_wait_running() local
1360 atomic_inc(&base->timer_waiters); in del_timer_wait_running()
1361 spin_lock_bh(&base->expiry_lock); in del_timer_wait_running()
1362 atomic_dec(&base->timer_waiters); in del_timer_wait_running()
1363 spin_unlock_bh(&base->expiry_lock); in del_timer_wait_running()
1367 static inline void timer_base_init_expiry_lock(struct timer_base *base) { } in timer_base_init_expiry_lock() argument
1368 static inline void timer_base_lock_expiry(struct timer_base *base) { } in timer_base_lock_expiry() argument
1369 static inline void timer_base_unlock_expiry(struct timer_base *base) { } in timer_base_unlock_expiry() argument
1370 static inline void timer_sync_wait_running(struct timer_base *base) { } in timer_sync_wait_running() argument
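
The expiry_lock machinery exists for PREEMPT_RT: del_timer_sync() must not spin while a preempted callback holds the CPU, so it registers itself in timer_waiters and blocks on expiry_lock, which the expiring side briefly drops between callbacks whenever waiters are queued. A userspace model of the two sides of that handshake; pthread mutexes stand in for the raw_spinlock_t/spinlock_t pair and interrupt disabling is ignored.

#include <pthread.h>
#include <stdatomic.h>

struct timer_base {
	pthread_mutex_t lock;		/* protects the wheel itself */
	pthread_mutex_t expiry_lock;	/* held across a whole expiry batch */
	atomic_int	timer_waiters;
};

/* Expiring side: called between two callback invocations. */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_load(&base->timer_waiters)) {
		pthread_mutex_unlock(&base->lock);
		pthread_mutex_unlock(&base->expiry_lock);
		/* waiters get expiry_lock here and see the callback finished */
		pthread_mutex_lock(&base->expiry_lock);
		pthread_mutex_lock(&base->lock);
	}
}

/* Deleting side: block until the running callback has completed. */
static void del_timer_wait_running(struct timer_base *base)
{
	atomic_fetch_add(&base->timer_waiters, 1);
	pthread_mutex_lock(&base->expiry_lock);
	atomic_fetch_sub(&base->timer_waiters, 1);
	pthread_mutex_unlock(&base->expiry_lock);
}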
1498 static void expire_timers(struct timer_base *base, struct hlist_head *head) in expire_timers() argument
1505 unsigned long baseclk = base->clk - 1; in expire_timers()
1513 base->running_timer = timer; in expire_timers()
1519 raw_spin_unlock(&base->lock); in expire_timers()
1521 raw_spin_lock(&base->lock); in expire_timers()
1522 base->running_timer = NULL; in expire_timers()
1524 raw_spin_unlock_irq(&base->lock); in expire_timers()
1526 raw_spin_lock_irq(&base->lock); in expire_timers()
1527 base->running_timer = NULL; in expire_timers()
1528 timer_sync_wait_running(base); in expire_timers()
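
expire_timers() walks one expired bucket: each timer is detached and recorded as base->running_timer under base->lock, then the lock is dropped around the callback so callbacks may re-arm or delete timers themselves. A sketch with a singly linked head modelling the hlist bucket; the caller holds base->lock, and the TIMER_IRQSAFE variant plus the RT hand-off are reduced to comments.

#include <stddef.h>
#include <pthread.h>

struct timer_list {
	struct timer_list *next;
	void (*function)(struct timer_list *);
};

struct timer_base {
	pthread_mutex_t	   lock;
	struct timer_list *running_timer;
};

/* Called with base->lock held; 'head' is one expired bucket. */
static void expire_timers(struct timer_base *base, struct timer_list **head)
{
	while (*head) {
		struct timer_list *timer = *head;

		*head = timer->next;		/* detach from the bucket */
		base->running_timer = timer;	/* seen by try_to_del_timer_sync() */

		pthread_mutex_unlock(&base->lock);
		timer->function(timer);		/* run the callback unlocked */
		pthread_mutex_lock(&base->lock);

		base->running_timer = NULL;
		/* PREEMPT_RT: timer_sync_wait_running(base) would run here */
	}
}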
1533 static int collect_expired_timers(struct timer_base *base, in collect_expired_timers() argument
1536 unsigned long clk = base->clk = base->next_expiry; in collect_expired_timers()
1544 if (__test_and_clear_bit(idx, base->pending_map)) { in collect_expired_timers()
1545 vec = base->vectors + idx; in collect_expired_timers()
1563 static int next_pending_bucket(struct timer_base *base, unsigned offset, in next_pending_bucket() argument
1569 pos = find_next_bit(base->pending_map, end, start); in next_pending_bucket()
1573 pos = find_next_bit(base->pending_map, start, offset); in next_pending_bucket()
1581 static unsigned long __next_timer_interrupt(struct timer_base *base) in __next_timer_interrupt() argument
1586 next = base->clk + NEXT_TIMER_MAX_DELTA; in __next_timer_interrupt()
1587 clk = base->clk; in __next_timer_interrupt()
1589 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); in __next_timer_interrupt()
1647 base->next_expiry_recalc = false; in __next_timer_interrupt()
1648 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); in __next_timer_interrupt()
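
__next_timer_interrupt() recomputes the earliest pending expiry by scanning each level's pending_map starting at the bucket the wheel clock points to, and refreshes the timers_pending and next_expiry_recalc state. A single-level model of that scan; the real function repeats it per level with coarser granularity and takes the minimum.

#include <stdbool.h>
#include <stdint.h>

#define WHEEL_SIZE		64
#define NEXT_TIMER_MAX_DELTA	((unsigned long)((1UL << 30) - 1))

struct timer_base {
	unsigned long clk;
	uint64_t      pending_map;	/* one bit per bucket */
	bool	      next_expiry_recalc;
	bool	      timers_pending;
};

static unsigned long __next_timer_interrupt(struct timer_base *base)
{
	unsigned long next = base->clk + NEXT_TIMER_MAX_DELTA;
	unsigned start = base->clk % WHEEL_SIZE;

	for (unsigned i = 0; i < WHEEL_SIZE; i++) {
		unsigned idx = (start + i) % WHEEL_SIZE;

		if (base->pending_map & ((uint64_t)1 << idx)) {
			next = base->clk + i;	/* i buckets ahead of the wheel clock */
			break;
		}
	}

	base->next_expiry_recalc = false;
	base->timers_pending = (next != base->clk + NEXT_TIMER_MAX_DELTA);
	return next;
}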
1697 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in get_next_timer_interrupt() local
1708 raw_spin_lock(&base->lock); in get_next_timer_interrupt()
1709 if (base->next_expiry_recalc) in get_next_timer_interrupt()
1710 base->next_expiry = __next_timer_interrupt(base); in get_next_timer_interrupt()
1711 nextevt = base->next_expiry; in get_next_timer_interrupt()
1718 if (time_after(basej, base->clk)) { in get_next_timer_interrupt()
1720 base->clk = basej; in get_next_timer_interrupt()
1721 else if (time_after(nextevt, base->clk)) in get_next_timer_interrupt()
1722 base->clk = nextevt; in get_next_timer_interrupt()
1727 base->is_idle = false; in get_next_timer_interrupt()
1729 if (base->timers_pending) in get_next_timer_interrupt()
1739 base->is_idle = true; in get_next_timer_interrupt()
1741 raw_spin_unlock(&base->lock); in get_next_timer_interrupt()
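
get_next_timer_interrupt() is the idle-entry query: it refreshes the cached next_expiry if stale and marks the base idle when nothing fires within the next tick, which is what trigger_dyntick_cpu() later consults to decide whether a remote enqueue must kick this CPU. A much-reduced sketch that ignores the clk forwarding, the locking and the jiffies-to-nanoseconds conversion:

#include <stdbool.h>

#define NEXT_TIMER_MAX_DELTA	((unsigned long)((1UL << 30) - 1))

struct timer_base {
	unsigned long next_expiry;
	bool	      next_expiry_recalc;
	bool	      timers_pending;
	bool	      is_idle;
};

/* Stand-in for the pending_map scan shown in the __next_timer_interrupt sketch. */
static unsigned long recalc_next_expiry(struct timer_base *base)
{
	return base->next_expiry;
}

static unsigned long get_next_timer_interrupt(struct timer_base *base,
					       unsigned long basej)
{
	unsigned long nextevt;

	if (base->next_expiry_recalc)
		base->next_expiry = recalc_next_expiry(base);
	nextevt = base->next_expiry;

	/*
	 * Mark the base idle only when nothing is due within the next tick;
	 * trigger_dyntick_cpu() relies on this to know whether a remote
	 * enqueue has to wake this CPU up.
	 */
	base->is_idle = !base->timers_pending ||
			(long)(nextevt - (basej + 1)) > 0;

	return base->timers_pending ? nextevt : basej + NEXT_TIMER_MAX_DELTA;
}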
1753 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in timer_clear_idle() local
1761 base->is_idle = false; in timer_clear_idle()
1769 static inline void __run_timers(struct timer_base *base) in __run_timers() argument
1774 if (time_before(jiffies, base->next_expiry)) in __run_timers()
1777 timer_base_lock_expiry(base); in __run_timers()
1778 raw_spin_lock_irq(&base->lock); in __run_timers()
1780 while (time_after_eq(jiffies, base->clk) && in __run_timers()
1781 time_after_eq(jiffies, base->next_expiry)) { in __run_timers()
1782 levels = collect_expired_timers(base, heads); in __run_timers()
1790 WARN_ON_ONCE(!levels && !base->next_expiry_recalc in __run_timers()
1791 && base->timers_pending); in __run_timers()
1792 base->clk++; in __run_timers()
1793 base->next_expiry = __next_timer_interrupt(base); in __run_timers()
1796 expire_timers(base, heads + levels); in __run_timers()
1798 raw_spin_unlock_irq(&base->lock); in __run_timers()
1799 timer_base_unlock_expiry(base); in __run_timers()
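
__run_timers() is the softirq loop: nothing happens until jiffies reaches next_expiry, then buckets are collected and expired repeatedly until the wheel clock has caught up with jiffies. A structural sketch; collect_expired(), expire_list() and next_expiry_after() are trivial stand-ins for collect_expired_timers(), expire_timers() and __next_timer_interrupt(), and the locking shown in the listing is reduced to a comment.

#define LVL_DEPTH 9	/* wheel levels (the exact number depends on HZ) */

struct timer_list;

struct timer_base {
	unsigned long clk;
	unsigned long next_expiry;
};

static unsigned long jiffies;	/* model of the global tick counter */

/* Trivial stand-ins; see the sketches above for the real logic. */
static int collect_expired(struct timer_base *base, struct timer_list **heads)
{
	(void)base; (void)heads;
	return 0;
}

static void expire_list(struct timer_base *base, struct timer_list **head)
{
	(void)base; (void)head;
}

static unsigned long next_expiry_after(struct timer_base *base)
{
	return base->clk + 1;
}

static void __run_timers(struct timer_base *base)
{
	struct timer_list *heads[LVL_DEPTH];

	/* time_before(jiffies, base->next_expiry): nothing due yet */
	if ((long)(jiffies - base->next_expiry) < 0)
		return;

	/* the kernel takes expiry_lock and base->lock around this loop */
	while ((long)(jiffies - base->clk) >= 0 &&
	       (long)(jiffies - base->next_expiry) >= 0) {
		int levels = collect_expired(base, heads);

		base->clk++;
		base->next_expiry = next_expiry_after(base);

		while (levels--)
			expire_list(base, &heads[levels]);
	}
}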
1807 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in run_timer_softirq() local
1809 __run_timers(base); in run_timer_softirq()
1819 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in run_local_timers() local
1823 if (time_before(jiffies, base->next_expiry)) { in run_local_timers()
1827 base++; in run_local_timers()
1828 if (time_before(jiffies, base->next_expiry)) in run_local_timers()
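
run_local_timers() runs from the tick and raises TIMER_SOFTIRQ only when the standard or the deferrable base might have an expired timer, which is why the listing shows the base++ step peeking at the second base. A sketch of that check; raise_timer_softirq() is a placeholder for raise_softirq(TIMER_SOFTIRQ).

enum { BASE_STD, BASE_DEF, NR_BASES };

struct timer_base { unsigned long next_expiry; };

static unsigned long jiffies;			/* model of the tick counter */
static struct timer_base timer_bases[NR_BASES];	/* this CPU's two bases */

static void raise_timer_softirq(void)
{
	/* would do raise_softirq(TIMER_SOFTIRQ) in the kernel */
}

static void run_local_timers(void)
{
	struct timer_base *base = &timer_bases[BASE_STD];

	/* time_before(jiffies, base->next_expiry) on the standard base */
	if ((long)(jiffies - base->next_expiry) < 0) {
		base++;			/* peek at the deferrable base too */
		if ((long)(jiffies - base->next_expiry) < 0)
			return;		/* neither base has work: skip the softirq */
	}
	raise_timer_softirq();
}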
2006 struct timer_base *base; in timers_prepare_cpu() local
2010 base = per_cpu_ptr(&timer_bases[b], cpu); in timers_prepare_cpu()
2011 base->clk = jiffies; in timers_prepare_cpu()
2012 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in timers_prepare_cpu()
2013 base->next_expiry_recalc = false; in timers_prepare_cpu()
2014 base->timers_pending = false; in timers_prepare_cpu()
2015 base->is_idle = false; in timers_prepare_cpu()
2060 struct timer_base *base; in init_timer_cpu() local
2064 base = per_cpu_ptr(&timer_bases[i], cpu); in init_timer_cpu()
2065 base->cpu = cpu; in init_timer_cpu()
2066 raw_spin_lock_init(&base->lock); in init_timer_cpu()
2067 base->clk = jiffies; in init_timer_cpu()
2068 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in init_timer_cpu()
2069 timer_base_init_expiry_lock(base); in init_timer_cpu()