• Home
  • Raw
  • Download

Lines matching references to the identifier `base` (cross-reference listing of the Linux kernel timer-wheel code — presumably kernel/time/timer.c; each entry shows its original line number and enclosing function).

555 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)  in trigger_dyntick_cpu()  argument
565 if (tick_nohz_full_cpu(base->cpu)) in trigger_dyntick_cpu()
566 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
575 if (base->is_idle) in trigger_dyntick_cpu()
576 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
584 static void enqueue_timer(struct timer_base *base, struct timer_list *timer, in enqueue_timer() argument
588 hlist_add_head(&timer->entry, base->vectors + idx); in enqueue_timer()
589 __set_bit(idx, base->pending_map); in enqueue_timer()
599 if (time_before(bucket_expiry, base->next_expiry)) { in enqueue_timer()
604 base->next_expiry = bucket_expiry; in enqueue_timer()
605 base->timers_pending = true; in enqueue_timer()
606 base->next_expiry_recalc = false; in enqueue_timer()
607 trigger_dyntick_cpu(base, timer); in enqueue_timer()
611 static void internal_add_timer(struct timer_base *base, struct timer_list *timer) in internal_add_timer() argument
616 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry); in internal_add_timer()
617 enqueue_timer(base, timer, idx, bucket_expiry); in internal_add_timer()
839 static int detach_if_pending(struct timer_list *timer, struct timer_base *base, in detach_if_pending() argument
847 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) { in detach_if_pending()
848 __clear_bit(idx, base->pending_map); in detach_if_pending()
849 base->next_expiry_recalc = true; in detach_if_pending()
858 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); in get_timer_cpu_base() local
865 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); in get_timer_cpu_base()
866 return base; in get_timer_cpu_base()
871 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in get_timer_this_cpu_base() local
878 base = this_cpu_ptr(&timer_bases[BASE_DEF]); in get_timer_this_cpu_base()
879 return base; in get_timer_this_cpu_base()
888 get_target_base(struct timer_base *base, unsigned tflags) in get_target_base() argument
898 static inline void forward_timer_base(struct timer_base *base) in forward_timer_base() argument
907 if ((long)(jnow - base->clk) < 1) in forward_timer_base()
914 if (time_after(base->next_expiry, jnow)) { in forward_timer_base()
915 base->clk = jnow; in forward_timer_base()
917 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) in forward_timer_base()
919 base->clk = base->next_expiry; in forward_timer_base()
937 __acquires(timer->base->lock) in lock_timer_base()
940 struct timer_base *base; in lock_timer_base() local
951 base = get_timer_base(tf); in lock_timer_base()
952 raw_spin_lock_irqsave(&base->lock, *flags); in lock_timer_base()
954 return base; in lock_timer_base()
955 raw_spin_unlock_irqrestore(&base->lock, *flags); in lock_timer_base()
969 struct timer_base *base, *new_base; in __mod_timer() local
999 base = lock_timer_base(timer, &flags); in __mod_timer()
1000 forward_timer_base(base); in __mod_timer()
1008 clk = base->clk; in __mod_timer()
1025 base = lock_timer_base(timer, &flags); in __mod_timer()
1026 forward_timer_base(base); in __mod_timer()
1029 ret = detach_if_pending(timer, base, false); in __mod_timer()
1033 new_base = get_target_base(base, timer->flags); in __mod_timer()
1035 if (base != new_base) { in __mod_timer()
1043 if (likely(base->running_timer != timer)) { in __mod_timer()
1047 raw_spin_unlock(&base->lock); in __mod_timer()
1048 base = new_base; in __mod_timer()
1049 raw_spin_lock(&base->lock); in __mod_timer()
1051 (timer->flags & ~TIMER_BASEMASK) | base->cpu); in __mod_timer()
1052 forward_timer_base(base); in __mod_timer()
1065 if (idx != UINT_MAX && clk == base->clk) in __mod_timer()
1066 enqueue_timer(base, timer, idx, bucket_expiry); in __mod_timer()
1068 internal_add_timer(base, timer); in __mod_timer()
1071 raw_spin_unlock_irqrestore(&base->lock, flags); in __mod_timer()
1163 struct timer_base *new_base, *base; in add_timer_on() local
1175 base = lock_timer_base(timer, &flags); in add_timer_on()
1176 if (base != new_base) { in add_timer_on()
1179 raw_spin_unlock(&base->lock); in add_timer_on()
1180 base = new_base; in add_timer_on()
1181 raw_spin_lock(&base->lock); in add_timer_on()
1185 forward_timer_base(base); in add_timer_on()
1188 internal_add_timer(base, timer); in add_timer_on()
1189 raw_spin_unlock_irqrestore(&base->lock, flags); in add_timer_on()
1206 struct timer_base *base; in del_timer() local
1213 base = lock_timer_base(timer, &flags); in del_timer()
1214 ret = detach_if_pending(timer, base, true); in del_timer()
1215 raw_spin_unlock_irqrestore(&base->lock, flags); in del_timer()
1231 struct timer_base *base; in try_to_del_timer_sync() local
1237 base = lock_timer_base(timer, &flags); in try_to_del_timer_sync()
1239 if (base->running_timer != timer) in try_to_del_timer_sync()
1240 ret = detach_if_pending(timer, base, true); in try_to_del_timer_sync()
1242 raw_spin_unlock_irqrestore(&base->lock, flags); in try_to_del_timer_sync()
1249 static __init void timer_base_init_expiry_lock(struct timer_base *base) in timer_base_init_expiry_lock() argument
1251 spin_lock_init(&base->expiry_lock); in timer_base_init_expiry_lock()
1254 static inline void timer_base_lock_expiry(struct timer_base *base) in timer_base_lock_expiry() argument
1256 spin_lock(&base->expiry_lock); in timer_base_lock_expiry()
1259 static inline void timer_base_unlock_expiry(struct timer_base *base) in timer_base_unlock_expiry() argument
1261 spin_unlock(&base->expiry_lock); in timer_base_unlock_expiry()
1271 static void timer_sync_wait_running(struct timer_base *base) in timer_sync_wait_running() argument
1273 if (atomic_read(&base->timer_waiters)) { in timer_sync_wait_running()
1274 raw_spin_unlock_irq(&base->lock); in timer_sync_wait_running()
1275 spin_unlock(&base->expiry_lock); in timer_sync_wait_running()
1276 spin_lock(&base->expiry_lock); in timer_sync_wait_running()
1277 raw_spin_lock_irq(&base->lock); in timer_sync_wait_running()
1297 struct timer_base *base = get_timer_base(tf); in del_timer_wait_running() local
1307 atomic_inc(&base->timer_waiters); in del_timer_wait_running()
1308 spin_lock_bh(&base->expiry_lock); in del_timer_wait_running()
1309 atomic_dec(&base->timer_waiters); in del_timer_wait_running()
1310 spin_unlock_bh(&base->expiry_lock); in del_timer_wait_running()
1314 static inline void timer_base_init_expiry_lock(struct timer_base *base) { } in timer_base_init_expiry_lock() argument
1315 static inline void timer_base_lock_expiry(struct timer_base *base) { } in timer_base_lock_expiry() argument
1316 static inline void timer_base_unlock_expiry(struct timer_base *base) { } in timer_base_unlock_expiry() argument
1317 static inline void timer_sync_wait_running(struct timer_base *base) { } in timer_sync_wait_running() argument
1445 static void expire_timers(struct timer_base *base, struct hlist_head *head) in expire_timers() argument
1452 unsigned long baseclk = base->clk - 1; in expire_timers()
1460 base->running_timer = timer; in expire_timers()
1466 raw_spin_unlock(&base->lock); in expire_timers()
1468 raw_spin_lock(&base->lock); in expire_timers()
1469 base->running_timer = NULL; in expire_timers()
1471 raw_spin_unlock_irq(&base->lock); in expire_timers()
1473 raw_spin_lock_irq(&base->lock); in expire_timers()
1474 base->running_timer = NULL; in expire_timers()
1475 timer_sync_wait_running(base); in expire_timers()
1480 static int collect_expired_timers(struct timer_base *base, in collect_expired_timers() argument
1483 unsigned long clk = base->clk = base->next_expiry; in collect_expired_timers()
1491 if (__test_and_clear_bit(idx, base->pending_map)) { in collect_expired_timers()
1492 vec = base->vectors + idx; in collect_expired_timers()
1510 static int next_pending_bucket(struct timer_base *base, unsigned offset, in next_pending_bucket() argument
1516 pos = find_next_bit(base->pending_map, end, start); in next_pending_bucket()
1520 pos = find_next_bit(base->pending_map, start, offset); in next_pending_bucket()
1528 static unsigned long __next_timer_interrupt(struct timer_base *base) in __next_timer_interrupt() argument
1533 next = base->clk + NEXT_TIMER_MAX_DELTA; in __next_timer_interrupt()
1534 clk = base->clk; in __next_timer_interrupt()
1536 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); in __next_timer_interrupt()
1594 base->next_expiry_recalc = false; in __next_timer_interrupt()
1595 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); in __next_timer_interrupt()
1644 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in get_next_timer_interrupt() local
1655 raw_spin_lock(&base->lock); in get_next_timer_interrupt()
1656 if (base->next_expiry_recalc) in get_next_timer_interrupt()
1657 base->next_expiry = __next_timer_interrupt(base); in get_next_timer_interrupt()
1658 nextevt = base->next_expiry; in get_next_timer_interrupt()
1665 if (time_after(basej, base->clk)) { in get_next_timer_interrupt()
1667 base->clk = basej; in get_next_timer_interrupt()
1668 else if (time_after(nextevt, base->clk)) in get_next_timer_interrupt()
1669 base->clk = nextevt; in get_next_timer_interrupt()
1674 base->is_idle = false; in get_next_timer_interrupt()
1676 if (base->timers_pending) in get_next_timer_interrupt()
1686 base->is_idle = true; in get_next_timer_interrupt()
1688 raw_spin_unlock(&base->lock); in get_next_timer_interrupt()
1700 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in timer_clear_idle() local
1708 base->is_idle = false; in timer_clear_idle()
1716 static inline void __run_timers(struct timer_base *base) in __run_timers() argument
1721 if (time_before(jiffies, base->next_expiry)) in __run_timers()
1724 timer_base_lock_expiry(base); in __run_timers()
1725 raw_spin_lock_irq(&base->lock); in __run_timers()
1727 while (time_after_eq(jiffies, base->clk) && in __run_timers()
1728 time_after_eq(jiffies, base->next_expiry)) { in __run_timers()
1729 levels = collect_expired_timers(base, heads); in __run_timers()
1737 WARN_ON_ONCE(!levels && !base->next_expiry_recalc in __run_timers()
1738 && base->timers_pending); in __run_timers()
1739 base->clk++; in __run_timers()
1740 base->next_expiry = __next_timer_interrupt(base); in __run_timers()
1743 expire_timers(base, heads + levels); in __run_timers()
1745 raw_spin_unlock_irq(&base->lock); in __run_timers()
1746 timer_base_unlock_expiry(base); in __run_timers()
1754 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in run_timer_softirq() local
1756 __run_timers(base); in run_timer_softirq()
1766 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); in run_local_timers() local
1770 if (time_before(jiffies, base->next_expiry)) { in run_local_timers()
1774 base++; in run_local_timers()
1775 if (time_before(jiffies, base->next_expiry)) in run_local_timers()
1955 struct timer_base *base; in timers_prepare_cpu() local
1959 base = per_cpu_ptr(&timer_bases[b], cpu); in timers_prepare_cpu()
1960 base->clk = jiffies; in timers_prepare_cpu()
1961 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in timers_prepare_cpu()
1962 base->timers_pending = false; in timers_prepare_cpu()
1963 base->is_idle = false; in timers_prepare_cpu()
2008 struct timer_base *base; in init_timer_cpu() local
2012 base = per_cpu_ptr(&timer_bases[i], cpu); in init_timer_cpu()
2013 base->cpu = cpu; in init_timer_cpu()
2014 raw_spin_lock_init(&base->lock); in init_timer_cpu()
2015 base->clk = jiffies; in init_timer_cpu()
2016 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in init_timer_cpu()
2017 timer_base_init_expiry_lock(base); in init_timer_cpu()