• Home
  • Raw
  • Download

Lines matching references to the identifier `base` (cross-reference search results from kernel/time/hrtimer.c; each entry shows the original file line number, the matching line fragment, and the enclosing function)

124 static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)  in hrtimer_base_is_online()  argument
129 return likely(base->online); in hrtimer_base_is_online()
168 __acquires(&timer->base->lock) in lock_hrtimer_base()
170 struct hrtimer_clock_base *base; in lock_hrtimer_base() local
173 base = READ_ONCE(timer->base); in lock_hrtimer_base()
174 if (likely(base != &migration_base)) { in lock_hrtimer_base()
175 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
176 if (likely(base == timer->base)) in lock_hrtimer_base()
177 return base; in lock_hrtimer_base()
179 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
226 static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned) in get_target_base() argument
228 if (!hrtimer_base_is_online(base)) { in get_target_base()
238 return base; in get_target_base()
254 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, in switch_hrtimer_base() argument
259 int basenum = base->index; in switch_hrtimer_base()
266 if (base != new_base) { in switch_hrtimer_base()
277 return base; in switch_hrtimer_base()
280 WRITE_ONCE(timer->base, &migration_base); in switch_hrtimer_base()
281 raw_spin_unlock(&base->cpu_base->lock); in switch_hrtimer_base()
287 raw_spin_lock(&base->cpu_base->lock); in switch_hrtimer_base()
289 WRITE_ONCE(timer->base, base); in switch_hrtimer_base()
292 WRITE_ONCE(timer->base, new_base); in switch_hrtimer_base()
306 __acquires(&timer->base->cpu_base->lock) in lock_hrtimer_base()
308 struct hrtimer_clock_base *base = timer->base; in lock_hrtimer_base() local
310 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
312 return base; in lock_hrtimer_base()
523 #define for_each_active_base(base, cpu_base, active) \ argument
524 while ((base = __next_base((cpu_base), &(active))))
531 struct hrtimer_clock_base *base; in __hrtimer_next_event_base() local
534 for_each_active_base(base, cpu_base, active) { in __hrtimer_next_event_base()
538 next = timerqueue_getnext(&base->active); in __hrtimer_next_event_base()
548 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in __hrtimer_next_event_base()
649 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) in hrtimer_update_base() argument
651 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; in hrtimer_update_base()
652 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; in hrtimer_update_base()
653 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; in hrtimer_update_base()
655 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, in hrtimer_update_base()
658 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; in hrtimer_update_base()
659 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot; in hrtimer_update_base()
660 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; in hrtimer_update_base()
754 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases); in hrtimer_switch_to_hres() local
758 base->cpu); in hrtimer_switch_to_hres()
761 base->hres_active = 1; in hrtimer_switch_to_hres()
790 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases); in retrigger_next_event() local
809 raw_spin_lock(&base->lock); in retrigger_next_event()
810 hrtimer_update_base(base); in retrigger_next_event()
811 if (hrtimer_hres_active(base)) in retrigger_next_event()
812 hrtimer_force_reprogram(base, 0); in retrigger_next_event()
814 hrtimer_update_next_event(base); in retrigger_next_event()
815 raw_spin_unlock(&base->lock); in retrigger_next_event()
828 struct hrtimer_clock_base *base = timer->base; in hrtimer_reprogram() local
829 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in hrtimer_reprogram()
848 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base; in hrtimer_reprogram()
868 if (base->cpu_base != cpu_base) in hrtimer_reprogram()
889 struct hrtimer_clock_base *base; in update_needs_ipi() local
929 for_each_active_base(base, cpu_base, active) { in update_needs_ipi()
932 next = timerqueue_getnext(&base->active); in update_needs_ipi()
933 expires = ktime_sub(next->expires, base->offset); in update_needs_ipi()
938 if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT) in update_needs_ipi()
1033 __releases(&timer->base->cpu_base->lock) in unlock_hrtimer_base()
1035 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); in unlock_hrtimer_base()
1101 struct hrtimer_clock_base *base, in enqueue_hrtimer() argument
1105 WARN_ON_ONCE(!base->cpu_base->online); in enqueue_hrtimer()
1107 base->cpu_base->active_bases |= 1 << base->index; in enqueue_hrtimer()
1112 return timerqueue_add(&base->active, &timer->node); in enqueue_hrtimer()
1126 struct hrtimer_clock_base *base, in __remove_hrtimer() argument
1129 struct hrtimer_cpu_base *cpu_base = base->cpu_base; in __remove_hrtimer()
1137 if (!timerqueue_del(&base->active, &timer->node)) in __remove_hrtimer()
1138 cpu_base->active_bases &= ~(1 << base->index); in __remove_hrtimer()
1156 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, in remove_hrtimer() argument
1173 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases); in remove_hrtimer()
1186 __remove_hrtimer(timer, base, state, reprogram); in remove_hrtimer()
1235 struct hrtimer_clock_base *base) in __hrtimer_start_range_ns() argument
1249 force_local = base->cpu_base == this_cpu_base; in __hrtimer_start_range_ns()
1250 force_local &= base->cpu_base->next_timer == timer; in __hrtimer_start_range_ns()
1269 remove_hrtimer(timer, base, true, force_local); in __hrtimer_start_range_ns()
1272 tim = ktime_add_safe(tim, base->get_time()); in __hrtimer_start_range_ns()
1280 new_base = switch_hrtimer_base(timer, base, in __hrtimer_start_range_ns()
1283 new_base = base; in __hrtimer_start_range_ns()
1330 struct hrtimer_clock_base *base; in hrtimer_start_range_ns() local
1345 base = lock_hrtimer_base(timer, &flags); in hrtimer_start_range_ns()
1347 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) in hrtimer_start_range_ns()
1367 struct hrtimer_clock_base *base; in hrtimer_try_to_cancel() local
1380 base = lock_hrtimer_base(timer, &flags); in hrtimer_try_to_cancel()
1383 ret = remove_hrtimer(timer, base, false, false); in hrtimer_try_to_cancel()
1393 static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) in hrtimer_cpu_base_init_expiry_lock() argument
1395 spin_lock_init(&base->softirq_expiry_lock); in hrtimer_cpu_base_init_expiry_lock()
1398 static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) in hrtimer_cpu_base_lock_expiry() argument
1399 __acquires(&base->softirq_expiry_lock) in hrtimer_cpu_base_lock_expiry()
1401 spin_lock(&base->softirq_expiry_lock); in hrtimer_cpu_base_lock_expiry()
1404 static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) in hrtimer_cpu_base_unlock_expiry() argument
1405 __releases(&base->softirq_expiry_lock) in hrtimer_cpu_base_unlock_expiry()
1407 spin_unlock(&base->softirq_expiry_lock); in hrtimer_cpu_base_unlock_expiry()
1429 static __always_inline bool is_migration_base(struct hrtimer_clock_base *base) in is_migration_base() argument
1431 return base == &migration_base; in is_migration_base()
1434 static __always_inline bool is_migration_base(struct hrtimer_clock_base *base) in is_migration_base() argument
1459 struct hrtimer_clock_base *base = READ_ONCE(timer->base); in hrtimer_cancel_wait_running() local
1465 if (!timer->is_soft || is_migration_base(base)) { in hrtimer_cancel_wait_running()
1477 atomic_inc(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1478 spin_lock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1479 atomic_dec(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1480 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1484 hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { } in hrtimer_cpu_base_init_expiry_lock() argument
1486 hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { } in hrtimer_cpu_base_lock_expiry() argument
1488 hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { } in hrtimer_cpu_base_unlock_expiry() argument
1489 static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base, in hrtimer_sync_wait_running() argument
1614 int base; in __hrtimer_init() local
1637 base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0; in __hrtimer_init()
1638 base += hrtimer_clockid_to_base(clock_id); in __hrtimer_init()
1641 timer->base = &cpu_base->clock_base[base]; in __hrtimer_init()
1674 struct hrtimer_clock_base *base; in hrtimer_active() local
1678 base = READ_ONCE(timer->base); in hrtimer_active()
1679 seq = raw_read_seqcount_begin(&base->seq); in hrtimer_active()
1682 base->running == timer) in hrtimer_active()
1685 } while (read_seqcount_retry(&base->seq, seq) || in hrtimer_active()
1686 base != READ_ONCE(timer->base)); in hrtimer_active()
1711 struct hrtimer_clock_base *base, in __run_hrtimer() argument
1722 base->running = timer; in __run_hrtimer()
1731 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1733 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); in __run_hrtimer()
1770 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS); in __run_hrtimer()
1779 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1781 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1782 base->running = NULL; in __run_hrtimer()
1788 struct hrtimer_clock_base *base; in __hrtimer_run_queues() local
1791 for_each_active_base(base, cpu_base, active) { in __hrtimer_run_queues()
1795 basenow = ktime_add(now, base->offset); in __hrtimer_run_queues()
1797 while ((node = timerqueue_getnext(&base->active))) { in __hrtimer_run_queues()
1817 __run_hrtimer(cpu_base, base, timer, &basenow, flags); in __hrtimer_run_queues()
2153 restart->nanosleep.clockid = t.timer.base->clockid; in hrtimer_nanosleep()
2262 timer->base = new_base; in migrate_hrtimer_list()