Lines matching references to "base" in kernel/time/hrtimer.c

147 static inline bool is_migration_base(struct hrtimer_clock_base *base)  in is_migration_base()  argument
149 return base == &migration_base; in is_migration_base()
168 struct hrtimer_clock_base *base; in lock_hrtimer_base() local
171 base = READ_ONCE(timer->base); in lock_hrtimer_base()
172 if (likely(base != &migration_base)) { in lock_hrtimer_base()
173 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
174 if (likely(base == timer->base)) in lock_hrtimer_base()
175 return base; in lock_hrtimer_base()
177 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
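
The lock_hrtimer_base() lines above show a lockless check against migration_base followed by a lock-and-recheck of timer->base. Below is a minimal userspace sketch of that retry pattern, assuming illustrative types (struct htimer, struct clock_base) and a pthread mutex in place of the cpu_base spinlock; it is a model of the pattern, not the kernel code itself.

    #include <pthread.h>
    #include <stdatomic.h>

    struct clock_base {
        pthread_mutex_t lock;                 /* stands in for cpu_base->lock */
    };

    struct htimer {
        _Atomic(struct clock_base *) base;    /* which base the timer lives on */
    };

    /* Sentinel base that is never locked; it marks a timer in migration. */
    static struct clock_base migration_base = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
    };

    /* Lock the base a timer is queued on, tolerating concurrent migration. */
    static struct clock_base *lock_timer_base(struct htimer *t)
    {
        for (;;) {
            struct clock_base *base = atomic_load(&t->base);

            if (base != &migration_base) {
                pthread_mutex_lock(&base->lock);
                /* Recheck: the timer may have moved while we took the lock. */
                if (base == atomic_load(&t->base))
                    return base;
                pthread_mutex_unlock(&base->lock);
            }
            /* Timer is in flight to another base; retry. */
        }
    }
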
202 struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, in get_target_base() argument
209 return base; in get_target_base()
225 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, in switch_hrtimer_base() argument
230 int basenum = base->index; in switch_hrtimer_base()
237 if (base != new_base) { in switch_hrtimer_base()
248 return base; in switch_hrtimer_base()
251 WRITE_ONCE(timer->base, &migration_base); in switch_hrtimer_base()
252 raw_spin_unlock(&base->cpu_base->lock); in switch_hrtimer_base()
258 raw_spin_lock(&base->cpu_base->lock); in switch_hrtimer_base()
260 WRITE_ONCE(timer->base, base); in switch_hrtimer_base()
263 WRITE_ONCE(timer->base, new_base); in switch_hrtimer_base()
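
switch_hrtimer_base() (lines 251-263 above) performs the other half of that protocol: the timer is parked on migration_base while the locks are swapped, so lock_timer_base() callers retry instead of taking a stale lock. A sketch of the happy path, continuing the illustrative types from the previous block; the kernel's rollback when the target CPU cannot take the timer (lines 258-260) is omitted.

    static struct clock_base *switch_base(struct htimer *t,
                                          struct clock_base *oldbase,
                                          struct clock_base *newbase)
    {
        if (newbase == oldbase)
            return oldbase;

        /* Publish the "in flight" marker so concurrent lockers retry. */
        atomic_store(&t->base, &migration_base);
        pthread_mutex_unlock(&oldbase->lock);
        pthread_mutex_lock(&newbase->lock);

        /* The real code may re-lock oldbase and restore t->base here
         * instead of completing the move; that path is left out. */
        atomic_store(&t->base, newbase);
        return newbase;
    }
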
276 static inline bool is_migration_base(struct hrtimer_clock_base *base) in is_migration_base() argument
284 struct hrtimer_clock_base *base = timer->base; in lock_hrtimer_base() local
286 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); in lock_hrtimer_base()
288 return base; in lock_hrtimer_base()
499 #define for_each_active_base(base, cpu_base, active) \ argument
500 while ((base = __next_base((cpu_base), &(active))))
507 struct hrtimer_clock_base *base; in __hrtimer_next_event_base() local
510 for_each_active_base(base, cpu_base, active) { in __hrtimer_next_event_base()
514 next = timerqueue_getnext(&base->active); in __hrtimer_next_event_base()
524 expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in __hrtimer_next_event_base()
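
The for_each_active_base() macro iterates only the clock bases whose bit is set in cpu_base->active_bases. A self-contained model of that bitmask walk follows, with ffs() standing in for the kernel's __ffs() and illustrative struct names. __hrtimer_next_event_base() then takes the first timer of each visited base and converts its expiry with ktime_sub(expires, base->offset).

    #include <stddef.h>
    #include <strings.h>                       /* ffs() */

    #define MAX_BASES 8

    struct cbase { long long offset; };

    struct cpubase {
        unsigned int active_bases;             /* bit n set => clock_base[n] has timers */
        struct cbase clock_base[MAX_BASES];
    };

    /* Return the next active base and clear its bit; NULL when none remain. */
    static struct cbase *next_base(struct cpubase *cb, unsigned int *active)
    {
        int idx;

        if (!*active)
            return NULL;
        idx = ffs((int)*active) - 1;           /* lowest set bit */
        *active &= ~(1u << idx);
        return &cb->clock_base[idx];
    }

    #define for_each_active(base, cb, active) \
        while ((base = next_base((cb), &(active))))
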
625 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) in hrtimer_update_base() argument
627 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; in hrtimer_update_base()
628 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; in hrtimer_update_base()
629 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; in hrtimer_update_base()
631 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, in hrtimer_update_base()
634 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; in hrtimer_update_base()
635 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot; in hrtimer_update_base()
636 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; in hrtimer_update_base()
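
hrtimer_update_base() fetches the current time together with the realtime/boottime/TAI offsets and then copies each offset into the corresponding _SOFT base, so the hard and soft bases of the same clock always agree. A trimmed illustration of that mirroring; indices and types below are illustrative only.

    enum {
        B_REALTIME, B_BOOTTIME, B_TAI,
        B_REALTIME_SOFT, B_BOOTTIME_SOFT, B_TAI_SOFT,
        B_MAX
    };

    struct clkbase { long long offset; };

    struct clock_state { struct clkbase clock_base[B_MAX]; };

    static void mirror_offsets(struct clock_state *s)
    {
        s->clock_base[B_REALTIME_SOFT].offset = s->clock_base[B_REALTIME].offset;
        s->clock_base[B_BOOTTIME_SOFT].offset = s->clock_base[B_BOOTTIME].offset;
        s->clock_base[B_TAI_SOFT].offset      = s->clock_base[B_TAI].offset;
    }
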
737 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases); in hrtimer_switch_to_hres() local
741 base->cpu); in hrtimer_switch_to_hres()
744 base->hres_active = 1; in hrtimer_switch_to_hres()
773 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases); in retrigger_next_event() local
789 if (!__hrtimer_hres_active(base) && !tick_nohz_active) in retrigger_next_event()
792 raw_spin_lock(&base->lock); in retrigger_next_event()
793 hrtimer_update_base(base); in retrigger_next_event()
794 if (__hrtimer_hres_active(base)) in retrigger_next_event()
795 hrtimer_force_reprogram(base, 0); in retrigger_next_event()
797 hrtimer_update_next_event(base); in retrigger_next_event()
798 raw_spin_unlock(&base->lock); in retrigger_next_event()
811 struct hrtimer_clock_base *base = timer->base; in hrtimer_reprogram() local
812 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); in hrtimer_reprogram()
831 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base; in hrtimer_reprogram()
851 if (base->cpu_base != cpu_base) in hrtimer_reprogram()
872 struct hrtimer_clock_base *base; in update_needs_ipi() local
912 for_each_active_base(base, cpu_base, active) { in update_needs_ipi()
915 next = timerqueue_getnext(&base->active); in update_needs_ipi()
916 expires = ktime_sub(next->expires, base->offset); in update_needs_ipi()
921 if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT) in update_needs_ipi()
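
Two details recur in the hrtimer_reprogram() and update_needs_ipi() lines above: an absolute expiry is converted to base-local (CLOCK_MONOTONIC) time by subtracting base->offset, and the clock event device is only reprogrammed by the CPU that owns the base. A reduced model of those checks, with the softirq handling omitted and all names and types illustrative.

    #include <stdbool.h>
    #include <stdint.h>

    typedef int64_t ktime;

    struct cpubase_m  { ktime expires_next; };
    struct clockbase  { ktime offset; struct cpubase_m *cpu_base; };

    static bool maybe_reprogram(struct cpubase_m *this_cpu_base,
                                struct clockbase *base, ktime hard_expires)
    {
        ktime expires = hard_expires - base->offset;   /* base-local time */

        if (base->cpu_base != this_cpu_base)
            return false;              /* remote base: cannot touch its device */

        if (expires >= this_cpu_base->expires_next)
            return false;              /* nothing earlier to program */

        this_cpu_base->expires_next = expires;
        return true;                   /* caller would program the event device */
    }
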
1017 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); in unlock_hrtimer_base()
1081 struct hrtimer_clock_base *base, in enqueue_hrtimer() argument
1086 base->cpu_base->active_bases |= 1 << base->index; in enqueue_hrtimer()
1091 return timerqueue_add(&base->active, &timer->node); in enqueue_hrtimer()
1105 struct hrtimer_clock_base *base, in __remove_hrtimer() argument
1108 struct hrtimer_cpu_base *cpu_base = base->cpu_base; in __remove_hrtimer()
1116 if (!timerqueue_del(&base->active, &timer->node)) in __remove_hrtimer()
1117 cpu_base->active_bases &= ~(1 << base->index); in __remove_hrtimer()
1135 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, in remove_hrtimer() argument
1152 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases); in remove_hrtimer()
1165 __remove_hrtimer(timer, base, state, reprogram); in remove_hrtimer()
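
enqueue_hrtimer() and __remove_hrtimer() keep cpu_base->active_bases in sync with the per-base timer queues: the bit for a base is set whenever a timer is queued on it and cleared when the last timer leaves it. A toy version of that bookkeeping, with a plain counter standing in for the kernel's timerqueue and illustrative names throughout.

    struct tq  { int nr; };                      /* crude stand-in for a timerqueue */
    struct cpb { unsigned int active_bases; };
    struct cb  { unsigned int index; struct tq active; struct cpb *cpu_base; };

    static void enqueue(struct cb *base)
    {
        base->cpu_base->active_bases |= 1u << base->index;
        base->active.nr++;                       /* timerqueue_add() in the kernel */
    }

    static void dequeue(struct cb *base)
    {
        /* Clear the bit only when the base runs out of queued timers. */
        if (--base->active.nr == 0)
            base->cpu_base->active_bases &= ~(1u << base->index);
    }
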
1214 struct hrtimer_clock_base *base) in __hrtimer_start_range_ns() argument
1227 force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases); in __hrtimer_start_range_ns()
1228 force_local &= base->cpu_base->next_timer == timer; in __hrtimer_start_range_ns()
1241 remove_hrtimer(timer, base, true, force_local); in __hrtimer_start_range_ns()
1244 tim = ktime_add_safe(tim, base->get_time()); in __hrtimer_start_range_ns()
1252 new_base = switch_hrtimer_base(timer, base, in __hrtimer_start_range_ns()
1255 new_base = base; in __hrtimer_start_range_ns()
1283 struct hrtimer_clock_base *base; in hrtimer_start_range_ns() local
1296 base = lock_hrtimer_base(timer, &flags); in hrtimer_start_range_ns()
1298 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) in hrtimer_start_range_ns()
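
For relative timers, __hrtimer_start_range_ns() turns the requested delta into an absolute expiry with ktime_add_safe(tim, base->get_time()), which saturates rather than wrapping on overflow. Below is a stand-alone saturating add of the same shape; it assumes the GCC/Clang __builtin_add_overflow builtin and the constant name is illustrative.

    #include <stdint.h>

    #define KTIME_MAX_SKETCH INT64_MAX

    /* Add two signed 64-bit times, clamping to the maximum on overflow. */
    static int64_t add_saturating(int64_t a, int64_t b)
    {
        int64_t res;

        if (__builtin_add_overflow(a, b, &res))
            return KTIME_MAX_SKETCH;
        return res;
    }
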
1318 struct hrtimer_clock_base *base; in hrtimer_try_to_cancel() local
1331 base = lock_hrtimer_base(timer, &flags); in hrtimer_try_to_cancel()
1334 ret = remove_hrtimer(timer, base, false, false); in hrtimer_try_to_cancel()
1344 static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) in hrtimer_cpu_base_init_expiry_lock() argument
1346 spin_lock_init(&base->softirq_expiry_lock); in hrtimer_cpu_base_init_expiry_lock()
1349 static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) in hrtimer_cpu_base_lock_expiry() argument
1351 spin_lock(&base->softirq_expiry_lock); in hrtimer_cpu_base_lock_expiry()
1354 static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) in hrtimer_cpu_base_unlock_expiry() argument
1356 spin_unlock(&base->softirq_expiry_lock); in hrtimer_cpu_base_unlock_expiry()
1396 struct hrtimer_clock_base *base = READ_ONCE(timer->base); in hrtimer_cancel_wait_running() local
1402 if (!timer->is_soft || is_migration_base(base)) { in hrtimer_cancel_wait_running()
1414 atomic_inc(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1415 spin_lock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1416 atomic_dec(&base->cpu_base->timer_waiters); in hrtimer_cancel_wait_running()
1417 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock); in hrtimer_cancel_wait_running()
1421 hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { } in hrtimer_cpu_base_init_expiry_lock() argument
1423 hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { } in hrtimer_cpu_base_lock_expiry() argument
1425 hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { } in hrtimer_cpu_base_unlock_expiry() argument
1426 static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base, in hrtimer_sync_wait_running() argument
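
On PREEMPT_RT, hrtimer_cancel_wait_running() (lines 1396-1417 above) cannot busy-wait on a soft timer whose callback runs in softirq context; instead it bumps timer_waiters and then acquires and releases softirq_expiry_lock, which the softirq side holds for the whole expiry run (see the lock/unlock helpers above). A userspace model of that wait, with a pthread mutex in place of the kernel spinlock and illustrative names.

    #include <pthread.h>
    #include <stdatomic.h>

    struct cpu_base_m {
        pthread_mutex_t softirq_expiry_lock;   /* held while soft timers expire */
        atomic_int      timer_waiters;         /* cancellers currently waiting */
    };

    /* Canceller side: block until the currently running callback finishes. */
    static void wait_for_running_timer(struct cpu_base_m *cb)
    {
        atomic_fetch_add(&cb->timer_waiters, 1);
        pthread_mutex_lock(&cb->softirq_expiry_lock);
        atomic_fetch_sub(&cb->timer_waiters, 1);
        pthread_mutex_unlock(&cb->softirq_expiry_lock);
    }
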
1532 int base = hrtimer_clock_to_base_table[clock_id]; in hrtimer_clockid_to_base() local
1534 if (likely(base != HRTIMER_MAX_CLOCK_BASES)) in hrtimer_clockid_to_base()
1535 return base; in hrtimer_clockid_to_base()
1546 int base; in __hrtimer_init() local
1569 base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0; in __hrtimer_init()
1570 base += hrtimer_clockid_to_base(clock_id); in __hrtimer_init()
1573 timer->base = &cpu_base->clock_base[base]; in __hrtimer_init()
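
__hrtimer_init() picks the clock_base[] slot arithmetically: soft timers use the upper half of the array, so the index is the hard base plus half of HRTIMER_MAX_CLOCK_BASES. A small illustration with constants mirroring the kernel's layout; the names below are illustrative.

    enum {
        BASE_MONOTONIC, BASE_REALTIME, BASE_BOOTTIME, BASE_TAI,
        BASE_MONOTONIC_SOFT, BASE_REALTIME_SOFT, BASE_BOOTTIME_SOFT, BASE_TAI_SOFT,
        MAX_CLOCK_BASES
    };

    static int clock_to_base_index(int hard_base, int softtimer)
    {
        int base = softtimer ? MAX_CLOCK_BASES / 2 : 0;

        /* e.g. BASE_REALTIME (1) + 4 == BASE_REALTIME_SOFT (5) */
        return base + hard_base;
    }
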
1606 struct hrtimer_clock_base *base; in hrtimer_active() local
1610 base = READ_ONCE(timer->base); in hrtimer_active()
1611 seq = raw_read_seqcount_begin(&base->seq); in hrtimer_active()
1614 base->running == timer) in hrtimer_active()
1617 } while (read_seqcount_retry(&base->seq, seq) || in hrtimer_active()
1618 base != READ_ONCE(timer->base)); in hrtimer_active()
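
hrtimer_active() decides without locks whether a timer is enqueued or its callback is running: it samples base->seq, checks the state, and retries if the sequence moved or the timer switched bases in between. A simplified C11 model of that retry loop; the kernel's seqcount primitives are replaced by plain atomics and the names are illustrative.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct cbase_m {
        atomic_uint      seq;                  /* bumped around expiry (see below) */
        _Atomic(void *)  running;              /* timer whose callback runs, or NULL */
    };

    struct timer_m {
        _Atomic(struct cbase_m *) base;
        atomic_int                enqueued;
    };

    static bool timer_active(struct timer_m *t)
    {
        struct cbase_m *base;
        unsigned int seq;

        do {
            base = atomic_load(&t->base);
            seq  = atomic_load(&base->seq);    /* raw_read_seqcount_begin() */

            if (atomic_load(&t->enqueued) ||
                atomic_load(&base->running) == (void *)t)
                return true;

            /* Retry if the base published new state or the timer migrated. */
        } while (atomic_load(&base->seq) != seq ||
                 base != atomic_load(&t->base));

        return false;
    }
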
1643 struct hrtimer_clock_base *base, in __run_hrtimer() argument
1654 base->running = timer; in __run_hrtimer()
1663 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1665 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); in __run_hrtimer()
1702 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS); in __run_hrtimer()
1711 raw_write_seqcount_barrier(&base->seq); in __run_hrtimer()
1713 WARN_ON_ONCE(base->running != timer); in __run_hrtimer()
1714 base->running = NULL; in __run_hrtimer()
1720 struct hrtimer_clock_base *base; in __hrtimer_run_queues() local
1723 for_each_active_base(base, cpu_base, active) { in __hrtimer_run_queues()
1727 basenow = ktime_add(now, base->offset); in __hrtimer_run_queues()
1729 while ((node = timerqueue_getnext(&base->active))) { in __hrtimer_run_queues()
1749 __run_hrtimer(cpu_base, base, timer, &basenow, flags); in __hrtimer_run_queues()
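
__run_hrtimer() and __hrtimer_run_queues() tie the pieces together: each expired timer is published in base->running, removed from the queue, its callback is invoked with the base lock dropped, and it is re-enqueued if the callback asked for a restart. The seq barriers on either side are what make the hrtimer_active() loop above reliable. A condensed, comment-heavy sketch of that ordering, with locking reduced to comments and all names illustrative.

    enum restart { NORESTART, RESTART };

    struct rtimer { enum restart (*fn)(struct rtimer *); };

    static void run_one_timer(struct rtimer *timer)
    {
        /* 1. base->running = timer; seq barrier so timer_active() sees it      */
        /* 2. remove the timer from the queue (it still counts as "active")     */
        /* 3. drop cpu_base->lock: the callback runs unlocked                   */
        enum restart r = timer->fn(timer);
        /* 4. re-acquire cpu_base->lock                                         */
        if (r == RESTART) {
            /* 5. enqueue again in absolute mode, as on line 1702 above         */
        }
        /* 6. seq barrier; base->running = NULL                                 */
    }
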
2108 restart->nanosleep.clockid = t.timer.base->clockid; in hrtimer_nanosleep()
2206 timer->base = new_base; in migrate_hrtimer_list()