/kernel/sched/ |
D | loadavg.c |
      81  long nr_active, delta = 0;    in calc_load_fold_active() local
      87  delta = nr_active - this_rq->calc_load_active;    in calc_load_fold_active()
      91  return delta;    in calc_load_fold_active()
     237  long delta;    in calc_load_nohz_start() local
     243  delta = calc_load_fold_active(this_rq, 0);    in calc_load_nohz_start()
     244  if (delta) {    in calc_load_nohz_start()
     247  atomic_long_add(delta, &calc_load_nohz[idx]);    in calc_load_nohz_start()
     274  long delta = 0;    in calc_load_nohz_fold() local
     277  delta = atomic_long_xchg(&calc_load_nohz[idx], 0);    in calc_load_nohz_fold()
     279  return delta;    in calc_load_nohz_fold()
     [all …]
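The loadavg matches follow one pattern: each CPU computes how far its active-task count moved since the last sample and folds that signed delta into a shared accumulator, which is later drained with an atomic exchange. Below is a minimal sketch of that fold; plain variables and toy names stand in for the kernel's per-CPU and atomic types, this is not the kernel API.

    #include <stdio.h>

    /* Toy stand-in for calc_load_nohz[idx]; the kernel uses an atomic_long_t. */
    static long shared_fold;

    /* Return the signed change in the active count since the last sample. */
    static long fold_active(long *last_sample, long nr_active)
    {
        long delta = 0;

        if (nr_active != *last_sample) {
            delta = nr_active - *last_sample;
            *last_sample = nr_active;
        }
        return delta;
    }

    int main(void)
    {
        long cpu_active = 3, drained;

        shared_fold += fold_active(&cpu_active, 5);  /* two more runnable tasks */
        shared_fold += fold_active(&cpu_active, 4);  /* one went back to sleep  */

        drained = shared_fold;     /* stands in for atomic_long_xchg(.., 0) */
        shared_fold = 0;
        printf("drained delta=%ld\n", drained);      /* prints 1 */
        return 0;
    }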
|
D | stats.h |
       9  rq_sched_info_arrive(struct rq *rq, unsigned long long delta)    in rq_sched_info_arrive() argument
      12  rq->rq_sched_info.run_delay += delta;    in rq_sched_info_arrive()
      21  rq_sched_info_depart(struct rq *rq, unsigned long long delta)    in rq_sched_info_depart() argument
      24  rq->rq_cpu_time += delta;    in rq_sched_info_depart()
      28  rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)    in rq_sched_info_dequeued() argument
      31  rq->rq_sched_info.run_delay += delta;    in rq_sched_info_dequeued()
      44  static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }    in rq_sched_info_arrive() argument
      45  static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }    in rq_sched_info_dequeued() argument
      46  static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }    in rq_sched_info_depart() argument
     158  unsigned long long now = rq_clock(rq), delta = 0;    in sched_info_dequeued() local
     [all …]
|
D | pelt.c |
     110  accumulate_sum(u64 delta, struct sched_avg *sa,    in accumulate_sum() argument
     113  u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */    in accumulate_sum()
     116  delta += sa->period_contrib;    in accumulate_sum()
     117  periods = delta / 1024; /* A period is 1024us (~1ms) */    in accumulate_sum()
     131  delta %= 1024;    in accumulate_sum()
     133  1024 - sa->period_contrib, delta);    in accumulate_sum()
     135  sa->period_contrib = delta;    in accumulate_sum()
     179  u64 delta;    in ___update_load_sum() local
     181  delta = now - sa->last_update_time;    in ___update_load_sum()
     186  if ((s64)delta < 0) {    in ___update_load_sum()
     [all …]
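The accumulate_sum() lines show the sub-period carry: elapsed microseconds are added to the remainder left over from the previous update, split into whole 1024us periods, and the new remainder is stored back in period_contrib. A minimal sketch of just that bookkeeping follows; decay of the running sums is left out, and toy_avg is an illustrative type, not the kernel's sched_avg.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_avg {
        uint64_t last_update_time;   /* microseconds of the previous update */
        uint32_t period_contrib;     /* partial period carried between updates */
        uint64_t periods_total;      /* full 1024us periods seen so far */
    };

    static void toy_accumulate(struct toy_avg *sa, uint64_t now)
    {
        uint64_t delta = now - sa->last_update_time; /* us since last update */
        uint64_t periods;

        sa->last_update_time = now;
        delta += sa->period_contrib;        /* include the carried remainder */
        periods = delta / 1024;             /* full 1024us periods elapsed */
        sa->periods_total += periods;
        sa->period_contrib = delta % 1024;  /* carry the partial period forward */
    }

    int main(void)
    {
        struct toy_avg sa = { 0, 0, 0 };

        toy_accumulate(&sa, 1500);   /* 1 full period, 476us carried */
        toy_accumulate(&sa, 2600);   /* 1100 + 476 = 1576us: 1 more period, 552us carried */
        printf("periods=%llu contrib=%u\n",
               (unsigned long long)sa.periods_total, (unsigned)sa.period_contrib);
        return 0;
    }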
|
D | cputime.c |
      35  static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,    in irqtime_account_delta() argument
      41  cpustat[idx] += delta;    in irqtime_account_delta()
      42  irqtime->total += delta;    in irqtime_account_delta()
      43  irqtime->tick_delta += delta;    in irqtime_account_delta()
      54  s64 delta;    in irqtime_account_irq() local
      61  delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;    in irqtime_account_irq()
      62  irqtime->irq_start_time += delta;    in irqtime_account_irq()
      71  irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);    in irqtime_account_irq()
      73  irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);    in irqtime_account_irq()
      80  u64 delta;    in irqtime_tick_accounted() local
      [all …]
|
D | pelt.h |
      58  static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)    in update_rq_clock_pelt() argument
      82  delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));    in update_rq_clock_pelt()
      83  delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));    in update_rq_clock_pelt()
      85  rq->clock_pelt += delta;    in update_rq_clock_pelt()
     173  update_rq_clock_pelt(struct rq *rq, s64 delta) { }    in update_rq_clock_pelt() argument
|
D | clock.c |
     268  s64 delta;    in sched_clock_local() local
     272  delta = now - scd->tick_raw;    in sched_clock_local()
     273  if (unlikely(delta < 0))    in sched_clock_local()
     274  delta = 0;    in sched_clock_local()
     285  clock = gtod + delta;    in sched_clock_local()
|
D | psi.c |
     260  u32 delta;    in get_recent_times() local
     273  delta = times[s] - groupc->times_prev[aggregator][s];    in get_recent_times()
     276  times[s] = delta;    in get_recent_times()
     277  if (delta)    in get_recent_times()
     629  u32 delta;    in record_times() local
     633  delta = now - groupc->state_start;    in record_times()
     637  groupc->times[PSI_IO_SOME] += delta;    in record_times()
     639  groupc->times[PSI_IO_FULL] += delta;    in record_times()
     643  groupc->times[PSI_MEM_SOME] += delta;    in record_times()
     645  groupc->times[PSI_MEM_FULL] += delta;    in record_times()
     [all …]
|
/kernel/trace/ |
D | trace_benchmark.c |
      41  u64 delta;    in trace_do_benchmark() local
      60  delta = stop - start;    in trace_do_benchmark()
      67  bm_first = delta;    in trace_do_benchmark()
      73  bm_last = delta;    in trace_do_benchmark()
      75  if (delta > bm_max)    in trace_do_benchmark()
      76  bm_max = delta;    in trace_do_benchmark()
      77  if (!bm_min || delta < bm_min)    in trace_do_benchmark()
      78  bm_min = delta;    in trace_do_benchmark()
      92  bm_total += delta;    in trace_do_benchmark()
      93  bm_totalsq += delta * delta;    in trace_do_benchmark()
      [all …]
|
D | trace_irqsoff.c |
     305  static bool report_latency(struct trace_array *tr, u64 delta)    in report_latency() argument
     308  if (delta < tracing_thresh)    in report_latency()
     311  if (delta <= tr->max_latency)    in report_latency()
     323  u64 T0, T1, delta;    in check_critical_timing() local
     329  delta = T1-T0;    in check_critical_timing()
     335  if (!report_latency(tr, delta))    in check_critical_timing()
     341  if (!report_latency(tr, delta))    in check_critical_timing()
     354  tr->max_latency = delta;    in check_critical_timing()
|
D | ring_buffer.c |
     361  static inline int test_time_stamp(u64 delta)    in test_time_stamp() argument
     363  if (delta & TS_DELTA_TEST)    in test_time_stamp()
     417  u64 delta;    member
    2308  rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)    in rb_add_time_stamp() argument
    2317  event->time_delta = delta & TS_MASK;    in rb_add_time_stamp()
    2318  event->array[0] = delta >> TS_SHIFT;    in rb_add_time_stamp()
    2348  u64 delta = info->delta;    in rb_update_event() local
    2352  delta = 0;    in rb_update_event()
    2361  event = rb_add_time_stamp(event, info->delta, abs);    in rb_update_event()
    2363  delta = 0;    in rb_update_event()
    [all …]
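The rb_add_time_stamp() lines split a delta that no longer fits in the event header: the low TS_MASK bits stay in time_delta and the high bits move to an extension word. Below is a sketch of the split and the matching recombination; the 27-bit width and the toy_event type are illustrative, not necessarily the ring buffer's real layout.

    #include <stdint.h>
    #include <stdio.h>

    #define TS_SHIFT 27
    #define TS_MASK  ((1ULL << TS_SHIFT) - 1)

    struct toy_event {
        uint32_t time_delta;   /* low bits, stored in the event header */
        uint64_t ext;          /* high bits, stored in an extension word */
    };

    static void store_delta(struct toy_event *ev, uint64_t delta)
    {
        ev->time_delta = (uint32_t)(delta & TS_MASK);
        ev->ext = delta >> TS_SHIFT;
    }

    static uint64_t load_delta(const struct toy_event *ev)
    {
        return (ev->ext << TS_SHIFT) | ev->time_delta;
    }

    int main(void)
    {
        struct toy_event ev;

        store_delta(&ev, 123456789012ULL);
        printf("roundtrip=%llu\n", (unsigned long long)load_delta(&ev));
        return 0;
    }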
|
/kernel/ |
D | tsacct.c |
      26  u64 delta;    in bacct_add_tsk() local
      31  delta = ktime_get_ns() - tsk->start_time;    in bacct_add_tsk()
      33  do_div(delta, NSEC_PER_USEC);    in bacct_add_tsk()
      34  stats->ac_etime = delta;    in bacct_add_tsk()
      36  do_div(delta, USEC_PER_SEC);    in bacct_add_tsk()
      37  stats->ac_btime = get_seconds() - delta;    in bacct_add_tsk()
     119  u64 time, delta;    in __acct_update_integrals() local
     125  delta = time - tsk->acct_timexpd;    in __acct_update_integrals()
     127  if (delta < TICK_NSEC)    in __acct_update_integrals()
     136  tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10;    in __acct_update_integrals()
     [all …]
|
D | torture.c |
      85  unsigned long delta;    in torture_offline() local
     114  delta = jiffies - starttime;    in torture_offline()
     115  *sum_offl += delta;    in torture_offline()
     117  *min_offl = delta;    in torture_offline()
     118  *max_offl = delta;    in torture_offline()
     120  if (*min_offl > delta)    in torture_offline()
     121  *min_offl = delta;    in torture_offline()
     122  if (*max_offl < delta)    in torture_offline()
     123  *max_offl = delta;    in torture_offline()
     138  unsigned long delta;    in torture_online() local
     [all …]
|
D | async.c |
     114  ktime_t uninitialized_var(calltime), delta, rettime;    in async_run_entry_fn()
     126  delta = ktime_sub(rettime, calltime);    in async_run_entry_fn()
     130  (long long)ktime_to_ns(delta) >> 10);    in async_run_entry_fn()
     290  ktime_t uninitialized_var(starttime), delta, endtime;    in async_synchronize_cookie_domain()
     301  delta = ktime_sub(endtime, starttime);    in async_synchronize_cookie_domain()
     305  (long long)ktime_to_ns(delta) >> 10);    in async_synchronize_cookie_domain()
|
D | watchdog_hld.c |
      78  ktime_t delta, now = ktime_get_mono_fast_ns();    in watchdog_check_timestamp() local
      80  delta = now - __this_cpu_read(last_timestamp);    in watchdog_check_timestamp()
      81  if (delta < watchdog_hrtimer_sample_threshold) {    in watchdog_check_timestamp()
|
/kernel/time/ |
D | tick-sched.c |
      57  ktime_t delta;    in tick_do_update_jiffies64() local
      63  delta = ktime_sub(now, READ_ONCE(last_jiffies_update));    in tick_do_update_jiffies64()
      64  if (delta < tick_period)    in tick_do_update_jiffies64()
      70  delta = ktime_sub(now, last_jiffies_update);    in tick_do_update_jiffies64()
      71  if (delta >= tick_period) {    in tick_do_update_jiffies64()
      73  delta = ktime_sub(delta, tick_period);    in tick_do_update_jiffies64()
      79  if (unlikely(delta >= tick_period)) {    in tick_do_update_jiffies64()
      82  ticks = ktime_divns(delta, incr);    in tick_do_update_jiffies64()
     521  ktime_t delta;    in update_ts_time_stats() local
     524  delta = ktime_sub(now, ts->idle_entrytime);    in update_ts_time_stats()
     [all …]
|
D | clockevents.c |
     236  int64_t delta;    in clockevents_program_min_delta() local
     240  delta = dev->min_delta_ns;    in clockevents_program_min_delta()
     241  dev->next_event = ktime_add_ns(ktime_get(), delta);    in clockevents_program_min_delta()
     247  clc = ((unsigned long long) delta * dev->mult) >> dev->shift;    in clockevents_program_min_delta()
     275  int64_t delta = 0;    in clockevents_program_min_delta() local
     279  delta += dev->min_delta_ns;    in clockevents_program_min_delta()
     280  dev->next_event = ktime_add_ns(ktime_get(), delta);    in clockevents_program_min_delta()
     286  clc = ((unsigned long long) delta * dev->mult) >> dev->shift;    in clockevents_program_min_delta()
     307  int64_t delta;    in clockevents_program_event() local
     326  delta = ktime_to_ns(ktime_sub(expires, ktime_get()));    in clockevents_program_event()
     [all …]
|
D | timekeeping.c |
     146  static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)    in tk_update_sleep_time() argument
     148  tk->offs_boot = ktime_add(tk->offs_boot, delta);    in tk_update_sleep_time()
     221  u64 now, last, mask, max, delta;    in timekeeping_get_delta() local
     239  delta = clocksource_delta(now, last, mask);    in timekeeping_get_delta()
     245  if (unlikely((~delta & mask) < (mask >> 3))) {    in timekeeping_get_delta()
     247  delta = 0;    in timekeeping_get_delta()
     251  if (unlikely(delta > max)) {    in timekeeping_get_delta()
     253  delta = tkr->clock->max_cycles;    in timekeeping_get_delta()
     256  return delta;    in timekeeping_get_delta()
     264  u64 cycle_now, delta;    in timekeeping_get_delta() local
     [all …]
|
D | timecounter.c |
      82  u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;    in timecounter_cyc2time() local
      90  if (delta > tc->cc->mask / 2) {    in timecounter_cyc2time()
      91  delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;    in timecounter_cyc2time()
      92  nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);    in timecounter_cyc2time()
      94  nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);    in timecounter_cyc2time()
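timecounter_cyc2time() takes the cycle delta modulo the counter mask and treats a delta larger than half the mask as a timestamp in the past. Below is a sketch of that wraparound rule on a toy 32-bit counter; toy_counter and its field names are illustrative stand-ins, not the kernel's struct timecounter.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_counter {
        uint64_t mask;           /* counter width, e.g. 2^32 - 1 */
        uint64_t ns_per_cycle;   /* fixed rate, kept simple for the sketch */
        uint64_t cycle_last;     /* last counter reading */
        uint64_t ns_last;        /* nanoseconds at cycle_last */
    };

    static int64_t toy_cyc2time(const struct toy_counter *tc, uint64_t cycle_tstamp)
    {
        uint64_t delta = (cycle_tstamp - tc->cycle_last) & tc->mask;

        if (delta > tc->mask / 2) {
            /* Timestamp is older than cycle_last: count backwards instead. */
            delta = (tc->cycle_last - cycle_tstamp) & tc->mask;
            return (int64_t)(tc->ns_last - delta * tc->ns_per_cycle);
        }
        return (int64_t)(tc->ns_last + delta * tc->ns_per_cycle);
    }

    int main(void)
    {
        struct toy_counter tc = { 0xffffffffULL, 10, 0xfffffff0ULL, 1000000 };

        /* A reading of 0x10 wrapped past the 32-bit boundary: 0x20 cycles ahead. */
        printf("%lld ns\n", (long long)toy_cyc2time(&tc, 0x10));   /* 1000320 ns */
        return 0;
    }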
|
D | alarmtimer.c |
     266  ktime_t delta;    in alarmtimer_suspend() local
     273  delta = ktime_sub(next->expires, base->gettime());    in alarmtimer_suspend()
     274  if (!min || (delta < min)) {    in alarmtimer_suspend()
     276  min = delta;    in alarmtimer_suspend()
     444  ktime_t delta;    in alarm_forward() local
     446  delta = ktime_sub(now, alarm->node.expires);    in alarm_forward()
     448  if (delta < 0)    in alarm_forward()
     451  if (unlikely(delta >= interval)) {    in alarm_forward()
     454  overrun = ktime_divns(delta, incr);    in alarm_forward()
     487  ktime_t delta;    in alarmtimer_freezerset() local
     [all …]
|
D | ntp.c |
     402  s64 delta;    in second_overflow() local
     467  delta = ntp_offset_chunk(time_offset);    in second_overflow()
     468  time_offset -= delta;    in second_overflow()
     469  tick_length += delta;    in second_overflow()
     886  long delta, delta_mod;    in hardpps_update_freq() local
     906  delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);    in hardpps_update_freq()
     908  if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {    in hardpps_update_freq()
     910  "hardpps: PPSWANDER: change=%ld\n", delta);    in hardpps_update_freq()
     922  delta_mod = delta;    in hardpps_update_freq()
     936  return delta;    in hardpps_update_freq()
|
D | hrtimer.c |
     921  ktime_t delta;    in hrtimer_forward() local
     923  delta = ktime_sub(now, hrtimer_get_expires(timer));    in hrtimer_forward()
     925  if (delta < 0)    in hrtimer_forward()
     934  if (unlikely(delta >= interval)) {    in hrtimer_forward()
     937  orun = ktime_divns(delta, incr);    in hrtimer_forward()
    1614  ktime_t expires_next, now, entry_time, delta;    in hrtimer_interrupt() local
    1687  delta = ktime_sub(now, entry_time);    in hrtimer_interrupt()
    1688  if ((unsigned int)delta > cpu_base->max_hang_time)    in hrtimer_interrupt()
    1689  cpu_base->max_hang_time = (unsigned int) delta;    in hrtimer_interrupt()
    1694  if (delta > 100 * NSEC_PER_MSEC)    in hrtimer_interrupt()
    [all …]
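The hrtimer_forward() lines turn the delta between now and the old expiry into an overrun count and advance the expiry in whole intervals. A sketch of that arithmetic, with plain signed nanoseconds standing in for ktime_t; forward_sketch is a toy name, not the kernel function.

    #include <stdint.h>
    #include <stdio.h>

    /* Push a periodic timer's expiry forward past 'now' in whole 'interval'
     * steps and report how many periods elapsed. */
    static uint64_t forward_sketch(int64_t *expires, int64_t now, int64_t interval)
    {
        int64_t delta = now - *expires;
        uint64_t overrun = 1;

        if (delta < 0)
            return 0;                            /* not expired yet */

        if (delta >= interval) {
            overrun += delta / interval;         /* whole periods already missed */
            *expires += (delta / interval) * interval;
        }
        if (*expires <= now)                     /* land strictly after 'now' */
            *expires += interval;
        return overrun;
    }

    int main(void)
    {
        int64_t expires = 1000;
        uint64_t orun = forward_sketch(&expires, 4700, 1000);

        /* prints: overruns=4 next expiry=5000 */
        printf("overruns=%llu next expiry=%lld\n",
               (unsigned long long)orun, (long long)expires);
        return 0;
    }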
|
D | clocksource.c |
     190  u64 csnow, wdnow, cslast, wdlast, delta;    in clocksource_watchdog() local
     223  delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);    in clocksource_watchdog()
     224  wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,    in clocksource_watchdog()
     227  delta = clocksource_delta(csnow, cs->cs_last, cs->mask);    in clocksource_watchdog()
     228  cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);    in clocksource_watchdog()
     558  u64 now, delta, nsec = 0;    in clocksource_stop_suspend_timing() local
     574  delta = clocksource_delta(now, suspend_start,    in clocksource_stop_suspend_timing()
     576  nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,    in clocksource_stop_suspend_timing()
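The clocksource_cyc2ns() calls above are the usual fixed-point conversion: nanoseconds = (delta * mult) >> shift, with mult and shift calibrated per clocksource. A sketch with an illustrative 250 MHz counter; the mult/shift values are made up for the example, not a real calibration.

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a cycle delta to nanoseconds with fixed-point mult/shift. */
    static uint64_t cyc2ns_sketch(uint64_t delta, uint32_t mult, uint32_t shift)
    {
        return (delta * mult) >> shift;
    }

    int main(void)
    {
        uint32_t mult = 4096, shift = 10;   /* 4096 >> 10 == 4 ns per cycle (250 MHz) */

        /* 250,000,000 cycles at 4 ns each: prints 1000000000 ns */
        printf("%llu ns\n",
               (unsigned long long)cyc2ns_sketch(250000000ULL, mult, shift));
        return 0;
    }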
|
D | timer.c |
     499  unsigned long delta = expires - clk;    in calc_wheel_index() local
     502  if (delta < LVL_START(1)) {    in calc_wheel_index()
     504  } else if (delta < LVL_START(2)) {    in calc_wheel_index()
     506  } else if (delta < LVL_START(3)) {    in calc_wheel_index()
     508  } else if (delta < LVL_START(4)) {    in calc_wheel_index()
     510  } else if (delta < LVL_START(5)) {    in calc_wheel_index()
     512  } else if (delta < LVL_START(6)) {    in calc_wheel_index()
     514  } else if (delta < LVL_START(7)) {    in calc_wheel_index()
     516  } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {    in calc_wheel_index()
     518  } else if ((long) delta < 0) {    in calc_wheel_index()
     [all …]
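calc_wheel_index() picks a timer-wheel level purely from the size of the delta: each level covers a range that grows by a factor of 2^LVL_CLK_SHIFT. The sketch below shows that selection as a loop; the constants mirror the shape of the excerpt but are illustrative, not the kernel's exact configuration.

    #include <stdio.h>

    #define LVL_BITS       6
    #define LVL_CLK_SHIFT  3
    #define LVL_START(n)   ((1UL << LVL_BITS) << (((n) - 1) * LVL_CLK_SHIFT))

    /* Return the shallowest level whose range still covers 'delta'. */
    static int pick_level(unsigned long delta)
    {
        int lvl;

        for (lvl = 1; lvl <= 8; lvl++)
            if (delta < LVL_START(lvl))
                return lvl - 1;
        return 8;   /* anything longer is clamped into the deepest level */
    }

    int main(void)
    {
        printf("delta=50      -> level %d\n", pick_level(50));          /* 0 */
        printf("delta=4000    -> level %d\n", pick_level(4000));        /* 2 */
        printf("delta=1<<20   -> level %d\n", pick_level(1UL << 20));   /* 5 */
        return 0;
    }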
|
/kernel/cgroup/ |
D | rstat.c |
     321  struct cgroup_base_stat delta;    in cgroup_base_stat_flush() local
     331  delta.cputime.utime = cputime.utime - last_cputime->utime;    in cgroup_base_stat_flush()
     332  delta.cputime.stime = cputime.stime - last_cputime->stime;    in cgroup_base_stat_flush()
     333  delta.cputime.sum_exec_runtime = cputime.sum_exec_runtime -    in cgroup_base_stat_flush()
     338  cgroup_base_stat_accumulate(&delta, &cgrp->pending_bstat);    in cgroup_base_stat_flush()
     342  cgroup_base_stat_accumulate(&cgrp->bstat, &delta);    in cgroup_base_stat_flush()
     344  cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);    in cgroup_base_stat_flush()
|
/kernel/bpf/ |
D | tnum.c |
      23  u64 chi = min ^ max, delta;    in tnum_range() local
      33  delta = (1ULL << bits) - 1;    in tnum_range()
      34  return TNUM(min & ~delta, delta);    in tnum_range()
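tnum_range() builds a tristate number that covers every integer in [min, max]: each bit at or below the highest bit where min and max differ becomes unknown. A sketch of that derivation; tnum_sketch and range_sketch are illustrative stand-ins for the kernel's tnum helpers, not their real API.

    #include <stdint.h>
    #include <stdio.h>

    /* value holds the known bits, mask marks the unknown ones. */
    struct tnum_sketch { uint64_t value; uint64_t mask; };

    static struct tnum_sketch range_sketch(uint64_t min, uint64_t max)
    {
        uint64_t chi = min ^ max;   /* bits that differ anywhere in [min, max] */
        struct tnum_sketch t;
        uint64_t delta;
        int bits;

        if (chi == 0) {             /* min == max: every bit is known */
            t.value = min;
            t.mask = 0;
            return t;
        }
        bits = 64 - __builtin_clzll(chi);                    /* highest differing bit + 1 */
        delta = (bits >= 64) ? ~0ULL : (1ULL << bits) - 1;   /* unknown-bit mask */
        t.value = min & ~delta;
        t.mask = delta;
        return t;
    }

    int main(void)
    {
        struct tnum_sketch t = range_sketch(0x40, 0x4f);   /* the range 64..79 */

        /* prints: value=0x40 mask=0xf */
        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)t.value, (unsigned long long)t.mask);
        return 0;
    }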
|