/kernel/sched/ |
D | loadavg.c |
      81  long nr_active, delta = 0;  in calc_load_fold_active() local
      87  delta = nr_active - this_rq->calc_load_active;  in calc_load_fold_active()
      91  return delta;  in calc_load_fold_active()
     236  long delta;  in calc_load_nohz_fold() local
     238  delta = calc_load_fold_active(rq, 0);  in calc_load_nohz_fold()
     239  if (delta) {  in calc_load_nohz_fold()
     242  atomic_long_add(delta, &calc_load_nohz[idx]);  in calc_load_nohz_fold()
     287  long delta = 0;  in calc_load_nohz_read() local
     290  delta = atomic_long_xchg(&calc_load_nohz[idx], 0);  in calc_load_nohz_read()
     292  return delta;  in calc_load_nohz_read()
     [all …]
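The calc_load_* matches above are the load-average fold: each runqueue computes how much its active-task count changed since the last fold and adds that delta to a shared atomic bucket, which a reader later drains with an exchange. A minimal userspace sketch of the fold-and-drain pattern, assuming a simplified struct rq and C11 atomics in place of atomic_long_add()/atomic_long_xchg() (the real calc_load_fold_active() also takes an adjust argument, and the NO_HZ code double-buffers the bucket by index):

    #include <stdio.h>
    #include <stdatomic.h>

    struct rq { long nr_running, nr_uninterruptible, calc_load_active; };

    static atomic_long calc_load_bucket;   /* stands in for calc_load_nohz[idx] */

    /* Return the change in this CPU's active count since the last fold. */
    static long calc_load_fold_active(struct rq *rq)
    {
        long nr_active = rq->nr_running + rq->nr_uninterruptible;
        long delta = nr_active - rq->calc_load_active;

        rq->calc_load_active = nr_active;
        return delta;
    }

    int main(void)
    {
        struct rq rq = { .nr_running = 3 };

        atomic_fetch_add(&calc_load_bucket, calc_load_fold_active(&rq));
        rq.nr_running = 1;                  /* two tasks went to sleep */
        atomic_fetch_add(&calc_load_bucket, calc_load_fold_active(&rq));

        /* Drain, as calc_load_nohz_read() does with atomic_long_xchg(..., 0). */
        long delta = atomic_exchange(&calc_load_bucket, 0);
        printf("folded delta = %ld\n", delta);   /* prints 1 */
        return 0;
    }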
|
D | stats.h |
       9  rq_sched_info_arrive(struct rq *rq, unsigned long long delta)  in rq_sched_info_arrive() argument
      12  rq->rq_sched_info.run_delay += delta;  in rq_sched_info_arrive()
      21  rq_sched_info_depart(struct rq *rq, unsigned long long delta)  in rq_sched_info_depart() argument
      24  rq->rq_cpu_time += delta;  in rq_sched_info_depart()
      28  rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)  in rq_sched_info_dequeued() argument
      31  rq->rq_sched_info.run_delay += delta;  in rq_sched_info_dequeued()
      44  static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }  in rq_sched_info_arrive() argument
      45  static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }  in rq_sched_info_dequeued() argument
      46  static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }  in rq_sched_info_depart() argument
     179  unsigned long long now = rq_clock(rq), delta = 0;  in sched_info_dequeued() local
     [all …]
|
D | cputime.c |
      37  static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,  in irqtime_account_delta() argument
      43  cpustat[idx] += delta;  in irqtime_account_delta()
      44  irqtime->total += delta;  in irqtime_account_delta()
      45  irqtime->tick_delta += delta;  in irqtime_account_delta()
      56  s64 delta;  in irqtime_account_irq() local
      63  delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;  in irqtime_account_irq()
      64  irqtime->irq_start_time += delta;  in irqtime_account_irq()
      73  irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);  in irqtime_account_irq()
      75  irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);  in irqtime_account_irq()
      77  trace_android_rvh_account_irq(curr, cpu, delta);  in irqtime_account_irq()
      [all …]
|
D | pelt.c |
     165  accumulate_sum(u64 delta, struct sched_avg *sa,  in accumulate_sum() argument
     168  u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */  in accumulate_sum()
     171  delta += sa->period_contrib;  in accumulate_sum()
     172  periods = delta / 1024; /* A period is 1024us (~1ms) */  in accumulate_sum()
     186  delta %= 1024;  in accumulate_sum()
     199  1024 - sa->period_contrib, delta);  in accumulate_sum()
     202  sa->period_contrib = delta;  in accumulate_sum()
     246  u64 delta;  in ___update_load_sum() local
     248  delta = now - sa->last_update_time;  in ___update_load_sum()
     253  if ((s64)delta < 0) {  in ___update_load_sum()
     [all …]
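accumulate_sum() slices an arbitrary time delta into 1024us PELT periods, using period_contrib to remember how much of the current period was already consumed. A compilable sketch of just that segmentation, with the geometric decay of the running sums deliberately left out (the kernel decays the old sums and adds the full middle periods via decay_load() and __accumulate_pelt_segments()):

    #include <stdio.h>
    #include <stdint.h>

    struct sched_avg_sketch { uint32_t period_contrib; };

    /* Split 'delta' (us) into whole 1024us periods plus boundary pieces. */
    static uint64_t accumulate_periods(uint64_t delta,
                                       struct sched_avg_sketch *sa,
                                       uint32_t *contrib)
    {
        uint64_t periods;

        *contrib = (uint32_t)delta;   /* used as-is only when p == 0 */
        delta += sa->period_contrib;  /* include the partially used period */
        periods = delta / 1024;       /* a period is 1024us (~1ms) */
        delta %= 1024;

        if (periods) {
            /* remainder of the old period plus the start of the new one;
             * the decayed middle periods are omitted in this sketch */
            *contrib = (1024 - sa->period_contrib) + (uint32_t)delta;
        }
        sa->period_contrib = (uint32_t)delta;
        return periods;
    }

    int main(void)
    {
        struct sched_avg_sketch sa = { .period_contrib = 300 };
        uint32_t contrib;
        uint64_t periods = accumulate_periods(2000, &sa, &contrib);

        /* 300 + 2000 = 2300us -> 2 periods, 252us left in the new period */
        printf("periods=%llu contrib=%u leftover=%u\n",
               (unsigned long long)periods, contrib, sa.period_contrib);
        return 0;
    }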
|
D | pelt.h |
      78  static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)  in update_rq_clock_pelt() argument
      80  delta <<= READ_ONCE(sched_pelt_lshift);  in update_rq_clock_pelt()
      82  per_cpu(clock_task_mult, rq->cpu) += delta;  in update_rq_clock_pelt()
     106  delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));  in update_rq_clock_pelt()
     107  delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));  in update_rq_clock_pelt()
     109  rq->clock_pelt += delta;  in update_rq_clock_pelt()
     209  update_rq_clock_pelt(struct rq *rq, s64 delta) { }  in update_rq_clock_pelt() argument
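The two cap_scale() lines are what makes PELT time invariant: a wall-clock delta is scaled down by the CPU's compute capacity and by its current frequency, both expressed as fractions of 1024, before being added to clock_pelt. A small sketch with made-up capacity numbers:

    #include <stdio.h>
    #include <stdint.h>

    #define SCHED_CAPACITY_SHIFT 10   /* capacities are fixed-point, 1024 == 100% */

    /* cap_scale(): scale a time delta by a capacity expressed out of 1024. */
    static inline int64_t cap_scale(int64_t delta, unsigned long cap)
    {
        return (delta * (int64_t)cap) >> SCHED_CAPACITY_SHIFT;
    }

    int main(void)
    {
        /* A 1000us wall-clock delta on a CPU with 512/1024 compute capacity
         * running at 768/1024 of max frequency advances the PELT clock by
         * 1000 * 0.5 * 0.75 = 375us, keeping the load sums comparable
         * across CPUs and frequencies. */
        int64_t delta = 1000;
        delta = cap_scale(delta, 512);   /* arch_scale_cpu_capacity()  */
        delta = cap_scale(delta, 768);   /* arch_scale_freq_capacity() */
        printf("pelt delta = %lld us\n", (long long)delta);   /* 375 */
        return 0;
    }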
|
D | clock.c |
     268  s64 delta;  in sched_clock_local() local
     272  delta = now - scd->tick_raw;  in sched_clock_local()
     273  if (unlikely(delta < 0))  in sched_clock_local()
     274  delta = 0;  in sched_clock_local()
     285  clock = gtod + delta;  in sched_clock_local()
|
D | psi.c |
     270  u32 delta;  in get_recent_times() local
     283  delta = times[s] - groupc->times_prev[aggregator][s];  in get_recent_times()
     286  times[s] = delta;  in get_recent_times()
     287  if (delta)  in get_recent_times()
     700  u32 delta;  in record_times() local
     704  delta = now - groupc->state_start;  in record_times()
     708  groupc->times[PSI_IO_SOME] += delta;  in record_times()
     710  groupc->times[PSI_IO_FULL] += delta;  in record_times()
     714  groupc->times[PSI_MEM_SOME] += delta;  in record_times()
     716  groupc->times[PSI_MEM_FULL] += delta;  in record_times()
     [all …]
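record_times() charges the time since the last state change to every pressure state that was active in that window, then restarts the clock. A simplified, compilable version, assuming a plain bool per state instead of the kernel's state_mask bit tests, and ignoring the per-aggregator snapshots seen in get_recent_times():

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    enum psi_states { PSI_IO_SOME, PSI_IO_FULL, PSI_MEM_SOME, PSI_MEM_FULL,
                      NR_PSI_STATES };

    struct psi_group_cpu_sketch {
        uint64_t state_start;            /* when the current state set began */
        bool     state[NR_PSI_STATES];   /* stand-in for state_mask bit tests */
        uint32_t times[NR_PSI_STATES];   /* accumulated stall time per state */
    };

    /* Charge the elapsed window to each active state, restart the clock. */
    static void record_times(struct psi_group_cpu_sketch *g, uint64_t now)
    {
        uint32_t delta = (uint32_t)(now - g->state_start);

        for (int s = 0; s < NR_PSI_STATES; s++)
            if (g->state[s])
                g->times[s] += delta;
        g->state_start = now;
    }

    int main(void)
    {
        struct psi_group_cpu_sketch g = { .state_start = 100 };

        g.state[PSI_MEM_SOME] = true;
        record_times(&g, 350);
        printf("mem some stall: %uus\n", g.times[PSI_MEM_SOME]);   /* 250 */
        return 0;
    }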
|
/kernel/trace/ |
D | trace_benchmark.c |
      41  u64 delta;  in trace_do_benchmark() local
      60  delta = stop - start;  in trace_do_benchmark()
      67  bm_first = delta;  in trace_do_benchmark()
      73  bm_last = delta;  in trace_do_benchmark()
      75  if (delta > bm_max)  in trace_do_benchmark()
      76  bm_max = delta;  in trace_do_benchmark()
      77  if (!bm_min || delta < bm_min)  in trace_do_benchmark()
      78  bm_min = delta;  in trace_do_benchmark()
      92  bm_total += delta;  in trace_do_benchmark()
      93  bm_totalsq += delta * delta;  in trace_do_benchmark()
      [all …]
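trace_do_benchmark() keeps running first/last/min/max values plus a total and a total of squares, which is enough to derive the mean and standard deviation without storing individual samples. A sketch of the same bookkeeping, with floating point standing in for the kernel's integer square-root loop:

    /* build: cc bm.c -lm */
    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    struct bm_stats {
        uint64_t first, last, min, max, total, totalsq;
        uint64_t n;
    };

    static void bm_add(struct bm_stats *s, uint64_t delta)
    {
        if (!s->n)
            s->first = delta;
        s->last = delta;
        if (delta > s->max)
            s->max = delta;
        if (!s->min || delta < s->min)
            s->min = delta;
        s->total += delta;
        s->totalsq += delta * delta;   /* sum of squares, for the variance */
        s->n++;
    }

    int main(void)
    {
        struct bm_stats s = { 0 };
        uint64_t samples[] = { 120, 95, 110, 130 };

        for (int i = 0; i < 4; i++)
            bm_add(&s, samples[i]);

        double mean = (double)s.total / s.n;
        double var  = (double)s.totalsq / s.n - mean * mean;  /* E[x^2]-E[x]^2 */
        printf("min=%llu max=%llu mean=%.1f stddev=%.1f\n",
               (unsigned long long)s.min, (unsigned long long)s.max,
               mean, sqrt(var));
        return 0;
    }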
|
D | trace_irqsoff.c |
     306  static bool report_latency(struct trace_array *tr, u64 delta)  in report_latency() argument
     309  if (delta < tracing_thresh)  in report_latency()
     312  if (delta <= tr->max_latency)  in report_latency()
     324  u64 T0, T1, delta;  in check_critical_timing() local
     330  delta = T1-T0;  in check_critical_timing()
     336  if (!report_latency(tr, delta))  in check_critical_timing()
     342  if (!report_latency(tr, delta))  in check_critical_timing()
     355  tr->max_latency = delta;  in check_critical_timing()
|
D | ring_buffer.c |
     372  static inline int test_time_stamp(u64 delta)  in test_time_stamp() argument
     374  if (delta & TS_DELTA_TEST)  in test_time_stamp()
     429  u64 delta;  member
    2702  rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)  in rb_add_time_stamp() argument
    2711  event->time_delta = delta & TS_MASK;  in rb_add_time_stamp()
    2712  event->array[0] = delta >> TS_SHIFT;  in rb_add_time_stamp()
    2739  (unsigned long long)info->delta,  in rb_check_timestamp()
    2754  u64 *delta,  in rb_add_timestamp() argument
    2760  if (unlikely(info->delta > (1ULL << 59))) {  in rb_add_timestamp()
    2778  info->delta = 0;  in rb_add_timestamp()
    [all …]
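The ring buffer stores each event's time as a 27-bit delta from the previous event; test_time_stamp() checks whether a delta fits, and rb_add_time_stamp() spills the excess into an extended time-stamp event, with the low 27 bits in time_delta and the rest in array[0]. A round-trip sketch of that split, with the event-type tagging (RINGBUF_TYPE_TIME_EXTEND) omitted:

    #include <stdio.h>
    #include <stdint.h>

    #define TS_SHIFT      27
    #define TS_MASK       ((1ULL << TS_SHIFT) - 1)
    #define TS_DELTA_TEST (~TS_MASK)

    struct rb_event_sketch {
        uint32_t time_delta;   /* only the low 27 bits are used */
        uint32_t array[1];
    };

    /* Does this delta need an extended time-stamp event? */
    static int test_time_stamp(uint64_t delta)
    {
        return (delta & TS_DELTA_TEST) != 0;
    }

    /* Split an oversized delta across time_delta and array[0]. */
    static void rb_add_time_stamp(struct rb_event_sketch *e, uint64_t delta)
    {
        e->time_delta = (uint32_t)(delta & TS_MASK);
        e->array[0]   = (uint32_t)(delta >> TS_SHIFT);
    }

    int main(void)
    {
        uint64_t delta = 1ULL << 30;   /* too large for 27 bits */
        struct rb_event_sketch e = { 0 };

        if (test_time_stamp(delta))
            rb_add_time_stamp(&e, delta);

        uint64_t back = ((uint64_t)e.array[0] << TS_SHIFT) | e.time_delta;
        printf("round-trip ok: %d\n", back == delta);   /* 1 */
        return 0;
    }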
|
/kernel/ |
D | torture.c |
      91  unsigned long delta;  in torture_offline() local
     127  delta = jiffies - starttime;  in torture_offline()
     128  *sum_offl += delta;  in torture_offline()
     130  *min_offl = delta;  in torture_offline()
     131  *max_offl = delta;  in torture_offline()
     133  if (*min_offl > delta)  in torture_offline()
     134  *min_offl = delta;  in torture_offline()
     135  if (*max_offl < delta)  in torture_offline()
     136  *max_offl = delta;  in torture_offline()
     151  unsigned long delta;  in torture_online() local
     [all …]
|
D | tsacct.c |
      26  u64 delta;  in bacct_add_tsk() local
      32  delta = ktime_get_ns() - tsk->start_time;  in bacct_add_tsk()
      34  do_div(delta, NSEC_PER_USEC);  in bacct_add_tsk()
      35  stats->ac_etime = delta;  in bacct_add_tsk()
      37  btime = ktime_get_real_seconds() - div_u64(delta, USEC_PER_SEC);  in bacct_add_tsk()
     121  u64 time, delta;  in __acct_update_integrals() local
     127  delta = time - tsk->acct_timexpd;  in __acct_update_integrals()
     129  if (delta < TICK_NSEC)  in __acct_update_integrals()
     138  tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10;  in __acct_update_integrals()
     139  tsk->acct_vm_mem1 += delta * tsk->mm->total_vm >> 10;  in __acct_update_integrals()
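bacct_add_tsk() converts the nanoseconds elapsed since the task started into microseconds for ac_etime (the kernel's do_div() divides in place and returns the remainder), then derives the begin time by stepping the current wall clock back by the elapsed seconds. The same arithmetic with fixed stand-in values for ktime_get_ns() and ktime_get_real_seconds():

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_USEC 1000ULL
    #define USEC_PER_SEC  1000000ULL

    int main(void)
    {
        uint64_t now_ns = 7800000000ULL, start_ns = 5100000000ULL;
        int64_t  now_real_sec = 1700000000;   /* stand-in wall clock */

        uint64_t delta = now_ns - start_ns;                 /* elapsed ns */
        uint64_t ac_etime = delta / NSEC_PER_USEC;          /* do_div() step */
        int64_t  btime = now_real_sec - (int64_t)(ac_etime / USEC_PER_SEC);

        /* etime=2700000us btime=1699999998 */
        printf("etime=%lluus btime=%lld\n",
               (unsigned long long)ac_etime, (long long)btime);
        return 0;
    }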
|
D | async.c |
     114  ktime_t calltime, delta, rettime;  in async_run_entry_fn() local
     126  delta = ktime_sub(rettime, calltime);  in async_run_entry_fn()
     130  (long long)ktime_to_ns(delta) >> 10);  in async_run_entry_fn()
     328  ktime_t starttime, delta, endtime;  in async_synchronize_cookie_domain() local
     339  delta = ktime_sub(endtime, starttime);  in async_synchronize_cookie_domain()
     343  (long long)ktime_to_ns(delta) >> 10);  in async_synchronize_cookie_domain()
|
D | watchdog_hld.c |
      78  ktime_t delta, now = ktime_get_mono_fast_ns();  in watchdog_check_timestamp() local
      80  delta = now - __this_cpu_read(last_timestamp);  in watchdog_check_timestamp()
      81  if (delta < watchdog_hrtimer_sample_threshold) {  in watchdog_check_timestamp()
|
/kernel/time/ |
D | clockevents.c |
     236  int64_t delta;  in clockevents_program_min_delta() local
     240  delta = dev->min_delta_ns;  in clockevents_program_min_delta()
     241  dev->next_event = ktime_add_ns(ktime_get(), delta);  in clockevents_program_min_delta()
     247  clc = ((unsigned long long) delta * dev->mult) >> dev->shift;  in clockevents_program_min_delta()
     275  int64_t delta = 0;  in clockevents_program_min_delta() local
     279  delta += dev->min_delta_ns;  in clockevents_program_min_delta()
     280  dev->next_event = ktime_add_ns(ktime_get(), delta);  in clockevents_program_min_delta()
     286  clc = ((unsigned long long) delta * dev->mult) >> dev->shift;  in clockevents_program_min_delta()
     307  int64_t delta;  in clockevents_program_event() local
     326  delta = ktime_to_ns(ktime_sub(expires, ktime_get()));  in clockevents_program_event()
     [all …]
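Each clockevents_program_*() path converts a nanosecond delta to device cycles with the device's fixed-point pair: cycles = ns * mult >> shift, where mult and shift are chosen at registration so the 64-bit multiply cannot overflow over the device's valid range. A sketch with a hypothetical device frequency (the mult value below is just 0.01356 * 2^26, i.e. a 13.56MHz counter):

    #include <stdio.h>
    #include <stdint.h>

    /* cycles = ns * mult >> shift */
    static uint64_t ns_to_cycles(int64_t delta_ns, uint32_t mult, uint32_t shift)
    {
        return ((uint64_t)delta_ns * mult) >> shift;
    }

    int main(void)
    {
        /* hypothetical 13.56MHz clockevent device:
         * 13.56e6 / 1e9 * 2^26 ~= 909996 */
        uint32_t mult = 909996, shift = 26;

        printf("1ms -> %llu cycles\n",   /* ~13560 */
               (unsigned long long)ns_to_cycles(1000000, mult, shift));
        return 0;
    }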
|
D | timekeeping.c |
     168  static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)  in tk_update_sleep_time() argument
     170  tk->offs_boot = ktime_add(tk->offs_boot, delta);  in tk_update_sleep_time()
     243  u64 now, last, mask, max, delta;  in timekeeping_get_delta() local
     261  delta = clocksource_delta(now, last, mask);  in timekeeping_get_delta()
     267  if (unlikely((~delta & mask) < (mask >> 3))) {  in timekeeping_get_delta()
     269  delta = 0;  in timekeeping_get_delta()
     273  if (unlikely(delta > max)) {  in timekeeping_get_delta()
     275  delta = tkr->clock->max_cycles;  in timekeeping_get_delta()
     278  return delta;  in timekeeping_get_delta()
     286  u64 cycle_now, delta;  in timekeeping_get_delta() local
     [all …]
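timekeeping_get_delta() reads elapsed cycles from a wrapping counter as (now - last) & mask; in the debug variant shown above, a delta whose upper bits are nearly all set, i.e. (~delta & mask) < (mask >> 3), most likely means the clock stepped slightly backwards and is clamped to zero. A sketch of the mask arithmetic and the backwards-motion test (the max_cycles clamp is omitted):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t clocksource_delta(uint64_t now, uint64_t last, uint64_t mask)
    {
        uint64_t delta = (now - last) & mask;

        /* top bits almost all set => small negative motion, treat as zero */
        if ((~delta & mask) < (mask >> 3))
            return 0;
        return delta;
    }

    int main(void)
    {
        uint64_t mask = (1ULL << 32) - 1;   /* 32-bit free-running counter */

        /* forward motion across a wrap of the counter: 0xfffffff0 -> 0x10 */
        printf("%llu\n", (unsigned long long)
               clocksource_delta(0x10, 0xfffffff0ULL, mask));   /* 32 */
        /* counter observed slightly behind the previous reading */
        printf("%llu\n", (unsigned long long)
               clocksource_delta(0x100, 0x180, mask));          /* 0 */
        return 0;
    }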
|
D | tick-sched.c |
      58  ktime_t delta;  in tick_do_update_jiffies64() local
      95  delta = ktime_sub(now, tick_next_period);  in tick_do_update_jiffies64()
      96  if (unlikely(delta >= TICK_NSEC)) {  in tick_do_update_jiffies64()
     100  ticks += ktime_divns(delta, incr);  in tick_do_update_jiffies64()
     604  ktime_t delta;  in update_ts_time_stats() local
     607  delta = ktime_sub(now, ts->idle_entrytime);  in update_ts_time_stats()
     609  ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);  in update_ts_time_stats()
     611  ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);  in update_ts_time_stats()
     663  ktime_t delta = ktime_sub(now, ts->idle_entrytime);  in get_cpu_idle_time_us() local
     665  idle = ktime_add(ts->idle_sleeptime, delta);  in get_cpu_idle_time_us()
     [all …]
|
D | timecounter.c |
      82  u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;  in timecounter_cyc2time() local
      90  if (delta > tc->cc->mask / 2) {  in timecounter_cyc2time()
      91  delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;  in timecounter_cyc2time()
      92  nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);  in timecounter_cyc2time()
      94  nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);  in timecounter_cyc2time()
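timecounter_cyc2time() accepts timestamps on either side of cycle_last: if the masked delta exceeds half the counter's range, it is interpreted as a timestamp from the past and time is computed backwards. A sketch, assuming a hypothetical fixed 8ns-per-cycle conversion in place of the mult/shift/frac arithmetic of cyclecounter_cyc2ns():

    #include <stdio.h>
    #include <stdint.h>

    #define NS_PER_CYC 8ULL   /* hypothetical 125MHz cycle counter */

    static uint64_t cyc2time(uint64_t cycle_last, uint64_t nsec_last,
                             uint64_t cycle_tstamp, uint64_t mask)
    {
        uint64_t delta = (cycle_tstamp - cycle_last) & mask;

        if (delta > mask / 2) {
            /* more than half the range: the timestamp is in the past */
            delta = (cycle_last - cycle_tstamp) & mask;
            return nsec_last - delta * NS_PER_CYC;
        }
        return nsec_last + delta * NS_PER_CYC;
    }

    int main(void)
    {
        uint64_t mask = (1ULL << 48) - 1;

        printf("future: %llu\n", (unsigned long long)
               cyc2time(1000, 50000, 1100, mask));   /* 50800 */
        printf("past:   %llu\n", (unsigned long long)
               cyc2time(1000, 50000,  900, mask));   /* 49200 */
        return 0;
    }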
|
D | alarmtimer.c |
     273  ktime_t delta;  in alarmtimer_suspend() local
     280  delta = ktime_sub(next->expires, base->get_ktime());  in alarmtimer_suspend()
     281  if (!min || (delta < min)) {  in alarmtimer_suspend()
     283  min = delta;  in alarmtimer_suspend()
     451  ktime_t delta;  in alarm_forward() local
     453  delta = ktime_sub(now, alarm->node.expires);  in alarm_forward()
     455  if (delta < 0)  in alarm_forward()
     458  if (unlikely(delta >= interval)) {  in alarm_forward()
     461  overrun = ktime_divns(delta, incr);  in alarm_forward()
     518  ktime_t delta;  in alarmtimer_freezerset() local
     [all …]
|
D | clocksource.c |
     243  s64 delta;  in clocksource_verify_percpu() local
     256  delta = (s64)((csnow_mid - csnow_begin) & cs->mask);  in clocksource_verify_percpu()
     257  if (delta < 0)  in clocksource_verify_percpu()
     259  delta = (csnow_end - csnow_mid) & cs->mask;  in clocksource_verify_percpu()
     260  if (delta < 0)  in clocksource_verify_percpu()
     262  delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);  in clocksource_verify_percpu()
     263  cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);  in clocksource_verify_percpu()
     283  u64 csnow, wdnow, cslast, wdlast, delta;  in clocksource_watchdog() local
     318  delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);  in clocksource_watchdog()
     319  wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,  in clocksource_watchdog()
     [all …]
|
D | ntp.c |
     402  s64 delta;  in second_overflow() local
     467  delta = ntp_offset_chunk(time_offset);  in second_overflow()
     468  time_offset -= delta;  in second_overflow()
     469  tick_length += delta;  in second_overflow()
     886  long delta, delta_mod;  in hardpps_update_freq() local
     906  delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);  in hardpps_update_freq()
     908  if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {  in hardpps_update_freq()
     910  "hardpps: PPSWANDER: change=%ld\n", delta);  in hardpps_update_freq()
     922  delta_mod = delta;  in hardpps_update_freq()
     936  return delta;  in hardpps_update_freq()
|
D | hrtimer.c |
     938  ktime_t delta;  in hrtimer_forward() local
     940  delta = ktime_sub(now, hrtimer_get_expires(timer));  in hrtimer_forward()
     942  if (delta < 0)  in hrtimer_forward()
     951  if (unlikely(delta >= interval)) {  in hrtimer_forward()
     954  orun = ktime_divns(delta, incr);  in hrtimer_forward()
    1682  ktime_t expires_next, now, entry_time, delta;  in hrtimer_interrupt() local
    1755  delta = ktime_sub(now, entry_time);  in hrtimer_interrupt()
    1756  if ((unsigned int)delta > cpu_base->max_hang_time)  in hrtimer_interrupt()
    1757  cpu_base->max_hang_time = (unsigned int) delta;  in hrtimer_interrupt()
    1762  if (delta > 100 * NSEC_PER_MSEC)  in hrtimer_interrupt()
    [all …]
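hrtimer_forward() advances an expired timer past now in whole interval steps and returns the number of missed periods; the ktime_divns() division is only paid when more than one interval elapsed. A userspace rendering of that logic (the real function also clamps intervals below the clock resolution):

    #include <stdio.h>
    #include <stdint.h>

    typedef int64_t ktime_t;   /* nanoseconds, as in the kernel */

    static uint64_t timer_forward(ktime_t *expires, ktime_t now, ktime_t interval)
    {
        ktime_t delta = now - *expires;
        uint64_t overrun = 1;

        if (delta < 0)
            return 0;                 /* not expired yet */

        if (delta >= interval) {      /* missed several whole periods */
            uint64_t orun = delta / interval;

            *expires += orun * interval;
            if (*expires > now)
                return orun;
            overrun = orun + 1;       /* one more step, strictly past now */
        }
        *expires += interval;
        return overrun;
    }

    int main(void)
    {
        ktime_t expires = 1000;
        uint64_t o = timer_forward(&expires, 3500, 1000);

        printf("overruns=%llu new expiry=%lld\n",   /* overruns=3, 4000 */
               (unsigned long long)o, (long long)expires);
        return 0;
    }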
|
D | timer.c |
     519  unsigned long delta = expires - clk;  in calc_wheel_index() local
     522  if (delta < LVL_START(1)) {  in calc_wheel_index()
     524  } else if (delta < LVL_START(2)) {  in calc_wheel_index()
     526  } else if (delta < LVL_START(3)) {  in calc_wheel_index()
     528  } else if (delta < LVL_START(4)) {  in calc_wheel_index()
     530  } else if (delta < LVL_START(5)) {  in calc_wheel_index()
     532  } else if (delta < LVL_START(6)) {  in calc_wheel_index()
     534  } else if (delta < LVL_START(7)) {  in calc_wheel_index()
     536  } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {  in calc_wheel_index()
     538  } else if ((long) delta < 0) {  in calc_wheel_index()
     [all …]
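The calc_wheel_index() ladder picks a timer-wheel level purely from how far away the expiry is: each level covers 8x the range of the previous one at 8x coarser granularity, and already-expired timers ((long)delta < 0) go on the first level. A compilable sketch with the kernel's 64-slot levels, simplified to a fixed depth of 8:

    #include <stdio.h>

    #define LVL_BITS      6
    #define LVL_SIZE      (1UL << LVL_BITS)   /* 64 slots per level */
    #define LVL_CLK_SHIFT 3                   /* each level is 8x coarser */
    #define LVL_SHIFT(n)  ((n) * LVL_CLK_SHIFT)
    #define LVL_START(n)  ((LVL_SIZE - 1) << LVL_SHIFT((n) - 1))

    static int wheel_level(unsigned long delta)
    {
        if ((long)delta < 0)
            return 0;                 /* already expired: first level */
        for (int lvl = 1; lvl <= 8; lvl++)
            if (delta < LVL_START(lvl))
                return lvl - 1;
        return 7;                     /* clamp far-out timers to the last level */
    }

    int main(void)
    {
        unsigned long deltas[] = { 10, 100, 5000, 4000000 };

        for (int i = 0; i < 4; i++) {
            int lvl = wheel_level(deltas[i]);
            printf("delta=%lu jiffies -> level %d (granularity %lu jiffies)\n",
                   deltas[i], lvl, 1UL << LVL_SHIFT(lvl));
        }
        return 0;
    }

The trade-off this encodes: a timer 10 jiffies out lands on level 0 with 1-jiffy precision, while one 4,000,000 jiffies out lands on level 6 where slots are 262,144 jiffies wide, which is fine because distant timers are usually cancelled or re-armed before they fire.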
|
/kernel/cgroup/ |
D | rstat.c |
     316  struct cgroup_base_stat cur, delta;  in cgroup_base_stat_flush() local
     326  delta = cur;  in cgroup_base_stat_flush()
     327  cgroup_base_stat_sub(&delta, &rstatc->last_bstat);  in cgroup_base_stat_flush()
     328  cgroup_base_stat_add(&cgrp->bstat, &delta);  in cgroup_base_stat_flush()
     329  cgroup_base_stat_add(&rstatc->last_bstat, &delta);  in cgroup_base_stat_flush()
     333  delta = cgrp->bstat;  in cgroup_base_stat_flush()
     334  cgroup_base_stat_sub(&delta, &cgrp->last_bstat);  in cgroup_base_stat_flush()
     335  cgroup_base_stat_add(&parent->bstat, &delta);  in cgroup_base_stat_flush()
     336  cgroup_base_stat_add(&cgrp->last_bstat, &delta);  in cgroup_base_stat_flush()
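cgroup_base_stat_flush() propagates statistics with a pair of delta steps: cur minus last_bstat flows into the cgroup's own bstat, and bstat minus last_bstat flows into the parent, with each "last" snapshot advanced by the same delta so nothing is ever counted twice. A single-counter sketch of the pattern:

    #include <stdio.h>
    #include <stdint.h>

    struct bstat { uint64_t cputime; };

    struct node {
        struct node *parent;
        struct bstat bstat;        /* aggregated total, includes descendants */
        struct bstat last_bstat;   /* what has been propagated to the parent */
    };

    static void flush(struct node *n, const struct bstat *percpu_cur,
                      struct bstat *percpu_last)
    {
        /* fold new per-CPU activity into this node */
        uint64_t delta = percpu_cur->cputime - percpu_last->cputime;
        n->bstat.cputime += delta;
        percpu_last->cputime += delta;

        /* propagate this node's new total one level up */
        if (n->parent) {
            delta = n->bstat.cputime - n->last_bstat.cputime;
            n->parent->bstat.cputime += delta;
            n->last_bstat.cputime += delta;
        }
    }

    int main(void)
    {
        struct node root = { 0 }, child = { .parent = &root };
        struct bstat cpu_cur = { .cputime = 500 }, cpu_last = { 0 };

        flush(&child, &cpu_cur, &cpu_last);
        cpu_cur.cputime = 800;
        flush(&child, &cpu_cur, &cpu_last);
        printf("child=%llu root=%llu\n",   /* 800 800 */
               (unsigned long long)child.bstat.cputime,
               (unsigned long long)root.bstat.cputime);
        return 0;
    }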
|
/kernel/bpf/ |
D | tnum.c |
      23  u64 chi = min ^ max, delta;  in tnum_range() local
      33  delta = (1ULL << bits) - 1;  in tnum_range()
      34  return TNUM(min & ~delta, delta);  in tnum_range()
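tnum_range() builds the BPF verifier's tristate number for a [min, max] interval: min ^ max finds the highest bit position where the bounds differ, and every bit at or below it becomes unknown, i.e. part of the mask. A sketch using the GCC/Clang count-leading-zeros builtin in place of the kernel's fls64():

    #include <stdio.h>
    #include <stdint.h>

    /* A tnum tracks per-bit knowledge: mask bits are unknown,
     * value bits are the known bits' values. */
    struct tnum { uint64_t value; uint64_t mask; };

    /* find last (most significant) set bit, 1-based; 0 for x == 0 */
    static int fls64_sketch(uint64_t x)
    {
        return x ? 64 - __builtin_clzll(x) : 0;
    }

    static struct tnum tnum_range(uint64_t min, uint64_t max)
    {
        uint64_t chi = min ^ max, delta;
        int bits = fls64_sketch(chi);

        if (bits > 63)                  /* 1ULL << 64 would be undefined */
            return (struct tnum){ 0, ~0ULL };
        delta = (1ULL << bits) - 1;     /* highest differing bit and below */
        return (struct tnum){ min & ~delta, delta };
    }

    int main(void)
    {
        struct tnum t = tnum_range(0x40, 0x47);   /* 0b1000000 .. 0b1000111 */

        printf("value=%#llx mask=%#llx\n",        /* value=0x40 mask=0x7 */
               (unsigned long long)t.value, (unsigned long long)t.mask);
        return 0;
    }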
|