
Searched refs:delta (Results 1 – 25 of 41) sorted by relevance


/kernel/sched/
stats.c
23 u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start); in __update_stats_wait_end() local
32 __schedstat_set(stats->wait_start, delta); in __update_stats_wait_end()
37 trace_sched_stat_wait(p, delta); in __update_stats_wait_end()
41 max(schedstat_val(stats->wait_max), delta)); in __update_stats_wait_end()
43 __schedstat_add(stats->wait_sum, delta); in __update_stats_wait_end()
56 u64 delta = rq_clock(rq) - sleep_start; in __update_stats_enqueue_sleeper() local
58 if ((s64)delta < 0) in __update_stats_enqueue_sleeper()
59 delta = 0; in __update_stats_enqueue_sleeper()
61 if (unlikely(delta > schedstat_val(stats->sleep_max))) in __update_stats_enqueue_sleeper()
62 __schedstat_set(stats->sleep_max, delta); in __update_stats_enqueue_sleeper()
[all …]
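The stats.c hits above all follow one pattern: take a clock delta, clamp a negative result to zero, and fold it into max/sum counters. A minimal userspace sketch of that pattern, with illustrative (non-kernel) names:

/* Sketch of the wait-time accounting pattern matched in stats.c. */
#include <stdint.h>

struct wait_stats {
    uint64_t wait_start;   /* timestamp taken at enqueue, in ns */
    uint64_t wait_max;     /* longest single wait observed      */
    uint64_t wait_sum;     /* total wait time accumulated       */
};

static void wait_end(struct wait_stats *st, uint64_t now_ns)
{
    uint64_t delta = now_ns - st->wait_start;

    /* A negative difference (clock adjustment between reads) counts as zero. */
    if ((int64_t)delta < 0)
        delta = 0;

    if (delta > st->wait_max)
        st->wait_max = delta;
    st->wait_sum += delta;
    st->wait_start = 0;
}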
loadavg.c
80 long nr_active, delta = 0; in calc_load_fold_active() local
86 delta = nr_active - this_rq->calc_load_active; in calc_load_fold_active()
90 return delta; in calc_load_fold_active()
235 long delta; in calc_load_nohz_fold() local
237 delta = calc_load_fold_active(rq, 0); in calc_load_nohz_fold()
238 if (delta) { in calc_load_nohz_fold()
241 atomic_long_add(delta, &calc_load_nohz[idx]); in calc_load_nohz_fold()
286 long delta = 0; in calc_load_nohz_read() local
289 delta = atomic_long_xchg(&calc_load_nohz[idx], 0); in calc_load_nohz_read()
291 return delta; in calc_load_nohz_read()
[all …]
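The loadavg.c hits show per-CPU folding: each runqueue reports only the change since its last contribution, and that delta is added to a shared accumulator. A small sketch assuming C11 atomics in place of the kernel's atomic_long_t, with made-up names:

/* Sketch of the calc_load_fold_active()-style delta folding. */
#include <stdatomic.h>

struct cpu_load {
    long calc_load_active;          /* contribution already folded in */
};

static atomic_long calc_load_pending;   /* shared pending delta */

static long calc_load_fold(struct cpu_load *cl, long nr_active)
{
    long delta = 0;

    if (nr_active != cl->calc_load_active) {
        delta = nr_active - cl->calc_load_active;
        cl->calc_load_active = nr_active;
    }
    return delta;
}

static void fold_into_global(struct cpu_load *cl, long nr_active)
{
    long delta = calc_load_fold(cl, nr_active);

    if (delta)
        atomic_fetch_add(&calc_load_pending, delta);
}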
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
16 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
28 rq->rq_cpu_time += delta; in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
35 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeue()
72 static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { } in rq_sched_info_arrive() argument
73 static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { } in rq_sched_info_dequeue() argument
74 static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { } in rq_sched_info_depart() argument
113 void psi_account_irqtime(struct task_struct *task, u32 delta);
[all …]
pelt.c
104 accumulate_sum(u64 delta, struct sched_avg *sa, in accumulate_sum() argument
107 u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */ in accumulate_sum()
110 delta += sa->period_contrib; in accumulate_sum()
111 periods = delta / 1024; /* A period is 1024us (~1ms) */ in accumulate_sum()
125 delta %= 1024; in accumulate_sum()
138 1024 - sa->period_contrib, delta); in accumulate_sum()
141 sa->period_contrib = delta; in accumulate_sum()
185 u64 delta; in ___update_load_sum() local
187 delta = now - sa->last_update_time; in ___update_load_sum()
192 if ((s64)delta < 0) { in ___update_load_sum()
[all …]
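accumulate_sum() splits a new delta into whole 1024us periods plus a remainder that is carried in period_contrib. A simplified sketch of just that splitting arithmetic (decay of the old sum is omitted; names are illustrative):

/* Sketch of the PELT period-splitting arithmetic, without decay. */
#include <stdint.h>

struct avg_state {
    uint32_t period_contrib;   /* microseconds accrued in the open period */
    uint64_t sum;              /* running (undecayed) sum of contributions */
};

static void accumulate(struct avg_state *sa, uint64_t delta_us)
{
    uint64_t periods, contrib = delta_us;  /* whole delta if no boundary is crossed */

    delta_us += sa->period_contrib;        /* include the unfinished previous period */
    periods = delta_us / 1024;             /* a period is 1024us (~1ms) */
    delta_us %= 1024;

    if (periods) {
        /* close the open period, add the full periods, start a new partial one */
        contrib = (1024 - sa->period_contrib) + (periods - 1) * 1024 + delta_us;
    }

    sa->sum += contrib;
    sa->period_contrib = (uint32_t)delta_us;
}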
cputime.c
36 static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, in irqtime_account_delta() argument
42 cpustat[idx] += delta; in irqtime_account_delta()
43 irqtime->total += delta; in irqtime_account_delta()
44 irqtime->tick_delta += delta; in irqtime_account_delta()
56 s64 delta; in irqtime_account_irq() local
64 delta = sched_clock_cpu(cpu) - irqtime->irq_start_time; in irqtime_account_irq()
65 irqtime->irq_start_time += delta; in irqtime_account_irq()
75 irqtime_account_delta(irqtime, delta, CPUTIME_IRQ); in irqtime_account_irq()
78 irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ); in irqtime_account_irq()
82 trace_android_rvh_account_irq(curr, cpu, delta, irq_start); in irqtime_account_irq()
[all …]
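The cputime.c hits show snapshot-advance accounting: compute the time since the last snapshot, move the snapshot forward by exactly that amount, and charge the delta to a bucket. A rough userspace sketch with hypothetical types:

/* Sketch of irqtime-style snapshot accounting. */
#include <stdint.h>

enum bucket { BUCKET_IRQ, BUCKET_SOFTIRQ, BUCKET_MAX };

struct irqtime {
    uint64_t irq_start_time;          /* last accounting snapshot, ns */
    uint64_t total;                   /* everything accounted so far  */
    uint64_t bucket[BUCKET_MAX];      /* per-context totals           */
};

static void irqtime_account(struct irqtime *it, uint64_t now_ns, enum bucket b)
{
    int64_t delta = (int64_t)(now_ns - it->irq_start_time);

    if (delta <= 0)
        return;                       /* nothing new to account */

    it->irq_start_time += delta;      /* advance by exactly what was consumed */
    it->bucket[b] += delta;
    it->total += delta;
}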
pelt.h
103 static inline void update_rq_clock_pelt(struct rq *rq, s64 delta) in update_rq_clock_pelt() argument
126 delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq))); in update_rq_clock_pelt()
127 delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq))); in update_rq_clock_pelt()
129 rq->clock_pelt += delta; in update_rq_clock_pelt()
144 static inline void update_rq_clock_task_mult(struct rq *rq, s64 delta) in update_rq_clock_task_mult() argument
146 delta <<= READ_ONCE(sched_pelt_lshift); in update_rq_clock_task_mult()
148 rq->clock_task_mult += delta; in update_rq_clock_task_mult()
150 update_rq_clock_pelt(rq, delta); in update_rq_clock_task_mult()
261 update_rq_clock_task_mult(struct rq *rq, s64 delta) { } in update_rq_clock_task_mult() argument
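update_rq_clock_pelt() scales the delta twice, by relative CPU capacity and by relative frequency, both on a 0..1024 scale so everything stays in integer multiply-and-shift. A sketch of that scaling, assuming the 1024 (SCHED_CAPACITY_SCALE) convention and illustrative helper names:

/* Sketch of the capacity/frequency scaling applied to a PELT clock delta. */
#include <stdint.h>

#define CAPACITY_SHIFT 10   /* 1024 == "full capacity" / "full frequency" */

static inline int64_t cap_scale(int64_t delta, uint64_t scale)
{
    return (delta * (int64_t)scale) >> CAPACITY_SHIFT;
}

static int64_t scale_pelt_delta(int64_t delta, uint64_t cpu_capacity, uint64_t freq_capacity)
{
    delta = cap_scale(delta, cpu_capacity);   /* a smaller core makes time "count" for less */
    delta = cap_scale(delta, freq_capacity);  /* so does running at a lower frequency       */
    return delta;
}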
core_sched.c
243 u64 delta, now = rq_clock(rq->core); in __sched_core_account_forceidle() local
255 delta = now - rq->core->core_forceidle_start; in __sched_core_account_forceidle()
256 if (unlikely((s64)delta <= 0)) in __sched_core_account_forceidle()
270 delta *= rq->core->core_forceidle_count; in __sched_core_account_forceidle()
271 delta = div_u64(delta, rq->core->core_forceidle_occupation); in __sched_core_account_forceidle()
285 __account_forceidle_time(p, delta); in __sched_core_account_forceidle()
clock.c
266 s64 delta; in sched_clock_local() local
270 delta = now - scd->tick_raw; in sched_clock_local()
271 if (unlikely(delta < 0)) in sched_clock_local()
272 delta = 0; in sched_clock_local()
283 clock = gtod + delta; in sched_clock_local()
psi.c
264 u32 delta; in get_recent_times() local
277 delta = times[s] - groupc->times_prev[aggregator][s]; in get_recent_times()
280 times[s] = delta; in get_recent_times()
281 if (delta) in get_recent_times()
704 u32 delta; in record_times() local
706 delta = now - groupc->state_start; in record_times()
710 groupc->times[PSI_IO_SOME] += delta; in record_times()
712 groupc->times[PSI_IO_FULL] += delta; in record_times()
716 groupc->times[PSI_MEM_SOME] += delta; in record_times()
718 groupc->times[PSI_MEM_FULL] += delta; in record_times()
[all …]
/kernel/trace/
trace_benchmark.c
41 u64 delta; in trace_do_benchmark() local
60 delta = stop - start; in trace_do_benchmark()
67 bm_first = delta; in trace_do_benchmark()
73 bm_last = delta; in trace_do_benchmark()
75 if (delta > bm_max) in trace_do_benchmark()
76 bm_max = delta; in trace_do_benchmark()
77 if (!bm_min || delta < bm_min) in trace_do_benchmark()
78 bm_min = delta; in trace_do_benchmark()
92 bm_total += delta; in trace_do_benchmark()
93 bm_totalsq += delta * delta; in trace_do_benchmark()
[all …]
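trace_do_benchmark() keeps first/last/min/max plus a running total and total of squares for each measured delta. A compact sketch of the same bookkeeping with an illustrative struct:

/* Sketch of the benchmark delta statistics kept in trace_benchmark.c. */
#include <stdint.h>

struct bench {
    uint64_t cnt, first, last, min, max;
    uint64_t total;      /* sum of deltas         */
    uint64_t totalsq;    /* sum of squared deltas */
};

static void bench_add(struct bench *bm, uint64_t delta)
{
    if (bm->cnt == 0)
        bm->first = delta;
    bm->last = delta;

    if (delta > bm->max)
        bm->max = delta;
    if (bm->min == 0 || delta < bm->min)
        bm->min = delta;

    bm->total += delta;
    bm->totalsq += delta * delta;   /* lets the caller derive mean and variance later */
    bm->cnt++;
}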
trace_benchmark.h
17 TP_PROTO(const char *str, u64 delta),
19 TP_ARGS(str, delta),
23 __field( u64, delta)
28 __entry->delta = delta;
31 TP_printk("%s delta=%llu", __entry->str, __entry->delta),
ring_buffer.c
390 u64 delta; member
2782 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) in rb_add_time_stamp() argument
2791 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
2792 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
2816 (unsigned long long)info->delta, in rb_check_timestamp()
2831 u64 *delta, in rb_add_timestamp() argument
2837 if (unlikely(info->delta > (1ULL << 59))) { in rb_add_timestamp()
2843 info->delta &= ABS_TS_MASK; in rb_add_timestamp()
2862 info->delta = 0; in rb_add_timestamp()
2864 *event = rb_add_time_stamp(*event, info->delta, abs); in rb_add_timestamp()
[all …]
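The ring_buffer.c hits show an oversized delta being split into a small time_delta field plus an extra array word (delta & TS_MASK, delta >> TS_SHIFT). A sketch of that encode/decode, where the 27-bit split and the struct layout are assumptions for illustration, not the ring buffer's actual event format:

/* Sketch of splitting a large time delta across two fields. */
#include <stdint.h>

#define TS_SHIFT 27
#define TS_MASK  ((1ULL << TS_SHIFT) - 1)

struct event_stamp {
    uint32_t time_delta;   /* low TS_SHIFT bits of the delta */
    uint32_t high;         /* remaining high bits            */
};

static void encode_time_delta(struct event_stamp *ev, uint64_t delta)
{
    ev->time_delta = (uint32_t)(delta & TS_MASK);
    ev->high       = (uint32_t)(delta >> TS_SHIFT);
}

static uint64_t decode_time_delta(const struct event_stamp *ev)
{
    return ((uint64_t)ev->high << TS_SHIFT) | ev->time_delta;
}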
trace_irqsoff.c
309 static bool report_latency(struct trace_array *tr, u64 delta) in report_latency() argument
312 if (delta < tracing_thresh) in report_latency()
315 if (delta <= tr->max_latency) in report_latency()
327 u64 T0, T1, delta; in check_critical_timing() local
333 delta = T1-T0; in check_critical_timing()
337 if (!report_latency(tr, delta)) in check_critical_timing()
343 if (!report_latency(tr, delta)) in check_critical_timing()
356 tr->max_latency = delta; in check_critical_timing()
/kernel/
tsacct.c
26 u64 now_ns, delta; in bacct_add_tsk() local
34 delta = now_ns - tsk->group_leader->start_time; in bacct_add_tsk()
36 do_div(delta, NSEC_PER_USEC); in bacct_add_tsk()
37 stats->ac_tgetime = delta; in bacct_add_tsk()
38 delta = now_ns - tsk->start_time; in bacct_add_tsk()
39 do_div(delta, NSEC_PER_USEC); in bacct_add_tsk()
40 stats->ac_etime = delta; in bacct_add_tsk()
42 btime = ktime_get_real_seconds() - div_u64(delta, USEC_PER_SEC); in bacct_add_tsk()
127 u64 time, delta; in __acct_update_integrals() local
133 delta = time - tsk->acct_timexpd; in __acct_update_integrals()
[all …]
torture.c
199 unsigned long delta; in torture_offline() local
235 delta = jiffies - starttime; in torture_offline()
236 *sum_offl += delta; in torture_offline()
238 *min_offl = delta; in torture_offline()
239 *max_offl = delta; in torture_offline()
241 if (*min_offl > delta) in torture_offline()
242 *min_offl = delta; in torture_offline()
243 if (*max_offl < delta) in torture_offline()
244 *max_offl = delta; in torture_offline()
261 unsigned long delta; in torture_online() local
[all …]
watchdog_hld.c
78 ktime_t delta, now = ktime_get_mono_fast_ns(); in watchdog_check_timestamp() local
80 delta = now - __this_cpu_read(last_timestamp); in watchdog_check_timestamp()
81 if (delta < watchdog_hrtimer_sample_threshold) { in watchdog_check_timestamp()
/kernel/time/
timekeeping.c
168 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) in tk_update_sleep_time() argument
170 tk->offs_boot = ktime_add(tk->offs_boot, delta); in tk_update_sleep_time()
243 u64 now, last, mask, max, delta; in timekeeping_get_delta() local
261 delta = clocksource_delta(now, last, mask); in timekeeping_get_delta()
267 if (unlikely((~delta & mask) < (mask >> 3))) { in timekeeping_get_delta()
269 delta = 0; in timekeeping_get_delta()
273 if (unlikely(delta > max)) { in timekeeping_get_delta()
275 delta = tkr->clock->max_cycles; in timekeeping_get_delta()
278 return delta; in timekeeping_get_delta()
286 u64 cycle_now, delta; in timekeeping_get_delta() local
[all …]
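timekeeping_get_delta() masks the counter difference to the clocksource width, treats a value that lands in the top eighth of the range as a small backwards step (delta = 0), and clamps anything beyond the clocksource's safe maximum. A standalone sketch of those checks with illustrative names:

/* Sketch of the masked clocksource delta with the sanity checks above. */
#include <stdint.h>

static uint64_t clocksource_delta_checked(uint64_t now, uint64_t last,
                                          uint64_t mask, uint64_t max_cycles)
{
    uint64_t delta = (now - last) & mask;

    /* Counter appears to have gone slightly backwards: report no progress. */
    if ((~delta & mask) < (mask >> 3))
        return 0;

    /* Longer than the clocksource can safely convert: clamp. */
    if (delta > max_cycles)
        delta = max_cycles;

    return delta;
}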
clockevents.c
236 int64_t delta; in clockevents_program_min_delta() local
240 delta = dev->min_delta_ns; in clockevents_program_min_delta()
241 dev->next_event = ktime_add_ns(ktime_get(), delta); in clockevents_program_min_delta()
247 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; in clockevents_program_min_delta()
275 int64_t delta = 0; in clockevents_program_min_delta() local
279 delta += dev->min_delta_ns; in clockevents_program_min_delta()
280 dev->next_event = ktime_add_ns(ktime_get(), delta); in clockevents_program_min_delta()
286 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; in clockevents_program_min_delta()
307 int64_t delta; in clockevents_program_event() local
326 delta = ktime_to_ns(ktime_sub(expires, ktime_get())); in clockevents_program_event()
[all …]
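clockevents_program_min_delta() and clockevents_program_event() convert a nanosecond delta into device clock cycles with the device's mult/shift pair before programming the hardware. A sketch of that conversion with an illustrative device struct:

/* Sketch of the ns -> device-cycles conversion used when programming an event. */
#include <stdint.h>

struct clock_event_dev {
    uint32_t mult;            /* ns -> cycles multiplier        */
    uint32_t shift;           /* right shift applied afterwards */
    int64_t  min_delta_ns;    /* shortest programmable interval */
};

static uint64_t ns_to_cycles(const struct clock_event_dev *dev, int64_t delta_ns)
{
    if (delta_ns < dev->min_delta_ns)
        delta_ns = dev->min_delta_ns;   /* never program below the device minimum */

    return ((uint64_t)delta_ns * dev->mult) >> dev->shift;
}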
tick-sched.c
61 ktime_t delta, nextp; in tick_do_update_jiffies64() local
104 delta = ktime_sub(now, tick_next_period); in tick_do_update_jiffies64()
105 if (unlikely(delta >= TICK_NSEC)) { in tick_do_update_jiffies64()
109 ticks += ktime_divns(delta, incr); in tick_do_update_jiffies64()
670 ktime_t delta; in update_ts_time_stats() local
673 delta = ktime_sub(now, ts->idle_entrytime); in update_ts_time_stats()
675 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); in update_ts_time_stats()
677 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); in update_ts_time_stats()
729 ktime_t delta = ktime_sub(now, ts->idle_entrytime); in get_cpu_idle_time_us() local
731 idle = ktime_add(ts->idle_sleeptime, delta); in get_cpu_idle_time_us()
[all …]
timecounter.c
82 u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; in timecounter_cyc2time() local
90 if (delta > tc->cc->mask / 2) { in timecounter_cyc2time()
91 delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask; in timecounter_cyc2time()
92 nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac); in timecounter_cyc2time()
94 nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac); in timecounter_cyc2time()
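timecounter_cyc2time() treats a masked cycle delta larger than half the mask as a timestamp slightly in the past rather than far in the future. A sketch of that wraparound rule, with the nanosecond conversion reduced to a plain multiply for illustration:

/* Sketch of the half-range wraparound heuristic for masked cycle counters. */
#include <stdint.h>

static int64_t cycles_to_ns_signed(uint64_t cycle_tstamp, uint64_t cycle_last,
                                   uint64_t mask, uint64_t ns_per_cycle)
{
    uint64_t delta = (cycle_tstamp - cycle_last) & mask;

    if (delta > mask / 2) {
        /* Timestamp is before cycle_last: measure the gap the other way. */
        delta = (cycle_last - cycle_tstamp) & mask;
        return -(int64_t)(delta * ns_per_cycle);
    }
    return (int64_t)(delta * ns_per_cycle);
}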
alarmtimer.c
266 ktime_t delta; in alarmtimer_suspend() local
273 delta = ktime_sub(next->expires, base->get_ktime()); in alarmtimer_suspend()
274 if (!min || (delta < min)) { in alarmtimer_suspend()
276 min = delta; in alarmtimer_suspend()
444 ktime_t delta; in alarm_forward() local
446 delta = ktime_sub(now, alarm->node.expires); in alarm_forward()
448 if (delta < 0) in alarm_forward()
451 if (unlikely(delta >= interval)) { in alarm_forward()
454 overrun = ktime_divns(delta, incr); in alarm_forward()
511 ktime_t delta; in alarmtimer_freezerset() local
[all …]
clocksource.c
339 s64 delta; in clocksource_verify_percpu() local
362 delta = (s64)((csnow_mid - csnow_begin) & cs->mask); in clocksource_verify_percpu()
363 if (delta < 0) in clocksource_verify_percpu()
365 delta = (csnow_end - csnow_mid) & cs->mask; in clocksource_verify_percpu()
366 if (delta < 0) in clocksource_verify_percpu()
368 delta = clocksource_delta(csnow_end, csnow_begin, cs->mask); in clocksource_verify_percpu()
369 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); in clocksource_verify_percpu()
400 u64 csnow, wdnow, cslast, wdlast, delta; in clocksource_watchdog() local
458 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask); in clocksource_watchdog()
459 wd_nsec = clocksource_cyc2ns(delta, watchdog->mult, in clocksource_watchdog()
[all …]
ntp.c
402 s64 delta; in second_overflow() local
467 delta = ntp_offset_chunk(time_offset); in second_overflow()
468 time_offset -= delta; in second_overflow()
469 tick_length += delta; in second_overflow()
934 long delta, delta_mod; in hardpps_update_freq() local
954 delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT); in hardpps_update_freq()
956 if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) { in hardpps_update_freq()
958 "hardpps: PPSWANDER: change=%ld\n", delta); in hardpps_update_freq()
970 delta_mod = delta; in hardpps_update_freq()
984 return delta; in hardpps_update_freq()
hrtimer.c
1040 ktime_t delta; in hrtimer_forward() local
1042 delta = ktime_sub(now, hrtimer_get_expires(timer)); in hrtimer_forward()
1044 if (delta < 0) in hrtimer_forward()
1053 if (unlikely(delta >= interval)) { in hrtimer_forward()
1056 orun = ktime_divns(delta, incr); in hrtimer_forward()
1784 ktime_t expires_next, now, entry_time, delta; in hrtimer_interrupt() local
1857 delta = ktime_sub(now, entry_time); in hrtimer_interrupt()
1858 if ((unsigned int)delta > cpu_base->max_hang_time) in hrtimer_interrupt()
1859 cpu_base->max_hang_time = (unsigned int) delta; in hrtimer_interrupt()
1864 if (delta > 100 * NSEC_PER_MSEC) in hrtimer_interrupt()
[all …]
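hrtimer_forward() computes how many whole intervals fit into the delta, advances the expiry by that many, and adds one more interval if the expiry still is not in the future. A sketch using plain int64 nanoseconds in place of ktime_t:

/* Sketch of the periodic-timer forwarding / overrun-counting pattern. */
#include <stdint.h>

static uint64_t timer_forward(int64_t *expires, int64_t now, int64_t interval)
{
    int64_t delta = now - *expires;
    uint64_t overruns;

    if (delta < 0)
        return 0;                          /* not expired yet, nothing to do */

    overruns = (uint64_t)(delta / interval);
    *expires += (int64_t)overruns * interval;

    if (*expires <= now) {                 /* still not in the future: push one more period */
        overruns++;
        *expires += interval;
    }
    return overruns;                       /* number of missed periods */
}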
/kernel/cgroup/
rstat.c
363 struct cgroup_base_stat delta; in cgroup_base_stat_flush() local
373 delta = rstatc->bstat; in cgroup_base_stat_flush()
377 cgroup_base_stat_sub(&delta, &rstatc->last_bstat); in cgroup_base_stat_flush()
378 cgroup_base_stat_add(&cgrp->bstat, &delta); in cgroup_base_stat_flush()
379 cgroup_base_stat_add(&rstatc->last_bstat, &delta); in cgroup_base_stat_flush()
383 delta = cgrp->bstat; in cgroup_base_stat_flush()
384 cgroup_base_stat_sub(&delta, &cgrp->last_bstat); in cgroup_base_stat_flush()
385 cgroup_base_stat_add(&parent->bstat, &delta); in cgroup_base_stat_flush()
386 cgroup_base_stat_add(&cgrp->last_bstat, &delta); in cgroup_base_stat_flush()
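cgroup_base_stat_flush() propagates only the difference between the current stats and the snapshot last reported upward, then advances that snapshot so nothing is counted twice. A sketch of the same idea reduced to a single counter per node:

/* Sketch of delta propagation up a stats hierarchy, rstat-style. */
#include <stdint.h>
#include <stddef.h>

struct stat_node {
    uint64_t value;              /* locally accumulated total         */
    uint64_t last_value;         /* portion already passed to parent  */
    struct stat_node *parent;    /* NULL at the root                  */
};

static void stat_flush(struct stat_node *node)
{
    uint64_t delta = node->value - node->last_value;

    if (!delta || !node->parent)
        return;

    node->parent->value += delta;   /* propagate only the new part        */
    node->last_value    += delta;   /* remember it has been reported once */
}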
