Searched refs:u64 (Results 1 – 25 of 143) sorted by relevance

/kernel/sched/
cputime.c
35 static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, in irqtime_account_delta()
38 u64 *cpustat = kcpustat_this_cpu->cpustat; in irqtime_account_delta()
77 static u64 irqtime_tick_accounted(u64 maxtime) in irqtime_tick_accounted()
80 u64 delta; in irqtime_tick_accounted()
92 static u64 irqtime_tick_accounted(u64 dummy) in irqtime_tick_accounted()
100 u64 tmp) in task_group_account_field()
118 void account_user_time(struct task_struct *p, u64 cputime) in account_user_time()
143 void account_guest_time(struct task_struct *p, u64 cputime) in account_guest_time()
145 u64 *cpustat = kcpustat_this_cpu->cpustat; in account_guest_time()
169 u64 cputime, enum cpu_usage_stat index) in account_system_index_time()
[all …]
clock.c
87 __read_mostly u64 __sched_clock_offset;
88 static __read_mostly u64 __gtod_offset;
91 u64 tick_raw;
92 u64 tick_gtod;
93 u64 clock;
249 static inline u64 wrap_min(u64 x, u64 y) in wrap_min()
254 static inline u64 wrap_max(u64 x, u64 y) in wrap_max()
265 static u64 sched_clock_local(struct sched_clock_data *scd) in sched_clock_local()
267 u64 now, clock, old_clock, min_clock, max_clock, gtod; in sched_clock_local()
298 static u64 sched_clock_remote(struct sched_clock_data *scd) in sched_clock_remote()
[all …]
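
Note: wrap_min()/wrap_max() in the clock.c hits above clamp a possibly-stale
sched_clock reading via a signed view of the difference, which stays correct
across u64 wraparound. A minimal standalone sketch (clamp_clock() is a
hypothetical wrapper, not a kernel function):

    #include <stdint.h>

    typedef uint64_t u64;
    typedef int64_t  s64;

    /* Order two free-running counters even across wraparound: the signed
     * difference is valid while the values are within 2^63 of each other. */
    static inline u64 wrap_min(u64 x, u64 y) { return (s64)(x - y) < 0 ? x : y; }
    static inline u64 wrap_max(u64 x, u64 y) { return (s64)(x - y) > 0 ? x : y; }

    /* Clamp a clock reading into [lo, hi]. */
    static inline u64 clamp_clock(u64 clock, u64 lo, u64 hi)
    {
        clock = wrap_max(clock, lo);
        return wrap_min(clock, hi);
    }
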
pelt.h
4 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_irq_load_avg(struct rq *rq, u64 running);
14 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg()
117 static inline u64 rq_clock_pelt(struct rq *rq) in rq_clock_pelt()
127 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in cfs_rq_clock_pelt()
135 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in cfs_rq_clock_pelt()
[all …]
pelt.c
37 static u64 decay_load(u64 val, u64 n) in decay_load()
63 static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3) in __accumulate_pelt_segments()
70 c1 = decay_load((u64)d1, periods); in __accumulate_pelt_segments()
110 accumulate_sum(u64 delta, struct sched_avg *sa, in accumulate_sum()
114 u64 periods; in accumulate_sum()
126 sa->util_sum = decay_load((u64)(sa->util_sum), periods); in accumulate_sum()
176 ___update_load_sum(u64 now, struct sched_avg *sa, in ___update_load_sum()
179 u64 delta; in ___update_load_sum()
266 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se) in __update_load_avg_blocked_se()
277 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se()
[all …]
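
Note: decay_load() in the pelt.c hits scales a load sum by y^n, where PELT
picks y so that y^32 == 1/2: whole 32-period chunks become exact right
shifts, and only the remainder needs one fixed-point multiply. A
self-contained sketch; the kernel keeps the y^k table as precomputed
constants, deriving it with libm here is just to keep the example runnable:

    #include <stdint.h>
    #include <math.h>

    typedef uint64_t u64;
    typedef uint32_t u32;

    #define LOAD_AVG_PERIOD 32            /* y^32 == 1/2 */

    static u32 y_inv[LOAD_AVG_PERIOD];    /* floor(y^k * 2^32) */

    static void init_y_inv(void)
    {
        double y = pow(0.5, 1.0 / LOAD_AVG_PERIOD);
        for (int k = 0; k < LOAD_AVG_PERIOD; k++) {
            double v = pow(y, k) * 4294967296.0;
            y_inv[k] = v >= 4294967295.0 ? 0xffffffffu : (u32)v;
        }
    }

    /* val * y^n: halve once per full period, then one multiply for the
     * remainder (the kernel uses a 128-bit multiply helper here too). */
    static u64 decay_load(u64 val, u64 n)
    {
        if (n >= LOAD_AVG_PERIOD * 63)    /* decayed to nothing */
            return 0;
        val >>= n / LOAD_AVG_PERIOD;
        n %= LOAD_AVG_PERIOD;
        return (u64)(((__uint128_t)val * y_inv[n]) >> 32);
    }
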
sched.h
155 #define RUNTIME_INF ((u64)~0ULL)
245 u64 rt_runtime;
254 u64 dl_runtime;
255 u64 dl_period;
283 u64 bw;
284 u64 total_bw;
290 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) in __dl_sub()
297 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) in __dl_add()
304 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) in __dl_overflow()
310 extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
[all …]
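
Note: the sched.h hits around __dl_add()/__dl_sub()/__dl_overflow() track
deadline-scheduler bandwidth as plain u64 sums. A hedged sketch of the
admission test they suggest (the struct and field semantics below are
stand-ins inferred from the signatures, not a definitive reading):

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t u64;

    struct dl_bw_sketch {
        u64 bw;        /* bandwidth cap per CPU; (u64)-1 means no limit */
        u64 total_bw;  /* bandwidth already admitted */
    };

    /* Would swapping old_bw for new_bw exceed cap * cpus? */
    static bool dl_overflow_sketch(struct dl_bw_sketch *b, int cpus,
                                   u64 old_bw, u64 new_bw)
    {
        return b->bw != (u64)-1 &&
               b->bw * cpus < b->total_bw - old_bw + new_bw;
    }
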
cpuacct.c
24 u64 usages[CPUACCT_STAT_NSTATS];
98 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu, in cpuacct_cpuusage_read()
102 u64 data; in cpuacct_cpuusage_read()
134 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) in cpuacct_cpuusage_write()
155 static u64 __cpuusage_read(struct cgroup_subsys_state *css, in __cpuusage_read()
159 u64 totalcpuusage = 0; in __cpuusage_read()
168 static u64 cpuusage_user_read(struct cgroup_subsys_state *css, in cpuusage_user_read()
174 static u64 cpuusage_sys_read(struct cgroup_subsys_state *css, in cpuusage_sys_read()
180 static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft) in cpuusage_read()
186 u64 val) in cpuusage_write()
[all …]
psi.c
172 static u64 psi_period __read_mostly;
242 u64 now, state_start; in get_recent_times()
283 u64 time, u64 period) in calc_avgs()
306 u64 deltas[NR_PSI_STATES - 1] = { 0, }; in collect_percpu_times()
333 deltas[s] += (u64)times[s] * nonidle; in collect_percpu_times()
357 static u64 update_averages(struct psi_group *group, u64 now) in update_averages()
360 u64 expires, period; in update_averages()
361 u64 avg_next_update; in update_averages()
416 u64 now; in psi_avgs_work()
446 static void window_reset(struct psi_window *win, u64 now, u64 value, in window_reset()
[all …]
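
Note: calc_avgs() in the psi.c hits builds on the kernel's classic
fixed-point load-average step, avg' = avg*e + sample*(1 - e), with the decay
factor e scaled by FIXED_1. A minimal sketch of that one step:

    /* 11-bit fixed point, the kernel's load-average scale. */
    #define FSHIFT  11
    #define FIXED_1 (1UL << FSHIFT)

    /* One averaging step; e is the decay factor in FIXED_1 units
     * (e.g. roughly 1/exp(2s/10s) for a 10s window sampled every 2s). */
    static unsigned long calc_load(unsigned long avg, unsigned long e,
                                   unsigned long sample)
    {
        return (avg * e + sample * (FIXED_1 - e)) >> FSHIFT;
    }
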
/kernel/time/
timecounter.c
10 u64 start_tstamp) in timecounter_init()
31 static u64 timecounter_read_delta(struct timecounter *tc) in timecounter_read_delta()
33 u64 cycle_now, cycle_delta; in timecounter_read_delta()
34 u64 ns_offset; in timecounter_read_delta()
52 u64 timecounter_read(struct timecounter *tc) in timecounter_read()
54 u64 nsec; in timecounter_read()
69 static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, in cc_cyc2ns_backwards()
70 u64 cycles, u64 mask, u64 frac) in cc_cyc2ns_backwards()
72 u64 ns = (u64) cycles; in cc_cyc2ns_backwards()
79 u64 timecounter_cyc2time(struct timecounter *tc, in timecounter_cyc2time()
[all …]
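
Note: timecounter_read_delta() and the cyc2ns helpers above convert masked
cycle deltas to nanoseconds while carrying the sub-nanosecond remainder
between calls, so repeated small conversions never lose precision. A
simplified sketch (cyc2ns_sketch() is a hypothetical name):

    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint32_t u32;

    /* ns = (cycles * mult) >> shift, with the low "shift" bits kept in
     * *frac for the next call. Assumes cycles * mult fits in 64 bits
     * (the real code bounds its inputs accordingly). */
    static u64 cyc2ns_sketch(u64 cycles, u32 mult, u32 shift, u64 *frac)
    {
        u64 ns = cycles * mult + *frac;

        *frac = ns & ((1ULL << shift) - 1);
        return ns >> shift;
    }
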
timekeeping.c
73 static u64 cycles_at_suspend;
75 static u64 dummy_clock_read(struct clocksource *cs) in dummy_clock_read()
99 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) { in tk_normalize_xtime()
100 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in tk_normalize_xtime()
103 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) { in tk_normalize_xtime()
104 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in tk_normalize_xtime()
121 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_set_xtime()
127 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_xtime_add()
171 static inline u64 tk_clock_read(const struct tk_read_base *tkr) in tk_clock_read()
181 static void timekeeping_check_update(struct timekeeper *tk, u64 offset) in timekeeping_check_update()
[all …]
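
Note: the tk_normalize_xtime() hits show the timekeeper storing nanoseconds
left-shifted (xtime_nsec == ns << shift), so fractional nanoseconds from the
mult/shift conversion survive between updates. A compact illustration with a
hypothetical mini-timekeeper:

    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint32_t u32;

    #define NSEC_PER_SEC 1000000000ULL

    struct tk_sketch {
        u64 xtime_sec;
        u64 xtime_nsec;   /* shifted nanoseconds: ns << shift */
        u32 shift;
    };

    /* Move whole seconds out of the shifted-ns field. */
    static void tk_normalize(struct tk_sketch *tk)
    {
        while (tk->xtime_nsec >= (NSEC_PER_SEC << tk->shift)) {
            tk->xtime_nsec -= NSEC_PER_SEC << tk->shift;
            tk->xtime_sec++;
        }
    }
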
posix-cpu-timers.c
23 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) in posix_cputimers_group_init()
40 u64 nsecs = rlim_new * NSEC_PER_SEC; in update_rlimit_cpu()
125 static u64 bump_cpu_timer(struct k_itimer *timer, u64 now) in bump_cpu_timer()
127 u64 delta, incr, expires = timer->it.cpu.node.expires; in bump_cpu_timer()
197 static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p) in cpu_clock_sample()
199 u64 utime, stime; in cpu_clock_sample()
217 static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime) in store_samples()
224 static void task_sample_cputime(struct task_struct *p, u64 *samples) in task_sample_cputime()
226 u64 stime, utime; in task_sample_cputime()
233 u64 *samples) in proc_sample_cputime_atomic()
[all …]
timekeeping_internal.h
17 static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) in clocksource_delta()
19 u64 ret = (now - last) & mask; in clocksource_delta()
28 static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) in clocksource_delta()
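
Note: clocksource_delta() handles counters narrower than 64 bits: subtract
in u64 and mask, so a wrapped counter still yields the correct forward
delta. A runnable example:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Wrap-safe delta for a counter that is only "mask" bits wide. */
    static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
    {
        return (now - last) & mask;
    }

    int main(void)
    {
        /* A 32-bit counter that wrapped: reads 0x10 now, 0xfffffff0 before. */
        printf("%llu\n", (unsigned long long)
               clocksource_delta(0x10, 0xfffffff0, 0xffffffff));  /* 32 */
        return 0;
    }
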
sched_clock.c
39 u64 epoch_ns;
40 u64 epoch_cyc;
41 u64 sched_clock_mask;
42 u64 (*read_sched_clock)(void);
68 u64 (*actual_read_sched_clock)(void);
76 static u64 notrace jiffy_sched_clock_read(void) in jiffy_sched_clock_read()
82 return (u64)(jiffies - INITIAL_JIFFIES); in jiffy_sched_clock_read()
91 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) in cyc_to_ns()
98 u64 cyc, res; in sched_clock()
144 u64 cyc; in update_sched_clock()
[all …]
jiffies.c
35 static u64 jiffies_read(struct clocksource *cs) in jiffies_read()
37 return (u64) jiffies; in jiffies_read()
65 u64 get_jiffies_64(void) in get_jiffies_64()
68 u64 ret; in get_jiffies_64()
97 u64 nsec_per_tick, shift_hz; in register_refined_jiffies()
109 shift_hz = (u64)cycles_per_second << 8; in register_refined_jiffies()
113 nsec_per_tick = (u64)NSEC_PER_SEC << 8; in register_refined_jiffies()
vsyscall.c
20 u64 nsec, sec; in update_vdso_data()
36 nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift); in update_vdso_data()
37 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { in update_vdso_data()
38 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); in update_vdso_data()
47 nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift; in update_vdso_data()
53 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { in update_vdso_data()
54 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); in update_vdso_data()
74 u64 nsec; in update_vsyscall()
time.c
629 __timespec64_to_jiffies(u64 sec, long nsec) in __timespec64_to_jiffies()
638 (((u64)nsec * NSEC_CONVERSION) >> in __timespec64_to_jiffies()
646 return __timespec64_to_jiffies((u64)sec, nsec); in __timespec_to_jiffies()
664 value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, in jiffies_to_timespec64()
702 value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, in jiffies_to_timeval()
720 return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ); in jiffies_to_clock_t()
737 return div_u64((u64)x * HZ, USER_HZ); in clock_t_to_jiffies()
742 u64 jiffies_64_to_clock_t(u64 x) in jiffies_64_to_clock_t()
764 u64 nsec_to_clock_t(u64 x) in nsec_to_clock_t()
781 u64 jiffies64_to_nsecs(u64 j) in jiffies64_to_nsecs()
[all …]
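
Note: jiffies_to_clock_t() in the time.c hits converts via nanoseconds,
multiplying before dividing so the division doesn't truncate early. A worked
example (HZ and TICK_NSEC simplified here; the kernel's TICK_NSEC includes
rounding):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    #define HZ           250                  /* example kernel tick rate */
    #define USER_HZ      100                  /* what userspace sees */
    #define NSEC_PER_SEC 1000000000ULL
    #define TICK_NSEC    (NSEC_PER_SEC / HZ)  /* 4,000,000 ns per jiffy */

    static u64 jiffies_to_clock_t(u64 x)
    {
        return x * TICK_NSEC / (NSEC_PER_SEC / USER_HZ);
    }

    int main(void)
    {
        /* 250 jiffies == 1 second == 100 USER_HZ ticks. */
        printf("%llu\n", (unsigned long long)jiffies_to_clock_t(250));
        return 0;
    }
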
clocksource.c
47 u64 tmp; in clocks_calc_mult_shift()
54 tmp = ((u64)maxsec * from) >> 32; in clocks_calc_mult_shift()
65 tmp = (u64) to << sft; in clocks_calc_mult_shift()
94 static u64 suspend_start;
197 static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) in cs_watchdog_read()
200 u64 wd_end, wd_delta; in cs_watchdog_read()
229 u64 csnow, wdnow, cslast, wdlast, delta; in clocksource_watchdog()
566 void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles) in clocksource_start_suspend_timing()
604 u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now) in clocksource_stop_suspend_timing()
606 u64 now, delta, nsec = 0; in clocksource_stop_suspend_timing()
[all …]
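
Note: clocks_calc_mult_shift() chooses the mult/shift pair behind all the
cyc-to-ns conversions above: the largest shift (best precision) whose
intermediate product cannot overflow 64 bits within maxsec seconds. A
simplified sketch of that search:

    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint32_t u32;

    /* Pick mult/shift so (cycles * mult) >> shift ~= cycles * to / from. */
    static void calc_mult_shift(u32 *mult, u32 *shift,
                                u32 from, u32 to, u32 maxsec)
    {
        u32 sft, sftacc = 32;
        u64 tmp;

        /* How many extra bits does the largest expected delta consume? */
        tmp = ((u64)maxsec * from) >> 32;
        while (tmp) {
            tmp >>= 1;
            sftacc--;
        }

        /* Largest shift whose rounded mult still fits in sftacc bits. */
        for (sft = 32; sft > 0; sft--) {
            tmp = ((u64)to << sft) + from / 2;
            tmp /= from;
            if ((tmp >> sftacc) == 0)
                break;
        }
        *mult = (u32)tmp;
        *shift = sft;
    }

For example, from = 1000000 (a 1 MHz counter) and to = 1000000000 (ns)
yields a mult/shift equivalent to multiplying cycles by 1000.
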
/kernel/trace/
trace_benchmark.c
14 static u64 bm_total;
15 static u64 bm_totalsq;
16 static u64 bm_last;
17 static u64 bm_max;
18 static u64 bm_min;
19 static u64 bm_first;
20 static u64 bm_cnt;
21 static u64 bm_stddev;
39 u64 start; in trace_do_benchmark()
40 u64 stop; in trace_do_benchmark()
[all …]
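
Note: the trace_benchmark.c counters (bm_total, bm_totalsq, bm_cnt) are the
running sums needed for a one-pass variance, var = (n*Σx² − (Σx)²) / (n*(n−1)).
A sketch of just that step; the file itself also rescales and takes an
integer square root to produce bm_stddev:

    #include <stdint.h>

    typedef uint64_t u64;

    /* One-pass sample variance from running sums. Beware overflow for
     * large inputs; the kernel code works around this with rescaling. */
    static u64 variance(u64 n, u64 total, u64 totalsq)
    {
        if (n < 2)
            return 0;
        return (n * totalsq - total * total) / (n * (n - 1));
    }
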
bpf_trace.c
62 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
63 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
214 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, in BPF_CALL_5() argument
215 u64, arg2, u64, arg3) in BPF_CALL_5()
220 u64 unsafe_addr; in BPF_CALL_5()
347 get_map_perf_counter(struct bpf_map *map, u64 flags, in get_map_perf_counter()
348 u64 *value, u64 *enabled, u64 *running) in get_map_perf_counter()
352 u64 index = flags & BPF_F_INDEX_MASK; in get_map_perf_counter()
369 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) in BPF_CALL_2() argument
371 u64 value = 0; in BPF_CALL_2()
[all …]
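
Note: the bpf_trace.c hits show that every BPF helper is entered through a
uniform five-u64-register ABI, with the BPF_CALL_x macros generating a
casting wrapper around a typed body. A hand-written illustration of the
shape of that wrapper (my_helper* are hypothetical names; the real macros
live in include/linux/filter.h):

    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint32_t u32;

    /* The typed body a BPF_CALL_3-style macro would wrap. */
    static u64 my_helper_impl(char *fmt, u32 fmt_size, u64 arg1)
    {
        (void)fmt; (void)fmt_size; (void)arg1;
        return 0;
    }

    /* The uniform entry point the BPF runtime actually calls. */
    u64 my_helper(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
    {
        (void)r4; (void)r5;
        /* Cast the raw registers back to the typed parameters. */
        return my_helper_impl((char *)(unsigned long)r1, (u32)r2, r3);
    }
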
trace_hwlat.c
67 static u64 nmi_ts_start;
68 static u64 nmi_total_ts;
76 static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;
80 u64 seqnum; /* unique sequence */
81 u64 duration; /* delta */
82 u64 outer_duration; /* delta (outer loop) */
83 u64 nmi_total_ts; /* Total time spent in NMIs */
93 u64 count; /* total since reset */
95 u64 sample_window; /* total sampling window (on+off) */
96 u64 sample_width; /* active sampling portion of window */
[all …]
trace_clock.c
32 u64 notrace trace_clock_local(void) in trace_clock_local()
34 u64 clock; in trace_clock_local()
57 u64 notrace trace_clock(void) in trace_clock()
70 u64 notrace trace_clock_jiffies(void) in trace_clock_jiffies()
87 u64 prev_time;
94 u64 notrace trace_clock_global(void) in trace_clock_global()
98 u64 now, prev_time; in trace_clock_global()
155 u64 notrace trace_clock_counter(void) in trace_clock_counter()
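
Note: trace_clock_global() keeps a shared prev_time so per-CPU clocks never
appear to run backwards across CPUs. A sketch of the clamp, with the
locking and NMI handling the real function needs omitted:

    #include <stdint.h>

    typedef uint64_t u64;
    typedef int64_t  s64;

    static u64 prev_time;  /* protected by a lock in the real code */

    /* If this CPU's clock is behind the last value handed out,
     * reuse that value instead of going backwards. */
    static u64 global_monotonic(u64 now)
    {
        if ((s64)(now - prev_time) < 0)
            now = prev_time;
        prev_time = now;
        return now;
    }
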
/kernel/
kcov.c
80 u64 handle;
90 static struct kcov_remote *kcov_remote_find(u64 handle) in kcov_remote_find()
101 static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle) in kcov_remote_add()
201 static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip) in write_comp_data()
204 u64 *area; in write_comp_data()
205 u64 count, start_index, end_pos, max_pos; in write_comp_data()
217 area = (u64 *)t->kcov_area; in write_comp_data()
224 end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64); in write_comp_data()
252 void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2) in __sanitizer_cov_trace_cmp8()
279 void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2) in __sanitizer_cov_trace_const_cmp8()
[all …]
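
Note: write_comp_data() in the kcov.c hits appends fixed-size comparison
records to a shared area whose first word is the record count. A hedged
sketch of that layout, with the bounds handling simplified and the
WRITE_ONCE the kernel uses for the count left out:

    #include <stdint.h>

    typedef uint64_t u64;

    #define KCOV_WORDS_PER_CMP 4  /* type, arg1, arg2, ip */

    static void write_comp_data(u64 *area, u64 max_words,
                                u64 type, u64 arg1, u64 arg2, u64 ip)
    {
        u64 count = area[0];
        u64 start = 1 + count * KCOV_WORDS_PER_CMP;

        if (start + KCOV_WORDS_PER_CMP > max_words)
            return;  /* buffer full */
        area[start + 0] = type;
        area[start + 1] = arg1;
        area[start + 2] = arg2;
        area[start + 3] = ip;
        area[0] = count + 1;
    }
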
/kernel/irq/
timings.c
290 u64 last_ts;
291 u64 ema_time[PREDICTION_BUFFER_SIZE];
300 static u64 irq_timings_ema_new(u64 value, u64 ema_old) in irq_timings_ema_new()
382 static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now) in __irq_timings_next_event()
435 static __always_inline int irq_timings_interval_index(u64 interval) in irq_timings_interval_index()
441 u64 interval_us = (interval >> 10) / PREDICTION_FACTOR; in irq_timings_interval_index()
447 u64 interval) in __irq_timings_store()
473 static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts) in irq_timings_store()
475 u64 old_ts = irqs->last_ts; in irq_timings_store()
476 u64 interval; in irq_timings_store()
[all …]
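
Note: irq_timings_ema_new() suggests an integer exponential moving average,
ema' = ema + (value − ema) / 2^k. A minimal sketch; EMA_SHIFT below is an
arbitrary choice for illustration, not the kernel's constant:

    #include <stdint.h>

    typedef uint64_t u64;
    typedef int64_t  s64;

    #define EMA_SHIFT 3  /* alpha = 1/8 */

    static u64 ema_new(u64 value, u64 ema_old)
    {
        s64 diff;

        if (!ema_old)          /* first sample seeds the average */
            return value;
        /* Arithmetic shift of the signed delta; like the kernel, this
         * relies on the compiler shifting negative values arithmetically. */
        diff = (s64)(value - ema_old);
        return ema_old + (diff >> EMA_SHIFT);
    }
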
/kernel/bpf/
tnum.c
16 struct tnum tnum_const(u64 value) in tnum_const()
21 struct tnum tnum_range(u64 min, u64 max) in tnum_range()
23 u64 chi = min ^ max, delta; in tnum_range()
64 u64 sm, sv, sigma, chi, mu; in tnum_add()
76 u64 dv, alpha, beta, chi, mu; in tnum_sub()
88 u64 alpha, beta, v; in tnum_and()
98 u64 v, mu; in tnum_or()
107 u64 v, mu; in tnum_xor()
117 static struct tnum hma(struct tnum acc, u64 value, u64 mask) in hma()
131 u64 pi; in tnum_mul()
[all …]
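
Note: the tnum.c hits implement the BPF verifier's tristate numbers: each
u64 is tracked as a (value, mask) pair where bits set in mask are unknown
and the rest are given by value. A self-contained sketch of two of the
listed operations:

    #include <stdint.h>

    typedef uint64_t u64;

    struct tnum { u64 value; u64 mask; };

    static struct tnum tnum_const(u64 value)
    {
        return (struct tnum){ .value = value, .mask = 0 };  /* fully known */
    }

    /* AND: a result bit can only be 1 if it may be 1 on both sides;
     * it is certainly 1 only where both sides are certainly 1. */
    static struct tnum tnum_and(struct tnum a, struct tnum b)
    {
        u64 alpha = a.value | a.mask;   /* bits possibly 1 in a */
        u64 beta  = b.value | b.mask;   /* bits possibly 1 in b */
        u64 v     = a.value & b.value;  /* bits certainly 1 */

        return (struct tnum){ .value = v, .mask = alpha & beta & ~v };
    }
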
core.c
754 u64 __weak bpf_jit_alloc_exec_limit(void) in bpf_jit_alloc_exec_limit()
767 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, in bpf_jit_charge_init()
874 u64 *func_addr, bool *func_addr_fixed) in bpf_jit_get_func_addr()
1144 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) in __bpf_call_base()
1317 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) in ___bpf_prog_run()
1408 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; in ___bpf_prog_run()
1412 DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); in ___bpf_prog_run()
1415 DST = (u64) (u32) (((s32) DST) >> IMM); in ___bpf_prog_run()
1464 DST = (__force u64) cpu_to_be64(DST); in ___bpf_prog_run()
1477 DST = (__force u64) cpu_to_le64(DST); in ___bpf_prog_run()
[all …]
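
Note: the interpreter hit DST = (u64) (u32) (((s32) DST) >> (SRC & 31)) is
BPF's 32-bit arithmetic shift: operate on the sign-extended low word, then
zero-extend the 32-bit result into the 64-bit register. A runnable check:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;
    typedef uint32_t u32;
    typedef int32_t  s32;

    static u64 alu32_arsh(u64 dst, u32 shift)
    {
        return (u64)(u32)((s32)dst >> (shift & 31));
    }

    int main(void)
    {
        /* 0xffffff00 is -256 as s32; >> 4 gives -16 = 0xfffffff0, which
         * lands in the register zero-extended, not sign-extended. */
        printf("%llx\n", (unsigned long long)alu32_arsh(0xffffff00, 4));
        return 0;
    }
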
/kernel/events/
core.c
430 u64 tmp = perf_sample_period_ns; in update_perf_cpu_limits()
495 static DEFINE_PER_CPU(u64, running_sample_length);
497 static u64 __report_avg;
498 static u64 __report_allowed;
511 void perf_sample_event_took(u64 sample_len_ns) in perf_sample_event_took()
513 u64 max_len = READ_ONCE(perf_sample_allowed_ns); in perf_sample_event_took()
514 u64 running_len; in perf_sample_event_took()
515 u64 avg_len; in perf_sample_event_took()
573 static u64 perf_event_time(struct perf_event *event);
582 static inline u64 perf_clock(void) in perf_clock()
[all …]
