
Searched refs: u64 (Results 1 – 25 of 171), sorted by relevance

/kernel/sched/
pelt.h
4 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
13 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg()
19 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) in update_thermal_load_avg()
24 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg()
31 int update_irq_load_avg(struct rq *rq, u64 running);
[all …]
cputime.c
37 static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, in irqtime_account_delta()
40 u64 *cpustat = kcpustat_this_cpu->cpustat; in irqtime_account_delta()
91 static u64 irqtime_tick_accounted(u64 maxtime) in irqtime_tick_accounted()
94 u64 delta; in irqtime_tick_accounted()
106 static u64 irqtime_tick_accounted(u64 dummy) in irqtime_tick_accounted()
114 u64 tmp) in task_group_account_field()
132 void account_user_time(struct task_struct *p, u64 cputime) in account_user_time()
157 void account_guest_time(struct task_struct *p, u64 cputime) in account_guest_time()
159 u64 *cpustat = kcpustat_this_cpu->cpustat; in account_guest_time()
183 u64 cputime, enum cpu_usage_stat index) in account_system_index_time()
[all …]
clock.c
87 __read_mostly u64 __sched_clock_offset;
88 static __read_mostly u64 __gtod_offset;
91 u64 tick_raw;
92 u64 tick_gtod;
93 u64 clock;
249 static inline u64 wrap_min(u64 x, u64 y) in wrap_min()
254 static inline u64 wrap_max(u64 x, u64 y) in wrap_max()
265 static u64 sched_clock_local(struct sched_clock_data *scd) in sched_clock_local()
267 u64 now, clock, old_clock, min_clock, max_clock, gtod; in sched_clock_local()
298 static u64 sched_clock_remote(struct sched_clock_data *scd) in sched_clock_remote()
[all …]
pelt.c
35 static u64 decay_load(u64 val, u64 n) in decay_load()
61 static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3) in __accumulate_pelt_segments()
68 c1 = decay_load((u64)d1, periods); in __accumulate_pelt_segments()
106 accumulate_sum(u64 delta, struct sched_avg *sa, in accumulate_sum()
110 u64 periods; in accumulate_sum()
122 sa->util_sum = decay_load((u64)(sa->util_sum), periods); in accumulate_sum()
183 int ___update_load_sum(u64 now, struct sched_avg *sa, in ___update_load_sum()
186 u64 delta; in ___update_load_sum()
299 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se) in __update_load_avg_blocked_se()
311 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se()
[all …]
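
The pelt.c hits above (decay_load(), __accumulate_pelt_segments(), accumulate_sum()) implement PELT's geometric decay, where a load contribution halves every LOAD_AVG_PERIOD (32) periods. Below is a minimal standalone sketch of that decay; it only halves per full 32-period step instead of using the kernel's runnable_avg_yN_inv lookup table and mul_u64_u32_shr(), so it illustrates the shape of the computation, not its exact arithmetic.

/*
 * Minimal sketch of PELT-style geometric decay (illustrative only).
 * The kernel's decay_load() multiplies by y^(n % 32) via a precomputed
 * table; this sketch keeps only the "halve every 32 periods" part.
 */
#include <stdint.h>

#define LOAD_AVG_PERIOD 32	/* a contribution halves every 32 periods */

static uint64_t decay_load_sketch(uint64_t val, uint64_t n)
{
	if (n > LOAD_AVG_PERIOD * 63)
		return 0;			/* fully decayed */

	/* every full LOAD_AVG_PERIOD halves the value */
	val >>= n / LOAD_AVG_PERIOD;

	/* the remaining (n % LOAD_AVG_PERIOD) periods are ignored here;
	 * the kernel scales by y^(n % 32) using a lookup table. */
	return val;
}
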
cpuacct.c
28 u64 __percpu *cpuusage;
48 static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
67 ca->cpuusage = alloc_percpu(u64); in cpuacct_css_alloc()
95 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu, in cpuacct_cpuusage_read()
98 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_read()
99 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat; in cpuacct_cpuusage_read()
100 u64 data; in cpuacct_cpuusage_read()
137 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_write()
138 u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat; in cpuacct_cpuusage_write()
161 static u64 __cpuusage_read(struct cgroup_subsys_state *css, in __cpuusage_read()
[all …]
sched.h
162 #define RUNTIME_INF ((u64)~0ULL)
205 static inline void update_avg(u64 *avg, u64 sample) in update_avg()
265 u64 rt_runtime;
274 u64 dl_runtime;
275 u64 dl_period;
303 u64 bw;
304 u64 total_bw;
310 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) in __dl_sub()
317 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) in __dl_add()
324 u64 old_bw, u64 new_bw) in __dl_overflow()
[all …]
psi.c
184 static u64 psi_period __read_mostly;
286 u64 now, state_start; in get_recent_times()
327 u64 time, u64 period) in calc_avgs()
350 u64 deltas[NR_PSI_STATES - 1] = { 0, }; in collect_percpu_times()
377 deltas[s] += (u64)times[s] * nonidle; in collect_percpu_times()
401 static u64 update_averages(struct psi_group *group, u64 now) in update_averages()
404 u64 expires, period; in update_averages()
405 u64 avg_next_update; in update_averages()
460 u64 now; in psi_avgs_work()
490 static void window_reset(struct psi_window *win, u64 now, u64 value, in window_reset()
[all …]
/kernel/time/
timecounter.c
10 u64 start_tstamp) in timecounter_init()
31 static u64 timecounter_read_delta(struct timecounter *tc) in timecounter_read_delta()
33 u64 cycle_now, cycle_delta; in timecounter_read_delta()
34 u64 ns_offset; in timecounter_read_delta()
52 u64 timecounter_read(struct timecounter *tc) in timecounter_read()
54 u64 nsec; in timecounter_read()
69 static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, in cc_cyc2ns_backwards()
70 u64 cycles, u64 mask, u64 frac) in cc_cyc2ns_backwards()
72 u64 ns = (u64) cycles; in cc_cyc2ns_backwards()
79 u64 timecounter_cyc2time(const struct timecounter *tc, in timecounter_cyc2time()
[all …]
timekeeping_internal.h
19 static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) in clocksource_delta()
21 u64 ret = (now - last) & mask; in clocksource_delta()
30 static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) in clocksource_delta()
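
Both clocksource_delta() variants listed above compute the elapsed cycles of a possibly-wrapping, possibly-narrower-than-64-bit counter by masking the difference. A self-contained sketch, assuming the debug variant's additional clamping of apparently negative deltas to zero:

/*
 * Sketch of the masked-delta idea in clocksource_delta(): a clocksource
 * counter may be narrower than 64 bits and wrap, so the delta is taken
 * modulo the counter width via the mask. Deltas in the upper half of the
 * range are treated as wrap/underflow and clamped to 0 (assumed to match
 * the debug variant's behaviour).
 */
#include <stdint.h>

static inline uint64_t clocksource_delta_sketch(uint64_t now, uint64_t last,
						uint64_t mask)
{
	uint64_t ret = (now - last) & mask;

	return ret > (mask >> 1) ? 0 : ret;
}
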
timekeeping.c
78 static u64 cycles_at_suspend;
80 static u64 dummy_clock_read(struct clocksource *cs) in dummy_clock_read()
120 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) { in tk_normalize_xtime()
121 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in tk_normalize_xtime()
124 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) { in tk_normalize_xtime()
125 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in tk_normalize_xtime()
142 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_set_xtime()
148 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_xtime_add()
192 static inline u64 tk_clock_read(const struct tk_read_base *tkr) in tk_clock_read()
202 static void timekeeping_check_update(struct timekeeper *tk, u64 offset) in timekeeping_check_update()
[all …]
posix-cpu-timers.c
23 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) in posix_cputimers_group_init()
40 u64 nsecs = rlim_new * NSEC_PER_SEC; in update_rlimit_cpu()
115 static u64 bump_cpu_timer(struct k_itimer *timer, u64 now) in bump_cpu_timer()
117 u64 delta, incr, expires = timer->it.cpu.node.expires; in bump_cpu_timer()
187 static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p) in cpu_clock_sample()
189 u64 utime, stime; in cpu_clock_sample()
207 static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime) in store_samples()
214 static void task_sample_cputime(struct task_struct *p, u64 *samples) in task_sample_cputime()
216 u64 stime, utime; in task_sample_cputime()
223 u64 *samples) in proc_sample_cputime_atomic()
[all …]
sched_clock.c
44 u64 (*actual_read_sched_clock)(void);
52 static u64 notrace jiffy_sched_clock_read(void) in jiffy_sched_clock_read()
58 return (u64)(jiffies - INITIAL_JIFFIES); in jiffy_sched_clock_read()
67 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) in cyc_to_ns()
85 u64 cyc, res; in sched_clock()
130 u64 cyc; in update_sched_clock()
131 u64 ns; in update_sched_clock()
153 void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate) in sched_clock_register()
155 u64 res, wrap, new_mask, new_epoch, cyc, ns; in sched_clock_register()
260 static u64 notrace suspended_sched_clock_read(void) in suspended_sched_clock_read()
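
cyc_to_ns() listed above is the standard mult/shift fixed-point conversion from counter cycles to nanoseconds (ns ≈ cyc * mult >> shift). A sketch with an assumed 1 MHz example clock follows; the kernel derives mult and shift with overflow constraints (clocks_calc_mult_shift()) that are omitted here.

/*
 * Sketch of the mult/shift fixed-point conversion behind cyc_to_ns():
 * ns ~= cycles * (mult / 2^shift), where mult/2^shift approximates
 * NSEC_PER_SEC / counter_rate.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t cyc_to_ns_sketch(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* assumed example: a 1 MHz counter, so 1 cycle = 1000 ns.
	 * With shift = 10, mult = 1000 << 10. */
	uint32_t shift = 10, mult = 1000u << 10;

	printf("%llu\n", (unsigned long long)cyc_to_ns_sketch(5, mult, shift)); /* 5000 */
	return 0;
}
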
jiffies.c
16 static u64 jiffies_read(struct clocksource *cs) in jiffies_read()
18 return (u64) jiffies; in jiffies_read()
48 u64 get_jiffies_64(void) in get_jiffies_64()
51 u64 ret; in get_jiffies_64()
80 u64 nsec_per_tick, shift_hz; in register_refined_jiffies()
92 shift_hz = (u64)cycles_per_second << 8; in register_refined_jiffies()
96 nsec_per_tick = (u64)NSEC_PER_SEC << 8; in register_refined_jiffies()
/kernel/trace/
synth_event_gen_test.c
48 u64 vals[7]; in test_gen_synth_cmd()
114 vals[1] = (u64)(long)"hula hoops"; /* next_comm_field */ in test_gen_synth_cmd()
118 vals[5] = (u64)(long)"thneed"; /* my_string_field */ in test_gen_synth_cmd()
139 u64 vals[7]; in test_empty_synth_event()
219 vals[1] = (u64)(long)"tiddlywinks"; /* next_comm_field */ in test_empty_synth_event()
223 vals[5] = (u64)(long)"thneed_2.0"; /* my_string_field */ in test_empty_synth_event()
255 u64 vals[9]; in test_create_synth_event()
291 vals[1] = (u64)(long)"tiddlywinks"; /* next_comm_field */ in test_create_synth_event()
293 vals[3] = (u64)(long)"xrayspecs"; /* dynstring_field_1 */ in test_create_synth_event()
296 vals[6] = (u64)(long)"thneed"; /* my_string_field */ in test_create_synth_event()
[all …]
trace_benchmark.c
14 static u64 bm_total;
15 static u64 bm_totalsq;
16 static u64 bm_last;
17 static u64 bm_max;
18 static u64 bm_min;
19 static u64 bm_first;
20 static u64 bm_cnt;
21 static u64 bm_stddev;
39 u64 start; in trace_do_benchmark()
40 u64 stop; in trace_do_benchmark()
[all …]
trace_osnoise.c
57 u64 count;
58 u64 delta_start;
65 u64 count;
66 u64 arrival_time;
67 u64 delta_start;
76 u64 count;
77 u64 arrival_time;
78 u64 delta_start;
85 u64 count;
86 u64 arrival_time;
[all …]
trace_clock.c
32 u64 notrace trace_clock_local(void) in trace_clock_local()
34 u64 clock; in trace_clock_local()
57 u64 notrace trace_clock(void) in trace_clock()
70 u64 notrace trace_clock_jiffies(void) in trace_clock_jiffies()
87 u64 prev_time;
94 u64 notrace trace_clock_global(void) in trace_clock_global()
98 u64 now, prev_time; in trace_clock_global()
155 u64 notrace trace_clock_counter(void) in trace_clock_counter()
bpf_trace.c
74 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
75 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
78 u64 flags, const struct btf **btf,
368 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, in BPF_CALL_5() argument
369 u64, arg2, u64, arg3) in BPF_CALL_5()
371 u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; in BPF_CALL_5()
471 u32, btf_ptr_size, u64, flags) in BPF_CALL_4() argument
496 get_map_perf_counter(struct bpf_map *map, u64 flags, in get_map_perf_counter()
497 u64 *value, u64 *enabled, u64 *running) in get_map_perf_counter()
501 u64 index = flags & BPF_F_INDEX_MASK; in get_map_perf_counter()
[all …]
trace_hwlat.c
76 u64 nmi_ts_start;
77 u64 nmi_total_ts;
89 static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;
93 u64 seqnum; /* unique sequence */
94 u64 duration; /* delta */
95 u64 outer_duration; /* delta (outer loop) */
96 u64 nmi_total_ts; /* Total time spent in NMIs */
107 u64 count; /* total since reset */
109 u64 sample_window; /* total sampling window (on+off) */
110 u64 sample_width; /* active sampling portion of window */
[all …]
/kernel/irq/
timings.c
290 u64 last_ts;
291 u64 ema_time[PREDICTION_BUFFER_SIZE];
300 static u64 irq_timings_ema_new(u64 value, u64 ema_old) in irq_timings_ema_new()
382 static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now) in __irq_timings_next_event()
435 static __always_inline int irq_timings_interval_index(u64 interval) in irq_timings_interval_index()
441 u64 interval_us = (interval >> 10) / PREDICTION_FACTOR; in irq_timings_interval_index()
447 u64 interval) in __irq_timings_store()
473 static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts) in irq_timings_store()
475 u64 old_ts = irqs->last_ts; in irq_timings_store()
476 u64 interval; in irq_timings_store()
[all …]
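
irq_timings_ema_new() above folds each new inter-interrupt interval into an exponential moving average used for IRQ arrival prediction. The sketch below is a generic integer EMA of that shape; the alpha of 1/8 (EMA_ALPHA_SHIFT = 3) is an assumed constant chosen for illustration, not necessarily the kernel's value.

/*
 * Generic integer exponential moving average, in the spirit of
 * irq_timings_ema_new(): new_ema = old + alpha * (sample - old).
 * EMA_ALPHA_SHIFT = 3 (alpha = 1/8) is assumed for this sketch.
 */
#include <stdint.h>

#define EMA_ALPHA_SHIFT 3

static uint64_t ema_update_sketch(uint64_t sample, uint64_t ema_old)
{
	int64_t diff;

	if (!ema_old)			/* first sample seeds the average */
		return sample;

	diff = (int64_t)(sample - ema_old);
	return ema_old + (diff >> EMA_ALPHA_SHIFT);
}
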
/kernel/
kcov.c
80 u64 handle;
102 static struct kcov_remote *kcov_remote_find(u64 handle) in kcov_remote_find()
114 static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle) in kcov_remote_add()
211 static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip) in write_comp_data()
214 u64 *area; in write_comp_data()
215 u64 count, start_index, end_pos, max_pos; in write_comp_data()
227 area = (u64 *)t->kcov_area; in write_comp_data()
234 end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64); in write_comp_data()
262 void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2) in __sanitizer_cov_trace_cmp8()
289 void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2) in __sanitizer_cov_trace_const_cmp8()
[all …]
/kernel/bpf/
tnum.c
16 struct tnum tnum_const(u64 value) in tnum_const()
21 struct tnum tnum_range(u64 min, u64 max) in tnum_range()
23 u64 chi = min ^ max, delta; in tnum_range()
64 u64 sm, sv, sigma, chi, mu; in tnum_add()
76 u64 dv, alpha, beta, chi, mu; in tnum_sub()
88 u64 alpha, beta, v; in tnum_and()
98 u64 v, mu; in tnum_or()
107 u64 v, mu; in tnum_xor()
124 u64 acc_v = a.value * b.value; in tnum_mul()
146 u64 v, mu; in tnum_intersect()
[all …]
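
tnum.c implements the BPF verifier's tristate numbers: pairs of a value and a mask of unknown bits. The two constructors listed above can be sketched as follows; tnum_range_sketch() is a simplified re-implementation that marks every bit at or below the highest bit differing between min and max as unknown, which is a sound over-approximation of the range.

/*
 * Sketch of BPF verifier tristate numbers: value holds the known bits,
 * mask marks the unknown ones. Simplified re-implementation for
 * illustration; names and layout are this sketch's, not the kernel's.
 */
#include <stdint.h>

struct tnum_sketch {
	uint64_t value;		/* known bit values */
	uint64_t mask;		/* set bits are unknown */
};

static struct tnum_sketch tnum_const_sketch(uint64_t value)
{
	/* a constant has no unknown bits */
	return (struct tnum_sketch){ .value = value, .mask = 0 };
}

static struct tnum_sketch tnum_range_sketch(uint64_t min, uint64_t max)
{
	uint64_t chi = min ^ max, delta;
	int bits;

	if (chi == 0)				/* min == max: a constant */
		return tnum_const_sketch(min);

	bits = 64 - __builtin_clzll(chi);	/* highest differing bit, 1-based */
	if (bits > 63)				/* top bit differs: everything unknown */
		return (struct tnum_sketch){ .value = 0, .mask = ~0ULL };

	delta = (1ULL << bits) - 1;
	return (struct tnum_sketch){ .value = min & ~delta, .mask = delta };
}
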
stackmap.c
23 u64 data[];
61 sizeof(struct bpf_stack_build_id) : sizeof(u64); in stack_map_data_size()
66 u64 elem_size = sizeof(struct stack_map_bucket) + in prealloc_elems_and_freelist()
67 (u64)smap->map.value_size; in prealloc_elems_and_freelist()
93 u64 cost, n_buckets; in stack_map_alloc()
107 BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64)); in stack_map_alloc()
148 u64 *ips, u32 trace_nr, bool user) in stack_map_get_build_id_offset()
241 u64 *to = entry->ip; in get_callchain_entry_for_task()
246 to[i] = (u64)(from[i]); in get_callchain_entry_for_task()
258 struct perf_callchain_entry *trace, u64 flags) in __bpf_get_stackid()
[all …]
/kernel/kcsan/
kcsan.h
133 u64 old, u64 new, u64 mask);
140 u64 old, u64 new, u64 mask);
/kernel/events/
core.c
440 u64 tmp = perf_sample_period_ns; in update_perf_cpu_limits()
503 static DEFINE_PER_CPU(u64, running_sample_length);
505 static u64 __report_avg;
506 static u64 __report_allowed;
519 void perf_sample_event_took(u64 sample_len_ns) in perf_sample_event_took()
521 u64 max_len = READ_ONCE(perf_sample_allowed_ns); in perf_sample_event_took()
522 u64 running_len; in perf_sample_event_took()
523 u64 avg_len; in perf_sample_event_took()
581 static u64 perf_event_time(struct perf_event *event);
585 static inline u64 perf_clock(void) in perf_clock()
[all …]
