/kernel/sched/ |
D | cpufreq_schedutil.c |
      86  static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)  in sugov_should_update_freq() argument
     103  delta_ns = time - sg_policy->last_freq_update_time;  in sugov_should_update_freq()
     109  static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,  in sugov_up_down_rate_limit() argument
     114  delta_ns = time - sg_policy->last_freq_update_time;  in sugov_up_down_rate_limit()
     127  static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,  in sugov_update_commit() argument
     132  if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {  in sugov_update_commit()
     142  sg_policy->last_freq_update_time = time;  in sugov_update_commit()
     203  static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time)  in sugov_get_util() argument
     213  delta = time - rq->age_stamp;  in sugov_get_util()
     226  static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,  in sugov_set_iowait_boost() argument
     [all …]
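Every schedutil match above is one pattern: a monotonic nanosecond timestamp is compared against the last frequency-update time, and the governor acts only when enough time has elapsed. A minimal userspace sketch of that rate limit; field names mirror the snippets, while freq_update_delay_ns is an assumed field the listing itself does not show:

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified model of the schedutil rate limit, not the kernel code. */
    struct sugov_policy {
        uint64_t last_freq_update_time;  /* ns, monotonic */
        uint64_t freq_update_delay_ns;   /* assumed: minimum gap between updates */
    };

    static bool sugov_should_update_freq(struct sugov_policy *sg_policy, uint64_t time)
    {
        /* signed delta tolerates a timestamp taken just before the last update */
        int64_t delta_ns = (int64_t)(time - sg_policy->last_freq_update_time);

        return delta_ns >= (int64_t)sg_policy->freq_update_delay_ns;
    }

    static void sugov_update_commit(struct sugov_policy *sg_policy, uint64_t time)
    {
        /* ... program the new frequency here ... */
        sg_policy->last_freq_update_time = time;
    }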
|
D | cpufreq.c | 35 void (*func)(struct update_util_data *data, u64 time, in cpufreq_add_update_util_hook() argument
|
/kernel/ |
D | latencytop.c |
     127  latency_record[i].time += lat->time;  in account_global_scheduler_latency()
     128  if (lat->time > latency_record[i].max)  in account_global_scheduler_latency()
     129  latency_record[i].max = lat->time;  in account_global_scheduler_latency()
     190  lat.time = usecs;  in __account_scheduler_latency()
     217  mylat->time += lat.time;  in __account_scheduler_latency()
     218  if (lat.time > mylat->max)  in __account_scheduler_latency()
     219  mylat->max = lat.time;  in __account_scheduler_latency()
     250  lr->count, lr->time, lr->max);  in lstats_show()
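The latencytop lines show the usual accumulation idiom: add each sample to a running total and keep the worst case separately. A small sketch, with the struct layout assumed from the fields used above:

    #include <stdint.h>

    /* Userspace model of the latencytop accumulation pattern. */
    struct latency_record {
        uint64_t count;
        uint64_t time;  /* accumulated latency, usecs */
        uint64_t max;   /* worst single latency, usecs */
    };

    static void account_latency(struct latency_record *rec, uint64_t usecs)
    {
        rec->count++;
        rec->time += usecs;
        if (usecs > rec->max)
            rec->max = usecs;
    }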
|
D | tsacct.c |
     128  cputime_t time, dtime;  in __acct_update_integrals() local
     134  time = stime + utime;  in __acct_update_integrals()
     135  dtime = time - tsk->acct_timexpd;  in __acct_update_integrals()
     142  tsk->acct_timexpd = time;  in __acct_update_integrals()
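__acct_update_integrals charges only the cputime that accrued since the last accounting pass: the total so far minus a saved watermark, which is then advanced. As a standalone sketch, with cputime_t modeled as a plain 64-bit count:

    #include <stdint.h>

    /* Model of the delta-since-watermark pattern from the snippet. */
    struct task_acct {
        uint64_t acct_timexpd;  /* cputime already accounted */
    };

    static uint64_t acct_delta(struct task_acct *tsk, uint64_t stime, uint64_t utime)
    {
        uint64_t time  = stime + utime;            /* total cputime so far */
        uint64_t dtime = time - tsk->acct_timexpd; /* newly accrued portion */

        tsk->acct_timexpd = time;                  /* advance the watermark */
        return dtime;                              /* amount to charge now */
    }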
|
D | compat.c |
      47  __get_user(txc->time.tv_sec, &utp->time.tv_sec) ||  in compat_get_timex()
      48  __get_user(txc->time.tv_usec, &utp->time.tv_usec) ||  in compat_get_timex()
      75  __put_user(txc->time.tv_sec, &utp->time.tv_sec) ||  in compat_put_timex()
      76  __put_user(txc->time.tv_usec, &utp->time.tv_usec) ||  in compat_put_timex()
    1032  COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)  in COMPAT_SYSCALL_DEFINE1() argument
|
D | Kconfig.preempt | 11 time, but there are no guarantees and occasional longer delays
|
D | futex.c |
    2466  restart->futex.time = abs_time->tv64;  in futex_wait()
    2487  t.tv64 = restart->futex.time;  in futex_wait_restart()
    2507  ktime_t *time, int trylock)  in futex_lock_pi() argument
    2517  if (time) {  in futex_lock_pi()
    2522  hrtimer_set_expires(&to->timer, *time);  in futex_lock_pi()
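The first two futex matches show how an interrupted wait survives a syscall restart: the absolute ktime_t timeout is flattened to raw nanoseconds in the restart block (tv64) and reconstructed on restart. A toy model of that round trip, with userspace stand-ins for ktime_t and restart_block:

    #include <stdint.h>

    /* Sketch only; not the kernel's restart_block. */
    struct futex_restart {
        uint64_t time;  /* raw ns, as in restart->futex.time */
    };

    static void stash_timeout(struct futex_restart *restart, uint64_t abs_ns)
    {
        restart->time = abs_ns;   /* restart->futex.time = abs_time->tv64 */
    }

    static uint64_t restore_timeout(const struct futex_restart *restart)
    {
        return restart->time;     /* t.tv64 = restart->futex.time */
    }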
|
D | cpuset.c |
      72  time64_t time;  /* clock (secs) when val computed */  member
    1415  fmp->time = 0;  in fmeter_init()
    1426  ticks = now - fmp->time;  in fmeter_update()
    1434  fmp->time = now;  in fmeter_update()
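The fmeter member is a decayed event-rate meter: on each update the stored value is decayed once per elapsed second, then the computation time is recorded. A sketch assuming an illustrative decay factor and loop cap, not the kernel's FM_* constants verbatim:

    #include <stdint.h>

    struct fmeter {
        int     val;   /* decayed event rate */
        int64_t time;  /* clock (secs) when val was last computed */
    };

    static void fmeter_update(struct fmeter *fmp, int64_t now)
    {
        int64_t ticks = now - fmp->time;

        if (ticks > 99)
            ticks = 99;                          /* cap the decay loop */
        while (ticks-- > 0)
            fmp->val = (fmp->val * 933) / 1000;  /* ~6.7% decay per second */
        fmp->time = now;
    }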
|
/kernel/trace/ |
D | ring_buffer_benchmark.c |
     231  unsigned long long time;  in ring_buffer_producer() local
     295  time = ktime_us_delta(end_time, start_time);  in ring_buffer_producer()
     323  trace_printk("Time: %lld (usecs)\n", time);  in ring_buffer_producer()
     336  do_div(time, USEC_PER_MSEC);  in ring_buffer_producer()
     337  if (time)  in ring_buffer_producer()
     338  hit /= (long)time;  in ring_buffer_producer()
     351  if (time)  in ring_buffer_producer()
     352  missed /= (long)time;  in ring_buffer_producer()
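The benchmark arithmetic converts elapsed microseconds into per-millisecond hit/miss rates, guarding each division against a zero elapsed time. The same computation in plain C (do_div is the kernel's in-place 64-bit division helper):

    #include <stdint.h>
    #include <stdio.h>

    static void report(uint64_t time_us, long hit, long missed)
    {
        uint64_t time = time_us / 1000;  /* usecs -> msecs (USEC_PER_MSEC) */

        printf("Time: %llu (usecs)\n", (unsigned long long)time_us);
        if (time)
            hit /= (long)time;
        if (time)
            missed /= (long)time;
        printf("%ld hits/ms, %ld missed/ms\n", hit, missed);
    }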
|
D | Kconfig |
     186  This option measures the time spent in irqs-off critical
     210  This option measures the time spent in preemption-off critical
     242  time, this tracer will detect it. This is useful for testing
     248  hwlat_detector/width - time in usecs for how long to spin for
     249  hwlat_detector/window - time in usecs between the start of each
     265  file. Every time a latency is greater than tracing_thresh, it will
     499  compile time, a table is made of all the locations that ftrace
     556  with the event enabled. This adds a bit more time for kernel boot
     570  default and can be enabled at run-time.
     618  run), and calls the tracepoint. Each iteration will record the time
     [all …]
|
D | trace_functions_graph.c |
      91  { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
      93  { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
     436  u64 time = trace_clock_local();  in __trace_graph_function() local
     444  .calltime = time,  in __trace_graph_function()
     445  .rettime = time,  in __trace_graph_function()
|
D | ftrace.c |
     478  unsigned long long time;  member
     555  if (a->time < b->time)  in function_stat_cmp()
     557  if (a->time > b->time)  in function_stat_cmp()
     611  avg = rec->time;  in function_stat_show()
     632  rec->time * rec->time;  in function_stat_show()
     642  trace_print_graph_duration(rec->time, &s);  in function_stat_show()
     924  rec->time += calltime;  in profile_graph_return()
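The function profiler accumulates time per record, orders records with a three-way comparison on it, and derives a mean in function_stat_show (plus a squared term, visible at line 632, for a standard deviation). A reduced sketch of the comparator and mean; the counter field name is assumed, and the stddev math is omitted:

    #include <stdint.h>

    struct ftrace_profile {
        unsigned long counter;  /* assumed: number of recorded calls */
        uint64_t      time;     /* accumulated time, ns */
    };

    static int function_stat_cmp(const struct ftrace_profile *a,
                                 const struct ftrace_profile *b)
    {
        if (a->time < b->time)
            return -1;
        if (a->time > b->time)
            return 1;
        return 0;
    }

    static uint64_t function_stat_avg(const struct ftrace_profile *rec)
    {
        return rec->counter ? rec->time / rec->counter : 0;
    }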
|
D | blktrace.c |
      97  t->time = ktime_to_ns(ktime_get());  in trace_note()
     266  t->time = ktime_to_ns(ktime_get());  in __blk_add_trace()
    1401  .time = iter->ts,  in blk_trace_synthesize_old_trace()
|
D | ring_buffer.c |
     700  u64 time;  in ring_buffer_time_stamp() local
     703  time = rb_time_stamp(buffer);  in ring_buffer_time_stamp()
     706  return time;  in ring_buffer_time_stamp()
|
/kernel/time/ |
D | ntp.c |
     691  ts.tv_sec = txc->time.tv_sec;  in ntp_validate_timex()
     692  ts.tv_nsec = txc->time.tv_usec;  in ntp_validate_timex()
     697  if (!timeval_inject_offset_valid(&txc->time))  in ntp_validate_timex()
     765  txc->time.tv_sec = (time_t)ts->tv_sec;  in __do_adjtimex()
     766  txc->time.tv_usec = ts->tv_nsec;  in __do_adjtimex()
     768  txc->time.tv_usec /= NSEC_PER_USEC;  in __do_adjtimex()
     775  txc->time.tv_sec--;  in __do_adjtimex()
     780  txc->time.tv_sec++;  in __do_adjtimex()
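Lines 765-768 show how __do_adjtimex reports time: the sub-second part is copied out as nanoseconds and scaled down to microseconds unless the caller requested nanosecond resolution (the adjtimex STA_NANO convention). A sketch of that fill-and-scale step, with struct timex_time as a stand-in for txc->time:

    #include <stdint.h>

    #define NSEC_PER_USEC 1000L

    struct timex_time {
        int64_t tv_sec;
        long    tv_usec;
    };

    static void fill_timex_time(struct timex_time *t, int64_t sec, long nsec,
                                int sta_nano)
    {
        t->tv_sec  = sec;
        t->tv_usec = nsec;               /* nanoseconds first */
        if (!sta_nano)
            t->tv_usec /= NSEC_PER_USEC; /* then scale to microseconds */
    }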
|
D | timer_stats.c |
     286  ktime_t time;  in tstats_show() local
     296  time = ktime_sub(time_stop, time_start);  in tstats_show()
     298  period = ktime_to_timespec64(time);  in tstats_show()
|
D | Kconfig |
      45  # Automatically adjust the min. reprogramming time for
      87  Most of the time you want to say Y here.
     174  real-time workloads that at present do not tend to be run on
|
D | timeconst.bc | 42 print "/* Automatically generated by kernel/time/timeconst.bc */\n"
|
D | time.c | 63 SYSCALL_DEFINE1(time, time_t __user *, tloc) in SYSCALL_DEFINE1() argument
|
D | timekeeping.c |
    2322  delta.tv_sec = txc->time.tv_sec;  in do_adjtimex()
    2323  delta.tv_nsec = txc->time.tv_usec;  in do_adjtimex()
|
/kernel/power/ |
D | suspend_test.c |
      80  status = rtc_read_time(rtc, &alm.time);  in test_wakealarm()
      85  rtc_tm_to_time(&alm.time, &now);  in test_wakealarm()
      88  rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);  in test_wakealarm()
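test_wakealarm reads the RTC, converts the broken-down time to seconds, adds the suspend window, and converts back before arming the alarm. A userspace analogue using <time.h> in place of the kernel rtc helpers; the window length here is illustrative:

    #include <time.h>

    #define TEST_SUSPEND_SECONDS 10  /* illustrative window length */

    static void set_wakealarm(struct tm *alarm_tm)
    {
        time_t now = time(NULL);      /* rtc_read_time + rtc_tm_to_time */

        now += TEST_SUSPEND_SECONDS;  /* fire after the suspend window */
        gmtime_r(&now, alarm_tm);     /* rtc_time_to_tm equivalent */
    }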
|
D | Kconfig |
     234  CAUTION: this option will cause your machine's real-time clock to be
     235  set to an invalid time after a resume.
     243  APM compliant BIOSes. If you say Y here, the system time will be
|
/kernel/locking/ |
D | lockdep_proc.c |
     412  static void seq_time(struct seq_file *m, s64 time)  in seq_time() argument
     416  snprint_time(num, sizeof(num), time);  in seq_time()
|
D | lockdep.c |
     176  static void lock_time_inc(struct lock_time *lt, u64 time)  in lock_time_inc() argument
     178  if (time > lt->max)  in lock_time_inc()
     179  lt->max = time;  in lock_time_inc()
     181  if (time < lt->min || !lt->nr)  in lock_time_inc()
     182  lt->min = time;  in lock_time_inc()
     184  lt->total += time;  in lock_time_inc()
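lock_time_inc is a self-contained min/max/total accumulator; the `!lt->nr` test exists so the very first sample can seed `min`, which starts at zero. The same function lifted into userspace types as a sketch:

    #include <stdint.h>

    struct lock_time {
        uint64_t min, max, total;
        unsigned long nr;
    };

    static void lock_time_inc(struct lock_time *lt, uint64_t time)
    {
        if (time > lt->max)
            lt->max = time;

        /* !lt->nr seeds min with the first sample: a zero-initialized
         * min would otherwise never be replaced by a larger value */
        if (time < lt->min || !lt->nr)
            lt->min = time;

        lt->total += time;
        lt->nr++;
    }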
|
/kernel/events/ |
D | core.c |
     624  return t->time;  in perf_cgroup_event_time()
     636  info->time += now - info->timestamp;  in __update_cgrp_time()
    1393  ctx->time += now - ctx->timestamp;  in update_context_time()
    1404  return ctx ? ctx->time : 0;  in perf_event_time()
    1434  run_end = ctx->time;  in update_event_times()
    1589  size += sizeof(data->time);  in perf_event__id_header_size()
    2119  u64 now = ctx->time;  in group_sched_in()
    5603  data->time = perf_event_clock(event);  in __perf_event_header__init_id()
    5634  perf_output_put(handle, data->time);  in __perf_event__output_id_sample()
    5774  perf_output_put(handle, data->time);  in perf_output_sample()
    [all …]
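Two of the perf matches (lines 636 and 1393) are the same bookkeeping idiom: a context accumulates active time lazily, adding `now - timestamp` whenever it is brought up to date. A minimal model of that pair of fields:

    #include <stdint.h>

    /* Sketch of the lazy time accumulation, not the perf implementation. */
    struct perf_ctx_time {
        uint64_t time;       /* total time the context has been active */
        uint64_t timestamp;  /* when 'time' was last brought up to date */
    };

    static void update_context_time(struct perf_ctx_time *ctx, uint64_t now)
    {
        ctx->time += now - ctx->timestamp;
        ctx->timestamp = now;
    }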
|