/kernel/sched/ |
D | loadavg.c |
      158  unsigned long active, unsigned int n)  in calc_load_n() argument
      160  return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);  in calc_load_n()
      294  long delta, active, n;  in calc_global_nohz() local
      304  active = atomic_long_read(&calc_load_tasks);  in calc_global_nohz()
      305  active = active > 0 ? active * FIXED_1 : 0;  in calc_global_nohz()
      307  avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);  in calc_global_nohz()
      308  avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);  in calc_global_nohz()
      309  avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);  in calc_global_nohz()
      340  long active, delta;  in calc_global_load() local
      353  active = atomic_long_read(&calc_load_tasks);  in calc_global_load()
      [all …]
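The loadavg.c hits above are the fixed-point exponential moving average behind /proc/loadavg: 'active' is the count of runnable plus uninterruptible tasks scaled by FIXED_1, and calc_load_n() folds n missed update periods into one step via fixed_power_int(). Below is a minimal userspace sketch of that arithmetic, assuming the constants from include/linux/sched/loadavg.h; the in-kernel calc_load() also applies a rounding correction that is omitted here.

    #include <stdio.h>

    #define FSHIFT   11                /* bits of fractional precision */
    #define FIXED_1  (1 << FSHIFT)     /* 1.0 in fixed point */
    #define EXP_1    1884              /* 1/exp(5s/1min) in fixed point */
    #define EXP_5    2014              /* 1/exp(5s/5min) */
    #define EXP_15   2037              /* 1/exp(5s/15min) */

    /* One sampling-period step of the exponential moving average. */
    static unsigned long calc_load(unsigned long load, unsigned long exp,
                                   unsigned long active)
    {
            return (load * exp + active * (FIXED_1 - exp)) / FIXED_1;
    }

    /* Fixed-point x^n by repeated squaring: lets NO_HZ fold n missed ticks. */
    static unsigned long fixed_power_int(unsigned long x, unsigned int frac_bits,
                                         unsigned int n)
    {
            unsigned long result = 1UL << frac_bits;

            while (n) {
                    if (n & 1) {
                            result *= x;
                            result += 1UL << (frac_bits - 1);   /* round */
                            result >>= frac_bits;
                    }
                    n >>= 1;
                    x *= x;
                    x += 1UL << (frac_bits - 1);
                    x >>= frac_bits;
            }
            return result;
    }

    int main(void)
    {
            unsigned long avenrun = 0;
            unsigned long active = 3 * FIXED_1;     /* pretend 3 runnable tasks */
            unsigned int n = 10;                    /* pretend 10 missed periods */

            /* regular path: one step per LOAD_FREQ interval */
            avenrun = calc_load(avenrun, EXP_1, active);

            /* NO_HZ catch-up: fold n missed intervals into a single update */
            avenrun = calc_load(avenrun, fixed_power_int(EXP_1, FSHIFT, n), active);

            printf("1-min load ~ %lu.%02lu\n", avenrun >> FSHIFT,
                   ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
            return 0;
    }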
|
D | rt.c |
      81    array = &rt_rq->active;  in init_rt_rq()
      1142  struct rt_prio_array *array = &rt_rq->active;  in dec_rt_prio()
      1273  struct rt_prio_array *array = &rt_rq->active;  in __enqueue_rt_entity()
      1307  struct rt_prio_array *array = &rt_rq->active;  in __dequeue_rt_entity()
      1399  struct rt_prio_array *array = &rt_rq->active;  in requeue_rt_entity()
      1588  struct rt_prio_array *array = &rt_rq->active;  in pick_next_rt_entity()
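Here 'active' is the rt_rq's struct rt_prio_array (the sched.h hit further down): one FIFO queue per priority plus a bitmap of non-empty priorities, which is what keeps RT enqueue, dequeue and pick-next O(1). A simplified userspace sketch of that idea follows, using a singly linked queue per priority and __builtin_ctzll() in place of the kernel's DECLARE_BITMAP()/struct list_head/sched_find_first_bit() machinery.

    #include <stdio.h>

    #define NR_PRIO 64                       /* the kernel uses MAX_RT_PRIO (100) */

    struct task {
            const char *name;
            struct task *next;
    };

    struct prio_array {
            unsigned long long bitmap;       /* bit p set => queue[p] non-empty */
            struct task *queue[NR_PRIO];     /* FIFO head per priority */
    };

    static void enqueue(struct prio_array *a, struct task *t, unsigned int prio)
    {
            struct task **pp = &a->queue[prio];

            while (*pp)                      /* append at the tail: FIFO per prio */
                    pp = &(*pp)->next;
            *pp = t;
            t->next = NULL;
            a->bitmap |= 1ULL << prio;
    }

    static struct task *pick_next(struct prio_array *a)
    {
            unsigned int prio;
            struct task *t;

            if (!a->bitmap)
                    return NULL;
            prio = __builtin_ctzll(a->bitmap);   /* lowest number = highest RT prio */
            t = a->queue[prio];
            a->queue[prio] = t->next;
            if (!a->queue[prio])
                    a->bitmap &= ~(1ULL << prio);
            return t;
    }

    int main(void)
    {
            struct prio_array rq = { 0 };
            struct task a = { "irq-thread" }, b = { "watchdog" };

            enqueue(&rq, &b, 10);
            enqueue(&rq, &a, 2);
            printf("next: %s\n", pick_next(&rq)->name);   /* priority 2 wins */
            return 0;
    }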
|
D | core.c |
      927   if (p->uclamp[clamp_id].active)  in uclamp_eff_value()
      959   uc_se->active = true;  in uclamp_rq_inc_id()
      998   uc_se->active = false;  in uclamp_rq_dec_id()
      1069  if (p->uclamp[clamp_id].active) {  in uclamp_update_active()
      1231  p->uclamp[clamp_id].active = false;  in uclamp_fork()
|
D | sched.h |
      594   struct rt_prio_array active;  member
|
/kernel/time/ |
D | hrtimer.c |
      487  __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)  in __next_base() argument
      491  if (!*active)  in __next_base()
      494  idx = __ffs(*active);  in __next_base()
      495  *active &= ~(1U << idx);  in __next_base()
      500  #define for_each_active_base(base, cpu_base, active) \  argument
      501  while ((base = __next_base((cpu_base), &(active))))
      505  unsigned int active,  in __hrtimer_next_event_base() argument
      511  for_each_active_base(base, cpu_base, active) {  in __hrtimer_next_event_base()
      515  next = timerqueue_getnext(&base->active);  in __hrtimer_next_event_base()
      569  unsigned int active;  in __hrtimer_get_next_event() local
      [all …]
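hrtimer.c uses 'active' in two senses: a bitmask of clock bases that currently have timers queued, walked via __ffs() in __next_base()/for_each_active_base(), and the timerqueue of each base. The bitmask walk is the plain "find lowest set bit, process it, clear it" idiom; a standalone sketch, with __builtin_ctz() standing in for the kernel's __ffs():

    #include <stdio.h>

    /* Visit each set bit of 'mask' in ascending order, clearing it afterwards. */
    #define for_each_active_bit(idx, mask)                          \
            for (; (mask) && ((idx) = __builtin_ctz(mask), 1);      \
                 (mask) &= ~(1U << (idx)))

    int main(void)
    {
            unsigned int active = 0x0b;   /* bases 0, 1 and 3 have timers queued */
            unsigned int idx;

            for_each_active_bit(idx, active)
                    printf("scan clock base %u\n", idx);

            return 0;
    }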
|
D | timer_list.c |
      88   curr = timerqueue_getnext(&base->active);  in print_active_timers()
|
/kernel/gcov/ |
D | gcc_3_4.c |
      186  unsigned int active = num_counter_active(info);  in gcov_info_reset() local
      189  for (i = 0; i < active; i++) {  in gcov_info_reset()
      256  unsigned int active;  in gcov_info_dup() local
      259  active = num_counter_active(info);  in gcov_info_dup()
      260  dup = kzalloc(struct_size(dup, counts, active), GFP_KERNEL);  in gcov_info_dup()
      277  for (i = 0; i < active ; i++) {  in gcov_info_dup()
      301  unsigned int active = num_counter_active(info);  in gcov_info_free() local
      304  for (i = 0; i < active ; i++)  in gcov_info_free()
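In the gcov code 'active' is the number of counter types enabled for a compilation unit, and it sizes allocations whose struct ends in a flexible array of counter blocks, hence kzalloc(struct_size(dup, counts, active), GFP_KERNEL). A userspace sketch of that sizing pattern, with stand-in types and a simplified struct_size() (the real kernel macro additionally saturates on overflow):

    #include <stdlib.h>
    #include <stdio.h>

    struct ctr_info {
            unsigned int num;
            long long *values;
    };

    struct info {
            unsigned int version;
            unsigned int n_ctrs;
            struct ctr_info counts[];     /* flexible array member */
    };

    /* simplified struct_size(): header plus 'count' trailing elements */
    #define struct_size(p, member, count) \
            (sizeof(*(p)) + (count) * sizeof(*(p)->member))

    static struct info *info_dup(const struct info *src, unsigned int active)
    {
            struct info *dup = calloc(1, struct_size(dup, counts, active));

            if (!dup)
                    return NULL;
            dup->version = src->version;
            dup->n_ctrs = active;
            /* per-counter data would be deep-copied here */
            return dup;
    }

    int main(void)
    {
            struct info *orig = calloc(1, struct_size(orig, counts, 2));
            struct info *dup;

            orig->version = 0x34;
            dup = info_dup(orig, 2);
            printf("dup version %#x, %u counter sets\n", dup->version, dup->n_ctrs);
            free(dup);
            free(orig);
            return 0;
    }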
|
D | gcc_4_7.c |
      272  unsigned int active;  in gcov_info_dup() local
      295  active = num_counter_active(info);  in gcov_info_dup()
      297  fi_size += sizeof(struct gcov_ctr_info) * active;  in gcov_info_dup()
      309  for (ct_idx = 0; ct_idx < active; ct_idx++) {  in gcov_info_dup()
      338  unsigned int active;  in gcov_info_free() local
      346  active = num_counter_active(info);  in gcov_info_free()
      354  for (ct_idx = 0; ct_idx < active; ct_idx++, ci_ptr++)  in gcov_info_free()
|
/kernel/ |
D | context_tracking.c |
      71   if (__this_cpu_read(context_tracking.active)) {  in __context_tracking_enter()
      151  if (__this_cpu_read(context_tracking.active)) {  in __context_tracking_exit()
      193  if (!per_cpu(context_tracking.active, cpu)) {  in context_tracking_cpu_set()
      194  per_cpu(context_tracking.active, cpu) = true;  in context_tracking_cpu_set()
|
D | pid_namespace.c |
      385  struct pid_namespace *active = task_active_pid_ns(current);  in pidns_install() local
      400  if (new->level < active->level)  in pidns_install()
      404  while (ancestor->level > active->level)  in pidns_install()
      406  if (ancestor != active)  in pidns_install()
      416  struct pid_namespace *active = task_active_pid_ns(current);  in pidns_get_parent() local
      424  if (p == active)  in pidns_get_parent()
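pidns_install() only lets a task install a pid namespace that is its active namespace or a descendant of it. Since every namespace records its depth as 'level', the check is "climb ->parent from the deeper namespace until the levels match, then compare pointers". A sketch of that walk with hypothetical userspace types:

    #include <stdbool.h>
    #include <stdio.h>

    struct pid_ns {
            unsigned int level;          /* 0 == init namespace */
            struct pid_ns *parent;
    };

    static bool is_descendant_of(struct pid_ns *new, struct pid_ns *active)
    {
            struct pid_ns *ancestor = new;

            if (new->level < active->level)       /* shallower: cannot be a descendant */
                    return false;
            while (ancestor->level > active->level)
                    ancestor = ancestor->parent;  /* climb to the same depth */
            return ancestor == active;
    }

    int main(void)
    {
            struct pid_ns init_ns = { .level = 0, .parent = NULL };
            struct pid_ns child   = { .level = 1, .parent = &init_ns };
            struct pid_ns grand   = { .level = 2, .parent = &child };

            printf("%d %d\n", is_descendant_of(&grand, &child),
                              is_descendant_of(&child, &grand));   /* prints: 1 0 */
            return 0;
    }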
|
D | acct.c |
      88   int active;  member
      112  if (acct->active) {  in check_free_space()
      116  acct->active = 0;  in check_free_space()
      123  acct->active = 1;  in check_free_space()
      130  return acct->active;  in check_free_space()
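Process accounting's 'active' flag is driven by check_free_space() with hysteresis: accounting is suspended when free space on the accounting filesystem drops below a low watermark and only resumed once it climbs back above a higher one, so the flag does not flap around a single threshold. A sketch of that logic, using the 2%/4% defaults from kernel/acct.c and a stand-in struct:

    #include <stdbool.h>
    #include <stdio.h>

    #define RESUME   4   /* resume accounting when > 4% of space is free */
    #define SUSPEND  2   /* suspend accounting when < 2% is free */

    struct acct_state {
            bool active;
    };

    static bool check_free_space(struct acct_state *acct,
                                 unsigned long free_blocks,
                                 unsigned long total_blocks)
    {
            if (acct->active) {
                    if (free_blocks * 100 < SUSPEND * total_blocks)
                            acct->active = false;   /* crossed the low watermark */
            } else {
                    if (free_blocks * 100 > RESUME * total_blocks)
                            acct->active = true;    /* recovered past the high one */
            }
            return acct->active;
    }

    int main(void)
    {
            struct acct_state st = { .active = true };

            printf("%d\n", check_free_space(&st, 10, 1000));  /* 1% free -> 0 */
            printf("%d\n", check_free_space(&st, 30, 1000));  /* 3% free -> still 0 */
            printf("%d\n", check_free_space(&st, 50, 1000));  /* 5% free -> 1 */
            return 0;
    }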
|
D | cpu.c |
      2224  static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
|
D | signal.c |
      3650  struct pid_namespace *active = task_active_pid_ns(current);  in access_pidfd_pidns() local
      3656  if (p == active)  in access_pidfd_pidns()
|
/kernel/power/ |
D | wakelock.c |
      49   if (wl->ws->active == show_active)  in pm_show_wakelocks()
      113  bool active;  in __wakelocks_gc() local
      117  active = wl->ws->active;  in __wakelocks_gc()
      123  if (!active) {  in __wakelocks_gc()
|
D | Kconfig |
      60   It creates an image which is saved in your active swap. Upon the next
      133  state automatically whenever there are no active wakeup sources.
|
/kernel/bpf/ |
D | bpf_lru_list.c |
      142  struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];  in __bpf_lru_list_rotate_active() local
      146  first_node = list_first_entry(active, struct bpf_lru_node, list);  in __bpf_lru_list_rotate_active()
      147  list_for_each_entry_safe_reverse(node, tmp_node, active, list) {  in __bpf_lru_list_rotate_active()
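The BPF LRU keeps hot entries on an active list and rotates it second-chance style: __bpf_lru_list_rotate_active() scans a bounded number of nodes from the cold end, refreshing anything whose reference bit is set and demoting the rest to the inactive list. A minimal sketch of that rotation, assuming plain hand-rolled doubly linked lists rather than the kernel's list_head API:

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
            const char *key;
            bool ref;                      /* set when the entry is looked up */
            struct node *prev, *next;
    };

    struct list {
            struct node *head, *tail;      /* head = hot end, tail = cold end */
    };

    static void push_head(struct list *l, struct node *n)
    {
            n->prev = NULL;
            n->next = l->head;
            if (l->head)
                    l->head->prev = n;
            else
                    l->tail = n;
            l->head = n;
    }

    static void unlink_node(struct list *l, struct node *n)
    {
            if (n->prev) n->prev->next = n->next; else l->head = n->next;
            if (n->next) n->next->prev = n->prev; else l->tail = n->prev;
    }

    static void rotate_active(struct list *active, struct list *inactive,
                              unsigned int nr_scans)
    {
            while (nr_scans-- && active->tail) {
                    struct node *n = active->tail;

                    unlink_node(active, n);
                    if (n->ref) {
                            n->ref = false;            /* second chance: refresh */
                            push_head(active, n);
                    } else {
                            push_head(inactive, n);    /* cold: eviction candidate */
                    }
            }
    }

    int main(void)
    {
            struct node a = { "a", true }, b = { "b", false }, c = { "c", true };
            struct list active = { 0 }, inactive = { 0 };

            push_head(&active, &a);        /* list becomes c, b, a (a is coldest) */
            push_head(&active, &b);
            push_head(&active, &c);

            rotate_active(&active, &inactive, 2);   /* scan the two coldest nodes */

            printf("active head: %s, inactive head: %s\n",
                   active.head->key, inactive.head->key);   /* a refreshed, b demoted */
            return 0;
    }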
|
/kernel/printk/ |
D | printk.c |
      3126  dumper->active = true;  in kmsg_dump()
      3139  dumper->active = false;  in kmsg_dump()
      3170  if (!dumper->active)  in kmsg_dump_get_line_nolock()
      3257  if (!dumper->active)  in kmsg_dump_get_buffer()
|
/kernel/debug/kdb/ |
D | kdb_main.c |
      2100  struct kmsg_dumper dumper = { .active = 1 };  in kdb_dmesg()
|
/kernel/events/ |
D | core.c |
      5080  bool active;  in __perf_event_period() local
      5089  active = (event->state == PERF_EVENT_STATE_ACTIVE);  in __perf_event_period()
      5090  if (active) {  in __perf_event_period()
      5105  if (active) {  in __perf_event_period()
|
/kernel/trace/ |
D | Kconfig |
      552   otherwise has native performance as long as no tracing is active.
|