/kernel/sched/ |
D | loadavg.c |
    100  calc_load(unsigned long load, unsigned long exp, unsigned long active)  in calc_load() argument
    104  newload = load * exp + active * (FIXED_1 - exp);  in calc_load()
    105  if (active >= load)  in calc_load()
    294  unsigned long active, unsigned int n)  in calc_load_n() argument
    296  return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);  in calc_load_n()
    310  long delta, active, n;  in calc_global_nohz() local
    319  active = atomic_long_read(&calc_load_tasks);  in calc_global_nohz()
    320  active = active > 0 ? active * FIXED_1 : 0;  in calc_global_nohz()
    322  avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);  in calc_global_nohz()
    323  avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);  in calc_global_nohz()
    [all …]
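The loadavg.c hits above are the fixed-point exponential decay behind /proc/loadavg: avenrun[] holds the averages in FSHIFT-bit fixed point, and each 5-second tick folds the runnable-task count in with a per-window weight. A minimal userspace sketch of that arithmetic; FSHIFT, FIXED_1, and the EXP_* values are the kernel's published constants, while main() and its tick loop are an invented harness:

#include <stdio.h>

/* Fixed-point constants as defined in include/linux/sched/loadavg.h */
#define FSHIFT  11                  /* bits of precision */
#define FIXED_1 (1 << FSHIFT)       /* 1.0 in fixed point */
#define EXP_1   1884                /* 1/exp(5sec/1min) in fixed point */
#define EXP_5   2014                /* 1/exp(5sec/5min) */
#define EXP_15  2037                /* 1/exp(5sec/15min) */

/* One decay step: load = load*exp + active*(1 - exp), all fixed point. */
static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	unsigned long newload = load * exp + active * (FIXED_1 - exp);

	/* Round up while load is rising, so the average converges to the
	 * task count instead of hovering just below it. */
	if (active >= load)
		newload += FIXED_1 - 1;

	return newload / FIXED_1;
}

int main(void)
{
	unsigned long avenrun[3] = { 0, 0, 0 };
	/* Pretend 3 tasks stay runnable for one minute of 5 s ticks. */
	unsigned long active = 3 * FIXED_1;

	for (int tick = 0; tick < 12; tick++) {
		avenrun[0] = calc_load(avenrun[0], EXP_1, active);
		avenrun[1] = calc_load(avenrun[1], EXP_5, active);
		avenrun[2] = calc_load(avenrun[2], EXP_15, active);
	}

	/* Same presentation as the LOAD_INT/LOAD_FRAC macros. */
	printf("%lu.%02lu %lu.%02lu %lu.%02lu\n",
	       avenrun[0] >> FSHIFT, ((avenrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT,
	       avenrun[1] >> FSHIFT, ((avenrun[1] & (FIXED_1 - 1)) * 100) >> FSHIFT,
	       avenrun[2] >> FSHIFT, ((avenrun[2] & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}

The calc_load_n() hit at line 296 handles missed NO_HZ ticks by raising the decay factor to the n-th power with fixed_power_int() rather than looping n times.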
|
D | rt.c |
    82    array = &rt_rq->active;  in init_rt_rq()
    893   struct rt_prio_array *array = &rt_rq->active;  in dump_throttled_rt_tasks()
    1133  struct rt_prio_array *array = &rt_rq->active;  in dec_rt_prio()
    1264  struct rt_prio_array *array = &rt_rq->active;  in __enqueue_rt_entity()
    1298  struct rt_prio_array *array = &rt_rq->active;  in __dequeue_rt_entity()
    1392  struct rt_prio_array *array = &rt_rq->active;  in requeue_rt_entity()
    1537  struct rt_prio_array *array = &rt_rq->active;  in pick_next_rt_entity()
|
D | sched.h |
    479  struct rt_prio_array active;  member
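Taken together, the rt.c and sched.h hits show every hot path of the RT scheduler going through one rt_prio_array (the `active` member of the run queue): a bitmap of non-empty priorities plus one FIFO per priority, so enqueue, dequeue, and pick-next are all O(1). A compressed sketch of that shape; NR_PRIO, struct task, and the list wiring are invented stand-ins for MAX_RT_PRIO, the RT entity lists, and list_head, and __builtin_ctzl stands in for sched_find_first_bit():

#include <stdio.h>
#include <string.h>

#define NR_PRIO 64      /* capped so one unsigned long is the bitmap */

struct task {
	int prio;               /* 0 is the highest priority */
	struct task *next;
	const char *name;
};

struct prio_array {
	unsigned long bitmap;   /* bit p set => queue[p] is non-empty */
	struct task *head[NR_PRIO];
	struct task *tail[NR_PRIO];
};

static void enqueue(struct prio_array *a, struct task *t)
{
	t->next = NULL;
	if (a->tail[t->prio])
		a->tail[t->prio]->next = t;
	else
		a->head[t->prio] = t;
	a->tail[t->prio] = t;
	a->bitmap |= 1UL << t->prio;    /* mark this priority active */
}

static struct task *pick_next(struct prio_array *a)
{
	if (!a->bitmap)
		return NULL;
	/* Lowest set bit == highest-priority non-empty queue, the same
	 * idea as the bitmap search in pick_next_rt_entity(). */
	int prio = __builtin_ctzl(a->bitmap);
	struct task *t = a->head[prio];

	a->head[prio] = t->next;
	if (!a->head[prio]) {
		a->tail[prio] = NULL;
		a->bitmap &= ~(1UL << prio);    /* queue drained */
	}
	return t;
}

int main(void)
{
	struct prio_array a;
	struct task t1 = { .prio = 10, .name = "t1" };
	struct task t2 = { .prio = 3,  .name = "t2" };
	struct task *t;

	memset(&a, 0, sizeof(a));
	enqueue(&a, &t1);
	enqueue(&a, &t2);

	while ((t = pick_next(&a)))
		printf("%s (prio %d)\n", t->name, t->prio);  /* t2, then t1 */
	return 0;
}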
|
/kernel/gcov/ |
D | gcc_3_4.c |
    173  unsigned int active = num_counter_active(info);  in gcov_info_reset() local
    176  for (i = 0; i < active; i++) {  in gcov_info_reset()
    243  unsigned int active;  in gcov_info_dup() local
    246  active = num_counter_active(info);  in gcov_info_dup()
    248  sizeof(struct gcov_ctr_info) * active, GFP_KERNEL);  in gcov_info_dup()
    265  for (i = 0; i < active ; i++) {  in gcov_info_dup()
    289  unsigned int active = num_counter_active(info);  in gcov_info_free() local
    292  for (i = 0; i < active ; i++)  in gcov_info_free()
|
D | gcc_4_7.c |
    259  unsigned int active;  in gcov_info_dup() local
    282  active = num_counter_active(info);  in gcov_info_dup()
    284  fi_size += sizeof(struct gcov_ctr_info) * active;  in gcov_info_dup()
    296  for (ct_idx = 0; ct_idx < active; ct_idx++) {  in gcov_info_dup()
    325  unsigned int active;  in gcov_info_free() local
    333  active = num_counter_active(info);  in gcov_info_free()
    341  for (ct_idx = 0; ct_idx < active; ct_idx++, ci_ptr++)  in gcov_info_free()
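In both gcov flavours, `active` is the number of counter sets actually in use, and it drives the same deep-copy/deep-free discipline: gcov_info_dup() sizes its allocation by the active count and copies each counter array, and gcov_info_free() walks the same count on teardown. A userspace sketch of that ownership pattern; struct ctr_info and struct info are invented stand-ins that only roughly echo gcov_ctr_info/gcov_info, and calloc/malloc replace the kernel's GFP_KERNEL allocations:

#include <stdlib.h>
#include <string.h>

struct ctr_info {
	unsigned int num;       /* number of counter values */
	long long *values;
};

struct info {
	unsigned int active;    /* counter sets in use */
	struct ctr_info *counts;
};

static void info_free(struct info *inf)
{
	if (!inf)
		return;
	if (inf->counts) {
		/* Free exactly as many arrays as dup allocated. */
		for (unsigned int i = 0; i < inf->active; i++)
			free(inf->counts[i].values);
		free(inf->counts);
	}
	free(inf);
}

static struct info *info_dup(const struct info *src)
{
	struct info *dup = calloc(1, sizeof(*dup));

	if (!dup)
		return NULL;
	dup->active = src->active;
	dup->counts = calloc(src->active, sizeof(*dup->counts));
	if (!dup->counts)
		goto err;

	for (unsigned int i = 0; i < src->active; i++) {
		size_t sz = src->counts[i].num * sizeof(long long);

		dup->counts[i].num = src->counts[i].num;
		dup->counts[i].values = malloc(sz);
		if (!dup->counts[i].values)
			goto err;
		memcpy(dup->counts[i].values, src->counts[i].values, sz);
	}
	return dup;
err:
	/* calloc() zeroed every pointer we did not reach, so info_free()
	 * copes with a partially built object. */
	info_free(dup);
	return NULL;
}

int main(void)
{
	long long vals[3] = { 1, 2, 3 };
	struct ctr_info ci = { .num = 3, .values = vals };
	struct info src = { .active = 1, .counts = &ci };

	info_free(info_dup(&src));
	return 0;
}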
|
/kernel/ |
D | context_tracking.c |
    70   if (__this_cpu_read(context_tracking.active)) {  in __context_tracking_enter()
    150  if (__this_cpu_read(context_tracking.active)) {  in __context_tracking_exit()
    192  if (!per_cpu(context_tracking.active, cpu)) {  in context_tracking_cpu_set()
    193  per_cpu(context_tracking.active, cpu) = true;  in context_tracking_cpu_set()
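Here `active` is a per-CPU opt-in: the enter/exit fast paths do nothing unless their own CPU's flag was switched on once via context_tracking_cpu_set(). A toy model of that gate, with a plain array standing in for the per-CPU storage and all names invented:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct tracking {
	bool active;            /* has this CPU opted in? */
	int state;
};

static struct tracking per_cpu_ct[NR_CPUS];

static void tracking_enter(int cpu)
{
	/* Mirrors the check in __context_tracking_enter(): untracked
	 * CPUs fall straight through. */
	if (per_cpu_ct[cpu].active) {
		per_cpu_ct[cpu].state = 1;
		printf("cpu%d: enter user context\n", cpu);
	}
}

static void tracking_cpu_set(int cpu)
{
	/* Enable once, as in context_tracking_cpu_set(). */
	if (!per_cpu_ct[cpu].active)
		per_cpu_ct[cpu].active = true;
}

int main(void)
{
	tracking_enter(0);      /* no-op: cpu0 has not opted in */
	tracking_cpu_set(0);
	tracking_enter(0);      /* now tracked */
	return 0;
}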
|
D | pid_namespace.c |
    381  struct pid_namespace *active = task_active_pid_ns(current);  in pidns_install() local
    396  if (new->level < active->level)  in pidns_install()
    400  while (ancestor->level > active->level)  in pidns_install()
    402  if (ancestor != active)  in pidns_install()
    412  struct pid_namespace *active = task_active_pid_ns(current);  in pidns_get_parent() local
    420  if (p == active)  in pidns_get_parent()
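The pidns_install() hits implement a descendant check: pid namespaces form a tree, each node records its depth (`level`), and a target namespace is accepted only if walking its parents up to the active namespace's level lands exactly on the active namespace. A standalone sketch of that walk; the struct and field names mimic the snippet, while the test harness is invented:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pid_ns {
	int level;              /* depth in the tree, root = 0 */
	struct pid_ns *parent;
};

static bool is_descendant(struct pid_ns *new, struct pid_ns *active)
{
	/* A shallower namespace can never be below us. */
	if (new->level < active->level)
		return false;

	/* Climb until the levels match, then compare identity. */
	struct pid_ns *ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	return ancestor == active;
}

int main(void)
{
	struct pid_ns root  = { .level = 0, .parent = NULL };
	struct pid_ns child = { .level = 1, .parent = &root };
	struct pid_ns other = { .level = 1, .parent = &root };

	printf("%d\n", is_descendant(&child, &root));   /* 1: descendant */
	printf("%d\n", is_descendant(&child, &other));  /* 0: sibling */
	return 0;
}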
|
D | acct.c |
    85   int active;  member
    109  if (acct->active) {  in check_free_space()
    113  acct->active = 0;  in check_free_space()
    120  acct->active = 1;  in check_free_space()
    127  return acct->active;  in check_free_space()
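check_free_space() uses `active` for hysteresis: process accounting is switched off when free space on the accounting filesystem falls to a low watermark and back on only once it climbs past a higher one, so it does not flap around a single threshold. The 2%/4% split below mirrors acct.c's default SUSPEND/RESUME watermarks; the rest of this sketch is an invented harness:

#include <stdbool.h>
#include <stdio.h>

#define SUSPEND_PCT 2   /* at or below this much free: suspend */
#define RESUME_PCT  4   /* at or above this much free: resume */

struct acct {
	bool active;
};

static bool check_free_space(struct acct *acct, int free_pct)
{
	if (acct->active) {
		if (free_pct <= SUSPEND_PCT) {
			acct->active = false;
			printf("suspended at %d%% free\n", free_pct);
		}
	} else {
		if (free_pct >= RESUME_PCT) {
			acct->active = true;
			printf("resumed at %d%% free\n", free_pct);
		}
	}
	return acct->active;
}

int main(void)
{
	struct acct a = { .active = true };

	check_free_space(&a, 3);   /* stays active: above SUSPEND_PCT */
	check_free_space(&a, 1);   /* suspends */
	check_free_space(&a, 3);   /* stays suspended: below RESUME_PCT */
	check_free_space(&a, 5);   /* resumes */
	return 0;
}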
|
/kernel/power/ |
D | wakelock.c |
    48   if (wl->ws.active == show_active)  in pm_show_wakelocks()
    112  bool active;  in __wakelocks_gc() local
    116  active = wl->ws.active;  in __wakelocks_gc()
    122  if (!active) {  in __wakelocks_gc()
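__wakelocks_gc() shows a snapshot-then-reclaim pattern: each entry's `active` flag is read first, and only entries that were inactive at that point are unlinked and freed. A minimal single-threaded sketch of that sweep; struct wakelock here is an invented stand-in for the kernel's wakelock/wakeup_source pair, and the locking around the snapshot is omitted:

#include <stdbool.h>
#include <stdlib.h>

struct wakelock {
	bool active;
	struct wakelock *next;
};

static struct wakelock *wakelocks_gc(struct wakelock *head)
{
	struct wakelock **link = &head;

	while (*link) {
		struct wakelock *wl = *link;
		bool active = wl->active;   /* snapshot, as in the hits */

		if (!active) {
			*link = wl->next;   /* unlink and reclaim */
			free(wl);
		} else {
			link = &wl->next;
		}
	}
	return head;
}

int main(void)
{
	struct wakelock *a = calloc(1, sizeof(*a));   /* inactive */
	struct wakelock *b = calloc(1, sizeof(*b));

	b->active = true;
	a->next = b;

	struct wakelock *head = wakelocks_gc(a);      /* frees a, keeps b */
	free(head);
	return 0;
}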
|
D | Kconfig |
    60   It creates an image which is saved in your active swap. Upon the next
    123  state automatically whenever there are no active wakeup sources.
|
/kernel/time/ |
D | hrtimer.c |
    467   unsigned int active = cpu_base->active_bases;  in __hrtimer_get_next_event() local
    470   for (; active; base++, active >>= 1) {  in __hrtimer_get_next_event()
    474   if (!(active & 0x01))  in __hrtimer_get_next_event()
    477   next = timerqueue_getnext(&base->active);  in __hrtimer_get_next_event()
    875   return timerqueue_add(&base->active, &timer->node);  in enqueue_hrtimer()
    899   if (!timerqueue_del(&base->active, &timer->node))  in __remove_hrtimer()
    1288  unsigned int active = cpu_base->active_bases;  in __hrtimer_run_queues() local
    1290  for (; active; base++, active >>= 1) {  in __hrtimer_run_queues()
    1294  if (!(active & 0x01))  in __hrtimer_run_queues()
    1299  while ((node = timerqueue_getnext(&base->active))) {  in __hrtimer_run_queues()
    [all …]
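Both hrtimer walks use the same idiom: `active_bases` is a bitmask recording which clock bases have queued timers, and the loop shifts the mask while stepping a pointer through the parallel array, skipping clear bits so empty bases cost one shift and one test. A standalone rendering of just that loop shape; the base names are real hrtimer clock bases, but their contents here are invented:

#include <stdio.h>

struct clock_base {
	const char *name;
};

int main(void)
{
	struct clock_base bases[] = {
		{ "MONOTONIC" }, { "REALTIME" }, { "BOOTTIME" }, { "TAI" },
	};
	unsigned int active = 0x5;      /* bits 0 and 2 set */
	struct clock_base *base = bases;

	/* Same shape as lines 470/1290: consume the mask bit by bit,
	 * advancing the base pointer in lockstep. */
	for (; active; base++, active >>= 1) {
		if (!(active & 0x01))
			continue;       /* nothing queued on this base */
		printf("process %s\n", base->name);
	}
	return 0;
}

Within each selected base, the kernel then pulls timers in expiry order via timerqueue_getnext(), the same accessor the timer_list.c hit below uses for /proc reporting.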
|
D | timer_list.c |
    105  curr = timerqueue_getnext(&base->active);  in print_active_timers()
|
/kernel/printk/ |
D | printk.c |
    3032  dumper->active = true;  in kmsg_dump()
    3045  dumper->active = false;  in kmsg_dump()
    3076  if (!dumper->active)  in kmsg_dump_get_line_nolock()
    3163  if (!dumper->active)  in kmsg_dump_get_buffer()
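For kmsg dumpers, `active` brackets a window: kmsg_dump() sets it before invoking the dumper callback and clears it afterwards, and the record getters refuse to run outside that window. A sketch of the protocol; the struct loosely mimics struct kmsg_dumper, and a single fake record stands in for the real log buffer:

#include <stdbool.h>
#include <stdio.h>

struct dumper {
	bool active;
	void (*dump)(struct dumper *d);
};

static bool dump_get_line(struct dumper *d, char *buf, size_t len)
{
	if (!d->active)                 /* as in kmsg_dump_get_line_nolock() */
		return false;
	snprintf(buf, len, "fake log record");
	return true;
}

static void kmsg_dump_sketch(struct dumper *d)
{
	d->active = true;               /* open the window */
	d->dump(d);
	d->active = false;              /* close it */
}

static void my_dump_cb(struct dumper *d)
{
	char line[64];

	if (dump_get_line(d, line, sizeof(line)))
		printf("%s\n", line);
}

int main(void)
{
	struct dumper d = { .dump = my_dump_cb };
	char line[64];

	kmsg_dump_sketch(&d);           /* prints the record */
	printf("outside window: %d\n",
	       dump_get_line(&d, line, sizeof(line)));  /* 0 */
	return 0;
}

The kdb_main.c hit at the bottom of this listing sidesteps the window by statically initializing .active = 1, so kdb_dmesg() can call the getters directly without going through kmsg_dump().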
|
/kernel/events/ |
D | core.c |
    4561  bool active;  in __perf_event_period() local
    4570  active = (event->state == PERF_EVENT_STATE_ACTIVE);  in __perf_event_period()
    4571  if (active) {  in __perf_event_period()
    4586  if (active) {  in __perf_event_period()
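__perf_event_period() shows the stop/modify/restart shape: changing a parameter on a live event means quiescing it first and resuming it afterwards, while an inactive event just takes the new value. A sketch of that shape; the type names and the stop/start bodies are invented:

#include <stdbool.h>
#include <stdio.h>

enum state { STATE_INACTIVE, STATE_ACTIVE };

struct event {
	enum state state;
	unsigned long long period;
};

static void event_stop(struct event *e)  { (void)e; printf("stop\n"); }
static void event_start(struct event *e) { (void)e; printf("start\n"); }

static void event_set_period(struct event *e, unsigned long long period)
{
	/* Snapshot once, as at line 4570, so stop and start are paired. */
	bool active = (e->state == STATE_ACTIVE);

	if (active)
		event_stop(e);  /* quiesce the counter */

	e->period = period;

	if (active)
		event_start(e); /* resume with the new period */
}

int main(void)
{
	struct event e = { .state = STATE_ACTIVE, .period = 4000 };

	event_set_period(&e, 1000);     /* stop, update, start */
	return 0;
}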
|
/kernel/trace/ |
D | Kconfig |
    511  otherwise has native performance as long as no tracing is active.
|
/kernel/debug/kdb/ |
D | kdb_main.c |
    2087  struct kmsg_dumper dumper = { .active = 1 };  in kdb_dmesg()
|