
Searched refs:active (Results 1 – 19 of 19) sorted by relevance

/kernel/sched/
loadavg.c
158 unsigned long active, unsigned int n) in calc_load_n() argument
160 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); in calc_load_n()
307 long delta, active, n; in calc_global_nohz() local
317 active = atomic_long_read(&calc_load_tasks); in calc_global_nohz()
318 active = active > 0 ? active * FIXED_1 : 0; in calc_global_nohz()
320 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); in calc_global_nohz()
321 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); in calc_global_nohz()
322 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); in calc_global_nohz()
353 long active, delta; in calc_global_load() local
366 active = atomic_long_read(&calc_load_tasks); in calc_global_load()
[all …]
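
The loadavg.c hits are the kernel's fixed-point load-average math: calc_load_tasks is sampled into active, scaled to fixed point by FIXED_1, and folded into avenrun[] with exponential decay. calc_load_n() handles n missed ticks at once by raising the decay factor to the n-th power (the fixed_power_int() call in the hit at line 160). A minimal sketch of the single update step these call sites build on:

#define FSHIFT  11                      /* bits of fractional precision */
#define FIXED_1 (1 << FSHIFT)          /* 1.0 in fixed point */

/* one update step: avg = avg * exp + active * (1 - exp), in fixed point */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        unsigned long newload = load * exp + active * (FIXED_1 - exp);

        if (active >= load)
                newload += FIXED_1 - 1; /* round up while load is rising */
        return newload / FIXED_1;
}

Each avenrun[] slot is updated with a different decay constant (EXP_1, EXP_5, EXP_15 in the hits above) to produce the 1-, 5- and 15-minute averages.
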
rt.c
90 array = &rt_rq->active; in init_rt_rq()
1172 struct rt_prio_array *array = &rt_rq->active; in dec_rt_prio()
1304 struct rt_prio_array *array = &rt_rq->active; in __enqueue_rt_entity()
1338 struct rt_prio_array *array = &rt_rq->active; in __dequeue_rt_entity()
1456 struct rt_prio_array *array = &rt_rq->active; in requeue_rt_entity()
1709 struct rt_prio_array *array = &rt_rq->active; in pick_next_rt_entity()
core.c
1510 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1543 uc_se->active = true; in uclamp_rq_inc_id()
1601 if (unlikely(!uc_se->active)) in uclamp_rq_dec_id()
1610 uc_se->active = false; in uclamp_rq_dec_id()
1680 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1945 p->uclamp[clamp_id].active = false; in uclamp_fork()
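
The core.c hits show the bookkeeping role of uclamp's active flag: it is set when a task's clamp value is accounted into a runqueue bucket (uclamp_rq_inc_id), checked before the value is trusted or removed, and cleared on dequeue and at fork. A hypothetical, stripped-down sketch of that inc/dec pairing (the struct and function names here are illustrative, not the kernel's):

#include <stdbool.h>

struct clamp_se {
        unsigned int value;
        bool active;    /* true while 'value' is accounted in a bucket */
};

static void clamp_inc(struct clamp_se *se, unsigned int *bucket_tasks)
{
        (*bucket_tasks)++;
        se->active = true;
}

static void clamp_dec(struct clamp_se *se, unsigned int *bucket_tasks)
{
        if (!se->active)        /* e.g. cleared at fork, never enqueued */
                return;
        (*bucket_tasks)--;
        se->active = false;
}

The flag is what makes the unlikely(!uc_se->active) bail-out in uclamp_rq_dec_id() safe: a decrement without a matching increment simply does nothing.
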
sched.h
648 struct rt_prio_array active; member
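
This member is the structure all the rt.c hits above operate on: a classic O(1) priority array. A sketch of its layout, kernel-style (DECLARE_BITMAP and struct list_head come from kernel headers, so this is not a standalone program):

#define MAX_RT_PRIO 100

struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO + 1); /* +1 bit as a delimiter */
        struct list_head queue[MAX_RT_PRIO];    /* one FIFO per priority */
};

__enqueue_rt_entity() links the entity into queue[prio] and sets bit prio; pick_next_rt_entity() finds the highest runnable priority in constant time by locating the first set bit and dequeuing the head of that queue.
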
/kernel/time/
hrtimer.c
486 __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active) in __next_base() argument
490 if (!*active) in __next_base()
493 idx = __ffs(*active); in __next_base()
494 *active &= ~(1U << idx); in __next_base()
499 #define for_each_active_base(base, cpu_base, active) \ argument
500 while ((base = __next_base((cpu_base), &(active))))
504 unsigned int active, in __hrtimer_next_event_base() argument
510 for_each_active_base(base, cpu_base, active) { in __hrtimer_next_event_base()
514 next = timerqueue_getnext(&base->active); in __hrtimer_next_event_base()
571 unsigned int active; in __hrtimer_get_next_event() local
[all …]
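
__next_base() and for_each_active_base() walk only the clock bases that currently have timers queued: active is a bitmask, __ffs() finds its lowest set bit, and the bit is cleared so each base is visited exactly once. A userspace analogue of the idiom (the GCC/Clang builtin __builtin_ctz standing in for __ffs):

#include <stdio.h>

/* visit every set bit of 'active', consuming the mask as we go */
static void for_each_active(unsigned int active)
{
        while (active) {
                unsigned int idx = __builtin_ctz(active);

                active &= ~(1U << idx);
                printf("visit base %u\n", idx);
        }
}

int main(void)
{
        for_each_active(0x15);  /* bases 0, 2 and 4 -> three visits */
        return 0;
}
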
timer_list.c
75 curr = timerqueue_getnext(&base->active); in print_active_timers()
/kernel/gcov/
gcc_4_7.c
283 unsigned int active; in gcov_info_dup() local
306 active = num_counter_active(info); in gcov_info_dup()
308 fi_size += sizeof(struct gcov_ctr_info) * active; in gcov_info_dup()
320 for (ct_idx = 0; ct_idx < active; ct_idx++) { in gcov_info_dup()
349 unsigned int active; in gcov_info_free() local
357 active = num_counter_active(info); in gcov_info_free()
365 for (ct_idx = 0; ct_idx < active; ct_idx++, ci_ptr++) in gcov_info_free()
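
In both gcov_info_dup() and gcov_info_free(), active is the number of counter types compiled into this gcov build (num_counter_active()); it bounds both the size of the duplicate and the free loop. A simplified userspace sketch of the sizing pattern (the struct names here are illustrative; the real ones are struct gcov_info and struct gcov_ctr_info):

#include <stdlib.h>
#include <string.h>

struct ctr_info { unsigned int num; };
struct fn_info  { unsigned int ident; struct ctr_info ctrs[]; };

/* duplicate only the 'active' counter slots, as gcov_info_dup() does */
static struct fn_info *fn_info_dup(const struct fn_info *src,
                                   unsigned int active)
{
        size_t fi_size = sizeof(*src) + sizeof(struct ctr_info) * active;
        struct fn_info *dup = malloc(fi_size);

        if (dup)
                memcpy(dup, src, fi_size);
        return dup;
}
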
/kernel/
context_tracking.c
71 if (__this_cpu_read(context_tracking.active)) { in __context_tracking_enter()
152 if (__this_cpu_read(context_tracking.active)) { in __context_tracking_exit()
195 if (!per_cpu(context_tracking.active, cpu)) { in context_tracking_cpu_set()
196 per_cpu(context_tracking.active, cpu) = true; in context_tracking_cpu_set()
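
context_tracking.active is a per-CPU, one-way opt-in switch: context_tracking_cpu_set() flips it to true once, and the enter/exit paths do their work only on CPUs where it is set. A rough userspace analogue, with a thread-local flag standing in for the per-CPU variable:

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool ct_active;    /* stand-in for the per-CPU flag */

/* one-way opt-in, as in context_tracking_cpu_set() above */
static void ct_cpu_set(void)
{
        if (!ct_active)
                ct_active = true;
}

/* enter/exit only do work on CPUs that opted in */
static void ct_enter(void)
{
        if (ct_active)
                puts("user entry: inform RCU, account the transition");
}
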
pid_namespace.c
395 struct pid_namespace *active = task_active_pid_ns(current); in pidns_install() local
410 if (new->level < active->level) in pidns_install()
414 while (ancestor->level > active->level) in pidns_install()
416 if (ancestor != active) in pidns_install()
426 struct pid_namespace *active = task_active_pid_ns(current); in pidns_get_parent() local
434 if (p == active) in pidns_get_parent()
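
Both pidns_install() and pidns_get_parent() compare namespaces against the caller's active one; the install path additionally walks parent pointers to require that the namespace being installed is a descendant of active. A self-contained sketch of that check (struct ns is an illustrative stand-in for struct pid_namespace):

#include <stdbool.h>

struct ns {
        int level;              /* depth from the root namespace */
        struct ns *parent;
};

/* walk new's ancestors up to active's level; installing is only
 * allowed if we land exactly on active, i.e. new is a descendant */
static bool is_descendant(struct ns *new, struct ns *active)
{
        struct ns *ancestor = new;

        if (new->level < active->level)
                return false;   /* shallower than active: reject */
        while (ancestor->level > active->level)
                ancestor = ancestor->parent;
        return ancestor == active;
}
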
acct.c
88 int active; member
112 if (acct->active) { in check_free_space()
116 acct->active = 0; in check_free_space()
123 acct->active = 1; in check_free_space()
130 return acct->active; in check_free_space()
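
Here active is a hysteresis flag: check_free_space() suspends BSD process accounting when the filesystem gets too full and resumes it only once noticeably more space is free again, so accounting does not flap around a single threshold. A sketch of the two-threshold logic (threshold values illustrative; the kernel's tunables are RESUME and SUSPEND in kernel/acct.c):

#include <stdbool.h>

#define SUSPEND_PCT 2   /* stop accounting below 2% free space */
#define RESUME_PCT  4   /* restart only above 4% free space */

static bool check_free_space(bool active, unsigned int free_pct)
{
        if (active && free_pct < SUSPEND_PCT)
                active = false;         /* disk nearly full: suspend */
        else if (!active && free_pct > RESUME_PCT)
                active = true;          /* enough space again: resume */
        return active;
}
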
cpu.c
2586 static DEVICE_ATTR_RO(active);
signal.c
3819 struct pid_namespace *active = task_active_pid_ns(current); in access_pidfd_pidns() local
3825 if (p == active) in access_pidfd_pidns()
/kernel/power/
wakelock.c
48 if (wl->ws->active == show_active) in pm_show_wakelocks()
110 bool active; in __wakelocks_gc() local
114 active = wl->ws->active; in __wakelocks_gc()
120 if (!active) { in __wakelocks_gc()
Kconfig
63 It creates an image which is saved in your active swap. Upon the next
144 state automatically whenever there are no active wakeup sources.
/kernel/bpf/
trampoline.c
576 if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) { in __bpf_prog_enter()
609 __this_cpu_dec(*(prog->active)); in __bpf_prog_exit()
619 if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) { in __bpf_prog_enter_sleepable()
629 __this_cpu_dec(*(prog->active)); in __bpf_prog_exit_sleepable()
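
prog->active is a per-CPU recursion counter: __bpf_prog_enter() increments it and only reports success if it went 0 -> 1, so a BPF program that somehow triggers itself on the same CPU is skipped rather than recursing; __bpf_prog_exit() always decrements, keeping the counter balanced. A thread-local userspace analogue of the pairing:

#include <stdio.h>

static _Thread_local int prog_active;   /* stand-in for the per-CPU counter */

/* returns nonzero when it is safe to run the program */
static int prog_enter(void)
{
        return ++prog_active == 1;      /* != 1 means recursion: skip */
}

static void prog_exit(void)
{
        --prog_active;                  /* always balances the increment */
}

int main(void)
{
        if (prog_enter())
                puts("program runs");
        prog_exit();
        return 0;
}
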
bpf_lru_list.c
147 struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE]; in __bpf_lru_list_rotate_active() local
151 first_node = list_first_entry(active, struct bpf_lru_node, list); in __bpf_lru_list_rotate_active()
152 list_for_each_entry_safe_reverse(node, tmp_node, active, list) { in __bpf_lru_list_rotate_active()
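
The rotation these hits belong to keeps the LRU cheap: remember the current head of the active list, walk the list from the tail, re-queue nodes that were referenced since the last pass and demote the rest to the inactive list, then stop at the remembered head so no node is scanned twice in one rotation. Condensed from the function above (kernel list helpers, not standalone; the real loop also caps the walk at a scan budget):

first_node = list_first_entry(active, struct bpf_lru_node, list);
list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
        if (bpf_lru_node_is_ref(node))
                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
        else
                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
        if (node == first_node)
                break;
}
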
core.c
99 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags); in bpf_prog_alloc_no_stats()
100 if (!fp->active) { in bpf_prog_alloc_no_stats()
130 free_percpu(prog->active); in bpf_prog_alloc()
248 fp_old->active = NULL; in bpf_prog_realloc()
264 free_percpu(fp->active); in __bpf_prog_free()
1134 fp->active = NULL; in bpf_prog_clone_free()
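
These hits trace the lifecycle of the per-CPU active counter used by the trampoline code above: allocated together with the program, detached from the old copy on realloc and clone so only one owner ever frees it, and released in one place. Condensed from the hits (kernel-internal calls, not standalone):

fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
if (!fp->active) {
        /* allocation failed: undo and bail out */
        return NULL;
}
/* ... on bpf_prog_realloc(): */
fp_old->active = NULL;          /* the new prog now owns the counter */
/* ... on __bpf_prog_free(): */
free_percpu(fp->active);
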
/kernel/events/
core.c
745 if (!__load_acquire(&t->active)) in perf_cgroup_event_time_now()
777 __store_release(&info->active, 0); in update_cgrp_time_from_cpuctx()
826 __store_release(&info->active, 1); in perf_cgroup_set_timestamp()
5674 bool active; in __perf_event_period() local
5683 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5684 if (active) { in __perf_event_period()
5699 if (active) { in __perf_event_period()
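
Two patterns meet in the events/core.c hits. __perf_event_period() snapshots whether the event is ACTIVE so it can stop it, change the period, and restart it only if it was running. The cgroup-time hits pair __store_release() with __load_acquire() on info->active so lock-free readers only trust the cached timestamps after seeing the flag set. A userspace sketch of that second, publish/consume pattern (struct name illustrative):

#include <stdatomic.h>

struct cgrp_time {
        unsigned long long time;
        atomic_int active;
};

/* writer: fill in the timestamp, then publish it with release semantics */
static void set_timestamp(struct cgrp_time *t, unsigned long long now)
{
        t->time = now;
        atomic_store_explicit(&t->active, 1, memory_order_release);
}

/* reader: only trust 'time' after observing active == 1 with acquire */
static int read_time(struct cgrp_time *t, unsigned long long *out)
{
        if (!atomic_load_explicit(&t->active, memory_order_acquire))
                return 0;       /* not running: timestamp may be stale */
        *out = t->time;
        return 1;
}
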
/kernel/trace/
DKconfig216 otherwise has native performance as long as no tracing is active.