/kernel/sched/ |
D | loadavg.c |
  157  unsigned long active, unsigned int n)  in calc_load_n() argument
  159  return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);  in calc_load_n()
  306  long delta, active, n;  in calc_global_nohz() local
  316  active = atomic_long_read(&calc_load_tasks);  in calc_global_nohz()
  317  active = active > 0 ? active * FIXED_1 : 0;  in calc_global_nohz()
  319  avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);  in calc_global_nohz()
  320  avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);  in calc_global_nohz()
  321  avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);  in calc_global_nohz()
  352  long active, delta;  in calc_global_load() local
  365  active = atomic_long_read(&calc_load_tasks);  in calc_global_load()
  [all …]
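
These loadavg.c hits are the fixed-point load-average update: calc_load_tasks (runnable plus uninterruptible tasks, scaled by FIXED_1) is folded into avenrun[] once per sampling window with a per-window decay factor, and calc_load_n() catches up after NOHZ idle by raising that factor to the n-th power. Below is a minimal user-space sketch of the same exponential-moving-average arithmetic; the FSHIFT/FIXED_1/EXP_* constants are the ones published in include/linux/sched/loadavg.h, while the driver loop and helper layout are illustrative rather than a copy of the kernel code.

#include <stdio.h>

/* Sketch only: fixed-point constants as published in
 * include/linux/sched/loadavg.h for 5-second sampling. */
#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)
#define EXP_1   1884    /* 1/exp(5sec/1min)  in fixed point */
#define EXP_5   2014    /* 1/exp(5sec/5min)  */
#define EXP_15  2037    /* 1/exp(5sec/15min) */

/* One EMA step: load' = load*exp + active*(1-exp), all in fixed point,
 * rounded up while the load is rising. */
static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	unsigned long newload = load * exp + active * (FIXED_1 - exp);

	if (active >= load)
		newload += FIXED_1 - 1;
	return newload / FIXED_1;
}

int main(void)
{
	unsigned long avenrun[3] = { 0, 0, 0 };
	/* Pretend 3 tasks were runnable at every 5 s sample for one minute. */
	unsigned long active = 3 * FIXED_1;

	for (int tick = 0; tick < 12; tick++) {
		avenrun[0] = calc_load(avenrun[0], EXP_1, active);
		avenrun[1] = calc_load(avenrun[1], EXP_5, active);
		avenrun[2] = calc_load(avenrun[2], EXP_15, active);
	}
	printf("1/5/15 min: %.2f %.2f %.2f\n",
	       avenrun[0] / (double)FIXED_1,
	       avenrun[1] / (double)FIXED_1,
	       avenrun[2] / (double)FIXED_1);
	return 0;
}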
|
D | rt.c |
  141   array = &rt_rq->active;  in init_rt_rq()
  1210  struct rt_prio_array *array = &rt_rq->active;  in dec_rt_prio()
  1448  struct rt_prio_array *array = &rt_rq->active;  in __enqueue_rt_entity()
  1482  struct rt_prio_array *array = &rt_rq->active;  in __dequeue_rt_entity()
  1607  struct rt_prio_array *array = &rt_rq->active;  in requeue_rt_entity()
  1863  struct rt_prio_array *array = &rt_rq->active;  in pick_next_rt_entity()
|
D | core.c |
  1540  if (p->uclamp[clamp_id].active)  in uclamp_eff_value()
  1573  uc_se->active = true;  in uclamp_rq_inc_id()
  1631  if (unlikely(!uc_se->active))  in uclamp_rq_dec_id()
  1640  uc_se->active = false;  in uclamp_rq_dec_id()
  1710  if (!p->uclamp[clamp_id].active)  in uclamp_rq_reinc_id()
  2009  p->uclamp[clamp_id].active = false;  in uclamp_fork()
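
Here `active` is a per-task, per-clamp flag: it is set when the task's clamp value is counted into the runqueue's clamp buckets, checked before the matching decrement, and cleared again at fork so a fresh task never appears to contribute. A rough sketch of that balanced inc/dec guard, with illustrative structure names rather than the kernel's uclamp types:

#include <stdbool.h>
#include <stdio.h>

/* Sketch: the "active" flag records whether this clamp currently
 * contributes to the bucket, so a stray or early dequeue is a no-op
 * instead of an underflow. */
struct clamp {
	bool active;
	int value;
};

struct bucket {
	int tasks;
};

static void clamp_inc(struct bucket *b, struct clamp *c)
{
	b->tasks++;
	c->active = true;
}

static void clamp_dec(struct bucket *b, struct clamp *c)
{
	if (!c->active)
		return;		/* never enqueued here: nothing to undo */
	b->tasks--;
	c->active = false;
}

int main(void)
{
	struct bucket b = { 0 };
	struct clamp c = { .active = false, .value = 512 };

	clamp_dec(&b, &c);	/* ignored, task not contributing yet */
	clamp_inc(&b, &c);
	clamp_dec(&b, &c);
	printf("bucket tasks: %d\n", b.tasks);	/* 0, balanced */
	return 0;
}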
|
D | sched.h | 675 struct rt_prio_array active; member
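
This is the member the rt.c hits above operate on: rt_rq->active is the real-time runqueue's priority array, conventionally a bitmap of non-empty priorities plus one FIFO per priority so that pick-next is a find-first-set. The sketch below shows that idea in plain user-space C; the array size, counters, and helper names are made up for the example and replace the kernel's per-priority lists with simple counts.

#include <stdio.h>

#define NPRIO 100
#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))

/* Sketch of a priority array: enqueue sets the priority's bit,
 * dequeue of the last task clears it, pick-next scans the bitmap. */
struct prio_array {
	unsigned long bitmap[(NPRIO + BITS_PER_LONG - 1) / BITS_PER_LONG];
	int nr_queued[NPRIO];	/* stand-in for the per-priority task lists */
};

static void enqueue(struct prio_array *a, int prio)
{
	a->nr_queued[prio]++;
	a->bitmap[prio / BITS_PER_LONG] |= 1UL << (prio % BITS_PER_LONG);
}

static void dequeue(struct prio_array *a, int prio)
{
	if (--a->nr_queued[prio] == 0)
		a->bitmap[prio / BITS_PER_LONG] &= ~(1UL << (prio % BITS_PER_LONG));
}

/* Lowest numbered (= highest) priority with a queued task, or -1. */
static int first_set_prio(const struct prio_array *a)
{
	for (int w = 0; w * BITS_PER_LONG < NPRIO; w++)
		if (a->bitmap[w])
			return w * BITS_PER_LONG + __builtin_ctzl(a->bitmap[w]);
	return -1;
}

int main(void)
{
	struct prio_array a = { 0 };

	enqueue(&a, 42);
	enqueue(&a, 7);
	printf("next prio: %d\n", first_set_prio(&a));	/* 7 */
	dequeue(&a, 7);
	printf("next prio: %d\n", first_set_prio(&a));	/* 42 */
	return 0;
}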
|
/kernel/time/ |
D | hrtimer.c |
  486  __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)  in __next_base() argument
  490  if (!*active)  in __next_base()
  493  idx = __ffs(*active);  in __next_base()
  494  *active &= ~(1U << idx);  in __next_base()
  499  #define for_each_active_base(base, cpu_base, active) \  argument
  500  while ((base = __next_base((cpu_base), &(active))))
  504  unsigned int active,  in __hrtimer_next_event_base() argument
  510  for_each_active_base(base, cpu_base, active) {  in __hrtimer_next_event_base()
  514  next = timerqueue_getnext(&base->active);  in __hrtimer_next_event_base()
  571  unsigned int active;  in __hrtimer_get_next_event() local
  [all …]
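
hrtimer.c uses `active` in two senses: each clock base has a timerqueue named active, and the scan functions take a bitmask of bases that currently have timers queued. __next_base() peels one set bit off that mask with __ffs() and for_each_active_base() wraps it in a loop, so empty bases are skipped entirely. A small stand-alone sketch of that set-bit iteration pattern (the base layout and expiry bookkeeping are illustrative):

#include <stdio.h>

struct base {
	const char *name;
	long next_expiry;	/* stand-in for timerqueue_getnext() */
};

/* Return the next base whose bit is set in *active, consuming the bit. */
static struct base *next_base(struct base *bases, unsigned int *active)
{
	unsigned int idx;

	if (!*active)
		return NULL;

	idx = __builtin_ctz(*active);	/* lowest set bit, like __ffs() */
	*active &= ~(1U << idx);
	return &bases[idx];
}

#define for_each_active_base(b, bases, active) \
	while (((b) = next_base((bases), &(active))))

int main(void)
{
	struct base bases[4] = {
		{ "MONOTONIC", 100 }, { "REALTIME", 50 },
		{ "BOOTTIME",  0   }, { "TAI",      75 },
	};
	unsigned int active = (1U << 0) | (1U << 1) | (1U << 3); /* base 2 empty */
	struct base *b;
	long earliest = -1;

	for_each_active_base(b, bases, active) {
		printf("checking %s\n", b->name);
		if (earliest < 0 || b->next_expiry < earliest)
			earliest = b->next_expiry;
	}
	printf("earliest expiry: %ld\n", earliest);	/* 50 */
	return 0;
}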
|
D | timer_list.c | 75 curr = timerqueue_getnext(&base->active); in print_active_timers()
|
/kernel/gcov/ |
D | gcc_4_7.c |
  283  unsigned int active;  in gcov_info_dup() local
  306  active = num_counter_active(info);  in gcov_info_dup()
  308  fi_size += sizeof(struct gcov_ctr_info) * active;  in gcov_info_dup()
  320  for (ct_idx = 0; ct_idx < active; ct_idx++) {  in gcov_info_dup()
  349  unsigned int active;  in gcov_info_free() local
  357  active = num_counter_active(info);  in gcov_info_free()
  365  for (ct_idx = 0; ct_idx < active; ct_idx++, ci_ptr++)  in gcov_info_free()
|
/kernel/bpf/ |
D | memalloc.c |
  85   local_t active;  member
  190  WARN_ON_ONCE(local_inc_return(&c->active) != 1);  in alloc_bulk()
  193  local_dec(&c->active);  in alloc_bulk()
  271  WARN_ON_ONCE(local_inc_return(&c->active) != 1);  in free_bulk()
  277  local_dec(&c->active);  in free_bulk()
  554  if (local_inc_return(&c->active) == 1) {  in unit_alloc()
  559  local_dec(&c->active);  in unit_alloc()
  582  if (local_inc_return(&c->active) == 1) {  in unit_free()
  594  local_dec(&c->active);  in unit_free()
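
The BPF allocator's `active` is a per-CPU local_t used as a reentrancy guard: the fast paths bump it with local_inc_return() and only touch the per-CPU free lists when the result is 1, i.e. nothing on this CPU (such as an interrupting context) is already inside; otherwise the caller backs off or defers. The trampoline.c and bpf_trace.c hits further down guard prog->active the same way via this_cpu_inc_return(). A user-space sketch of the inc-and-test idea, with C11 atomics standing in for the per-CPU local ops and made-up structure names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch: an increment-and-test guard that detects re-entry and makes
 * the inner caller back off instead of touching the shared free list. */
struct cache {
	atomic_int active;
	int free_cnt;		/* stand-in for the protected free list */
};

static bool unit_alloc(struct cache *c)
{
	bool ok = false;

	if (atomic_fetch_add(&c->active, 1) + 1 == 1) {
		/* Sole owner: safe to pop from the list. */
		if (c->free_cnt > 0) {
			c->free_cnt--;
			ok = true;
		}
	}
	/* A re-entrant caller (result > 1) skips the list entirely. */
	atomic_fetch_sub(&c->active, 1);
	return ok;
}

int main(void)
{
	struct cache c = { .active = 0, .free_cnt = 2 };

	printf("alloc: %d\n", unit_alloc(&c));	/* 1 */
	printf("alloc: %d\n", unit_alloc(&c));	/* 1 */
	printf("alloc: %d\n", unit_alloc(&c));	/* 0, list empty */
	return 0;
}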
|
D | bpf_lru_list.c |
  147  struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];  in __bpf_lru_list_rotate_active() local
  151  first_node = list_first_entry(active, struct bpf_lru_node, list);  in __bpf_lru_list_rotate_active()
  152  list_for_each_entry_safe_reverse(node, tmp_node, active, list) {  in __bpf_lru_list_rotate_active()
|
D | trampoline.c |
  888  if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {  in __bpf_prog_enter_recur()
  924  this_cpu_dec(*(prog->active));  in __bpf_prog_exit_recur()
  963  if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {  in __bpf_prog_enter_sleepable_recur()
  976  this_cpu_dec(*(prog->active));  in __bpf_prog_exit_sleepable_recur()
|
D | core.c |
  101   fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);  in bpf_prog_alloc_no_stats()
  102   if (!fp->active) {  in bpf_prog_alloc_no_stats()
  136   free_percpu(prog->active);  in bpf_prog_alloc()
  254   fp_old->active = NULL;  in bpf_prog_realloc()
  270   free_percpu(fp->active);  in __bpf_prog_free()
  1383  fp->active = NULL;  in bpf_prog_clone_free()
|
/kernel/power/ |
D | wakelock.c |
  48   if (wl->ws->active == show_active)  in pm_show_wakelocks()
  110  bool active;  in __wakelocks_gc() local
  114  active = wl->ws->active;  in __wakelocks_gc()
  120  if (!active) {  in __wakelocks_gc()
|
D | Kconfig |
  63   It creates an image which is saved in your active swap. Upon the next
  144  state automatically whenever there are no active wakeup sources.
|
/kernel/ |
D | pid_namespace.c |
  395  struct pid_namespace *active = task_active_pid_ns(current);  in pidns_install() local
  410  if (new->level < active->level)  in pidns_install()
  414  while (ancestor->level > active->level)  in pidns_install()
  416  if (ancestor != active)  in pidns_install()
  426  struct pid_namespace *active = task_active_pid_ns(current);  in pidns_get_parent() local
  434  if (p == active)  in pidns_get_parent()
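
pidns_install() only accepts a namespace that sits at or below the caller's active one: every pid namespace records its nesting level, so the check rejects a shallower target and otherwise climbs the candidate's parent chain until the levels match, then compares pointers. A compact sketch of that level-walk ancestry test with illustrative types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ns {
	int level;
	struct ns *parent;
};

/* Is "new" the active namespace itself or one of its descendants? */
static bool is_descendant_or_self(struct ns *new, struct ns *active)
{
	struct ns *ancestor = new;

	if (new->level < active->level)
		return false;		/* shallower: cannot be a descendant */
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	return ancestor == active;
}

int main(void)
{
	struct ns init = { .level = 0, .parent = NULL };
	struct ns child = { .level = 1, .parent = &init };
	struct ns grandchild = { .level = 2, .parent = &child };

	printf("%d\n", is_descendant_or_self(&grandchild, &child)); /* 1 */
	printf("%d\n", is_descendant_or_self(&init, &child));       /* 0 */
	return 0;
}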
|
D | context_tracking.c |
  478  if (ct->active) {  in __ct_user_enter()
  614  if (ct->active) {  in __ct_user_exit()
  698  if (!per_cpu(context_tracking.active, cpu)) {  in ct_cpu_track_user()
  699  per_cpu(context_tracking.active, cpu) = true;  in ct_cpu_track_user()
|
D | acct.c |
  107  int active;  member
  131  if (acct->active) {  in check_free_space()
  135  acct->active = 0;  in check_free_space()
  142  acct->active = 1;  in check_free_space()
  149  return acct->active;  in check_free_space()
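
For BSD process accounting, acct->active is a hysteresis flag maintained by check_free_space(): accounting is suspended when free space on the accounting filesystem falls below a low watermark and resumed only once it rises above a higher one, so the state cannot flap around a single threshold. A sketch of that two-watermark logic; the percentages are illustrative, not necessarily the values your kernel is built or tuned with:

#include <stdbool.h>
#include <stdio.h>

#define SUSPEND_PCT 2	/* illustrative low watermark  */
#define RESUME_PCT  4	/* illustrative high watermark */

/* One evaluation of the hysteresis: off below SUSPEND, on above RESUME,
 * unchanged in between. */
static bool update_active(bool active, int free_pct)
{
	if (active && free_pct < SUSPEND_PCT)
		return false;		/* suspend accounting */
	if (!active && free_pct > RESUME_PCT)
		return true;		/* resume accounting */
	return active;
}

int main(void)
{
	bool active = true;
	int samples[] = { 10, 3, 1, 3, 5, 3 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		active = update_active(active, samples[i]);
		printf("free=%d%% -> %s\n", samples[i], active ? "on" : "off");
	}
	return 0;
}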
|
D | cpu.c | 2611 static DEVICE_ATTR_RO(active);
|
D | signal.c |
  3809  struct pid_namespace *active = task_active_pid_ns(current);  in access_pidfd_pidns() local
  3815  if (p == active)  in access_pidfd_pidns()
|
/kernel/trace/ |
D | trace_events_user.c |
  1790  int i, active = 0, busy = 0, flags;  in user_seq_show() local
  1818  active++;  in user_seq_show()
  1824  seq_printf(m, "Active: %d\n", active);  in user_seq_show()
|
D | bpf_trace.c |
  2268  if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {  in __bpf_trace_run()
  2276  this_cpu_dec(*(prog->active));  in __bpf_trace_run()
|
D | Kconfig | 241 otherwise has native performance as long as no tracing is active.
|
/kernel/events/ |
D | core.c |
  744   if (!__load_acquire(&t->active))  in perf_cgroup_event_time_now()
  776   __store_release(&info->active, 0);  in update_cgrp_time_from_cpuctx()
  796   if (info->active)  in update_cgrp_time_from_event()
  822   __store_release(&info->active, 1);  in perf_cgroup_set_timestamp()
  5599  bool active;  in __perf_event_period() local
  5608  active = (event->state == PERF_EVENT_STATE_ACTIVE);  in __perf_event_period()
  5609  if (active) {  in __perf_event_period()
  5624  if (active) {  in __perf_event_period()
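
In perf's cgroup time accounting, `info->active` doubles as a publication flag: perf_cgroup_set_timestamp() writes the timestamps and then sets active with a store-release, and readers check it with a load-acquire before trusting those timestamps, so observing active == 1 guarantees the preceding writes are visible. A sketch of that publish/observe ordering with C11 atomics and POSIX threads (compile with -pthread; the struct and values are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

/* Sketch: the writer fills in data, then publishes with a release
 * store; a reader that sees the flag via an acquire load also sees
 * the data written before it. */
struct cgrp_time {
	long timestamp;			/* plain data, ordered by the flag */
	atomic_int active;
};

static struct cgrp_time t;

static void *writer(void *arg)
{
	t.timestamp = 12345;				/* write data first */
	atomic_store_explicit(&t.active, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	if (atomic_load_explicit(&t.active, memory_order_acquire))
		printf("saw timestamp %ld\n", t.timestamp);	/* always 12345 */
	else
		printf("not active yet\n");
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}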
|