/kernel/sched/

  fair.c
     502  return tg->idle > 0;  in tg_is_idle()
     507  return cfs_rq->idle > 0;  in cfs_rq_is_idle()
    5252  cfs_b->idle = 0;  in __assign_cfs_rq_runtime()
    5528  if (rq->curr == rq->idle && rq->cfs.nr_running)  in unthrottle_cfs_rq()
    5597  if (cfs_b->idle && !throttled)  in do_sched_cfs_period_timer()
    5602  cfs_b->idle = 1;  in do_sched_cfs_period_timer()
    5627  cfs_b->idle = 0;  in do_sched_cfs_period_timer()
    5826  int idle = 0;  in sched_cfs_period_timer()  (local)
    5835  idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);  in sched_cfs_period_timer()
    5868  if (idle)  in sched_cfs_period_timer()
    [all …]
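The fair.c hits trace a single flag: cfs_b->idle is cleared whenever runtime is handed out (__assign_cfs_rq_runtime), set by do_sched_cfs_period_timer() after a quiet period, and consulted so sched_cfs_period_timer() can stop rearming. A minimal userspace sketch of that lifecycle, with all names illustrative rather than the kernel's:

    /* Sketch only: mirrors the cfs_b->idle pattern in the hits above. */
    #include <stdbool.h>
    #include <stdio.h>

    struct bw_pool {
        bool idle;  /* no runtime was consumed during the last period */
    };

    /* Called once per period; returns true when the timer may stop. */
    static bool period_timer(struct bw_pool *b, bool throttled, bool ran)
    {
        if (b->idle && !throttled)  /* a full quiet period already passed */
            return true;
        b->idle = !ran;             /* any consumption keeps the pool busy */
        return false;
    }

    int main(void)
    {
        struct bw_pool b = { .idle = false };
        printf("%d\n", period_timer(&b, false, false)); /* 0: first quiet period */
        printf("%d\n", period_timer(&b, false, false)); /* 1: pool is now idle */
        return 0;
    }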
  idle.c
     443  return rq->idle;  in pick_task_idle()
     449  struct task_struct *next = rq->idle;  in pick_next_task_idle()
     499  DEFINE_SCHED_CLASS(idle) = {
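The idle.c hits are essentially the whole story: the idle scheduling class owns exactly one entity per runqueue, the per-CPU idle task, so picking it is a single field load. Under illustrative types:

    /* Sketch: the idle-class pick is just returning the per-CPU idle
     * task; these structs are illustrative, not the kernel's. */
    #include <stdio.h>

    struct task { const char *comm; };

    struct rq {
        struct task *idle;  /* always-runnable lowest-priority fallback */
    };

    static struct task *pick_task_idle(struct rq *rq)
    {
        return rq->idle;
    }

    int main(void)
    {
        struct task swapper = { .comm = "swapper/0" };
        struct rq rq = { .idle = &swapper };
        printf("%s\n", pick_task_idle(&rq)->comm);
        return 0;
    }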
  core.c
     273  rq->core->core_forceidle_count && rq->curr == rq->idle)  in sched_core_dequeue()
    1146  if (set_nr_and_not_polling(rq->idle))  in wake_up_idle_cpu()
    3915  if (!set_nr_if_polling(rq->idle))  in send_call_function_single_ipi()
    6066  return (task_rq(t)->idle == t);  in is_task_rq_idle()
    6245  if (p == rq_i->idle) {  in pick_next_task()
    6321  if (rq->core->core_forceidle_count && next == rq->idle)  in pick_next_task()
    6341  if (dst->curr != dst->idle)  in try_steal_cookie()
    6345  if (p == src->idle)  in try_steal_cookie()
    6355  if (p->core_occupation > dst->idle->core_occupation)  in try_steal_cookie()
    7149  if (unlikely(p == rq->idle)) {  in rt_mutex_setprio()
    [all …]
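Most of the core.c hits belong to core scheduling, where running rq->idle on one SMT sibling ("forced idle") keeps mutually untrusted tasks off the same core. The three try_steal_cookie() hits form a chain of guards; a sketch of just that shape, with illustrative fields and my own reading of the checks:

    /* Guard-clause sketch of try_steal_cookie(); fields stand in for
     * the kernel's rq/task_struct members, comments are interpretive. */
    #include <stdbool.h>

    struct task { int core_occupation; };
    struct rq   { struct task *curr, *idle; };

    static bool may_steal(struct rq *dst, struct rq *src, struct task *p)
    {
        if (dst->curr != dst->idle)  /* only steal onto an idle CPU */
            return false;
        if (p == src->idle)          /* never migrate the idle task itself */
            return false;
        /* occupation heuristic from the hit at line 6355 */
        if (p->core_occupation > dst->idle->core_occupation)
            return false;
        return true;
    }

    int main(void)
    {
        struct task idle_task = { 1 }, p = { 1 };
        struct rq src = { .curr = &p, .idle = &idle_task };
        struct rq dst = { .curr = &idle_task, .idle = &idle_task };
        return may_steal(&dst, &src, &p) ? 0 : 1;
    }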
  stats.h
     297  if (prev != rq->idle)  in sched_info_switch()
     300  if (next != rq->idle)  in sched_info_switch()
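Both stats.h hits enforce one rule: schedstat arrival/departure accounting skips the idle task on either side of a context switch, so idle time never pollutes run-delay statistics. The same shape with stub hooks:

    /* Sketch of the skip-idle rule; the two helpers are stand-ins for
     * the kernel's sched_info_depart()/sched_info_arrive(). */
    #include <stdio.h>

    struct task { const char *comm; };
    struct rq   { struct task *idle; };

    static void depart(struct task *t) { printf("depart %s\n", t->comm); }
    static void arrive(struct task *t) { printf("arrive %s\n", t->comm); }

    static void sched_info_switch(struct rq *rq, struct task *prev, struct task *next)
    {
        if (prev != rq->idle)
            depart(prev);
        if (next != rq->idle)
            arrive(next);
    }

    int main(void)
    {
        struct task idle = { "swapper/1" }, cc = { "cc1" };
        struct rq rq = { .idle = &idle };
        sched_info_switch(&rq, &cc, &idle);  /* prints only "depart cc1" */
        return 0;
    }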
  rt.c
      76  int idle = 0;  in sched_rt_period_timer()  (local)
      86  idle = do_sched_rt_period_timer(rt_b, overrun);  in sched_rt_period_timer()
      89  if (idle)  in sched_rt_period_timer()
      93  return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;  in sched_rt_period_timer()
     909  int i, idle = 1, throttled = 0;  in do_sched_rt_period_timer()  (local)
     967  if (rt_rq->rt_nr_running && rq->curr == rq->idle)  in do_sched_rt_period_timer()
     971  idle = 0;  in do_sched_rt_period_timer()
     974  idle = 0;  in do_sched_rt_period_timer()
     989  return idle;  in do_sched_rt_period_timer()
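The rt.c hits show the hrtimer idiom shared by these period timers: the callback reports whether any rt_rq still had work, and returns HRTIMER_NORESTART once everything was idle so the timer lapses until rearmed. Its skeleton (enum values mirror <linux/hrtimer.h>; the activity check is a placeholder):

    /* Skeleton of the restart convention; not the kernel's timer code. */
    enum hrtimer_restart { HRTIMER_NORESTART, HRTIMER_RESTART };

    /* Stand-in for do_sched_rt_period_timer(): 1 ("idle") when no
     * runqueue had runnable RT tasks or pending throttling. */
    static int do_period_work(int nr_active)
    {
        return nr_active == 0;
    }

    static enum hrtimer_restart sched_rt_period_timer(int nr_active)
    {
        int idle = do_period_work(nr_active);
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
    }

    int main(void)
    {
        return sched_rt_period_timer(0) == HRTIMER_NORESTART ? 0 : 1;
    }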
  core_sched.c
     278  if (p == rq_i->idle)  in __sched_core_account_forceidle()
  sched.h
     355  u8 idle;  (member)
     383  int idle;  (member)
     500  extern int sched_group_set_idle(struct task_group *tg, long idle);
     643  int idle;  (member)
    1028  struct task_struct *idle;  (member)
  cputime.c
     414  } else if (p == this_rq()->idle) {  in irqtime_account_process_tick()
     528  else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))  in account_process_tick()
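Both cputime.c hits gate tick accounting on the idle task: a tick is charged as idle time only when it lands on rq->idle outside hard-IRQ context, otherwise it is billed to the task (or as IRQ time). A simplified sketch of that split:

    /* Sketch of the idle-vs-task tick split; the in_hardirq flag
     * simplifies the kernel's irq_count() != HARDIRQ_OFFSET check. */
    #include <stdio.h>

    struct task { const char *comm; };
    struct rq   { struct task *idle; };

    static unsigned long task_ticks, idle_ticks;

    static void account_tick(struct rq *rq, struct task *p, int in_hardirq)
    {
        if (p != rq->idle || in_hardirq)
            task_ticks++;  /* billed to the task or to IRQ time */
        else
            idle_ticks++;  /* the CPU was genuinely idle */
    }

    int main(void)
    {
        struct task idle = { "swapper/2" };
        struct rq rq = { .idle = &idle };
        account_tick(&rq, &idle, 0);
        printf("task=%lu idle=%lu\n", task_ticks, idle_ticks);  /* task=0 idle=1 */
        return 0;
    }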
/kernel/time/

  Kconfig
      83  Tracks idle state on behalf of RCU.
     109  bool "Idle dynticks system (tickless idle)"
     112  This option enables a tickless idle system: timer interrupts
     113  will only trigger on an as-needed basis when the system is idle.
     187  This is the old config entry that enables dynticks idle.
  tick-sched.c
     718  ktime_t now, idle;  in get_cpu_idle_time_us()  (local)
     726  idle = ts->idle_sleeptime;  in get_cpu_idle_time_us()
     731  idle = ktime_add(ts->idle_sleeptime, delta);  in get_cpu_idle_time_us()
     733  idle = ts->idle_sleeptime;  in get_cpu_idle_time_us()
     737  return ktime_to_us(idle);  in get_cpu_idle_time_us()
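get_cpu_idle_time_us() returns the accumulated idle sleep time, extended by the still-open interval when the CPU is idling at the moment of the query (the ktime_add() branch at line 731). The same arithmetic in plain microseconds, with illustrative field names:

    /* Sketch of the accumulated-plus-open-interval pattern. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tick_sched {
        uint64_t idle_sleeptime;  /* sum of finished idle intervals (us) */
        uint64_t idle_entrytime;  /* start of the current interval (us) */
        int      idle_active;     /* CPU is idling right now */
    };

    static uint64_t get_cpu_idle_time_us(const struct tick_sched *ts, uint64_t now)
    {
        uint64_t idle = ts->idle_sleeptime;

        if (ts->idle_active)
            idle += now - ts->idle_entrytime;  /* include the open interval */
        return idle;
    }

    int main(void)
    {
        struct tick_sched ts = {
            .idle_sleeptime = 500, .idle_entrytime = 900, .idle_active = 1,
        };
        printf("%" PRIu64 "\n", get_cpu_idle_time_us(&ts, 1000));  /* 600 */
        return 0;
    }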
/kernel/

  cpu.c
     613  struct task_struct *idle = idle_thread_get(cpu);  in bringup_cpu()  (local)
     619  scs_task_reset(idle);  in bringup_cpu()
     620  kasan_unpoison_task_stack(idle);  in bringup_cpu()
     630  ret = __cpu_up(cpu, idle);  in bringup_cpu()
     639  struct task_struct *idle = idle_thread_get(cpu);  in finish_cpu()  (local)
     640  struct mm_struct *mm = idle->active_mm;  in finish_cpu()
     647  idle->active_mm = &init_mm;  in finish_cpu()
    1409  struct task_struct *idle;  in _cpu_up()  (local)
    1428  idle = idle_thread_get(cpu);  in _cpu_up()
    1429  if (IS_ERR(idle)) {  in _cpu_up()
    [all …]
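The cpu.c hits follow the per-CPU idle thread through hotplug: bringup_cpu() fetches the pre-forked idle thread, scrubs stale per-boot state (shadow call stack, KASAN stack poison), and hands it to the architecture code so the new CPU boots straight into its idle loop. A fully stubbed sketch of that flow:

    /* Stubbed sketch of the bringup flow; every helper is a stand-in
     * for the kernel function of the same name. */
    #include <stdio.h>

    struct task { int cpu; };

    static struct task idle_threads[4];  /* pre-forked, one per possible CPU */

    static struct task *idle_thread_get(int cpu) { return &idle_threads[cpu]; }
    static void scs_task_reset(struct task *t)            { (void)t; }
    static void kasan_unpoison_task_stack(struct task *t) { (void)t; }

    static int __cpu_up(int cpu, struct task *idle)
    {
        idle->cpu = cpu;
        printf("cpu%d entering its idle loop\n", cpu);
        return 0;
    }

    static int bringup_cpu(int cpu)
    {
        struct task *idle = idle_thread_get(cpu);

        scs_task_reset(idle);             /* clear state from any prior boot */
        kasan_unpoison_task_stack(idle);
        return __cpu_up(cpu, idle);
    }

    int main(void) { return bringup_cpu(1); }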
  smp.c
      74  u64 idle;  (member)
     309  cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);  in csd_lock_print_extended()
     718  cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,  in flush_smp_call_function_queue()
  Kconfig.preempt
     125  will execute a task from the same 'core group', forcing idle when no
  fork.c
    2679  static inline void init_idle_pids(struct task_struct *idle)  in init_idle_pids()  (argument)
    2684  INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */  in init_idle_pids()
    2685  init_task_pid(idle, type, &init_struct_pid);  in init_idle_pids()
    2703  .idle = 1,  in fork_idle()
  workqueue.c
    4817  bool idle = true;  in show_one_workqueue()  (local)
    4822  idle = false;  in show_one_workqueue()
    4826  if (idle) /* Nothing to print for idle workqueue */  in show_one_workqueue()
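show_one_workqueue() assumes idle = true, flips it at the first sign of pending work, and prints nothing for a fully idle workqueue. The same early-out shape over a toy model:

    /* Sketch of the assume-idle-then-scan early out; a workqueue is
     * modeled as a plain array of pending counts. */
    #include <stdbool.h>
    #include <stdio.h>

    static void show_one_workqueue(const char *name, const int *pending, int n)
    {
        bool idle = true;

        for (int i = 0; i < n; i++) {
            if (pending[i]) {
                idle = false;
                break;
            }
        }
        if (idle)  /* nothing to print for an idle workqueue */
            return;

        printf("workqueue %s: pending work\n", name);
    }

    int main(void)
    {
        int busy[] = { 0, 3, 0 }, quiet[] = { 0, 0, 0 };

        show_one_workqueue("events", busy, 3);         /* prints */
        show_one_workqueue("mm_percpu_wq", quiet, 3);  /* silent */
        return 0;
    }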
/kernel/rcu/

  Kconfig
      11  # Dynticks-idle tracking
      90  idle, and user-mode execution as quiescent states. Not for
     107  context switches on all online CPUs, including idle ones,
     123  these readers to appear in the idle loop as well as on the
     125  including idle ones, so use with caution. Not for manual
     297  bool "Tasks Trace RCU readers use memory barriers in user and idle"
     302  to CPUs executing in userspace or idle during tasks trace
/kernel/debug/kdb/

  kdb_main.c
    2278  int idle = 0, daemon = 0;  in kdb_ps_suppressed()  (local)
    2284  ++idle;  in kdb_ps_suppressed()
    2290  if (idle || daemon) {  in kdb_ps_suppressed()
    2291  if (idle)  in kdb_ps_suppressed()
    2293  idle, idle == 1 ? "" : "es",  in kdb_ps_suppressed()
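kdb_ps_suppressed() counts the idle and daemon tasks its ps listing skipped and reports them once, with the idle == 1 ? "" : "es" hit supplying the process/processes plural. Condensed, with approximate message wording:

    /* Condensed sketch of the suppressed-count report; the wording is
     * approximate, the pluralization is the hit at line 2293. */
    #include <stdio.h>

    static void kdb_ps_suppressed(int idle, int daemon)
    {
        if (idle || daemon) {
            if (idle)
                printf("%d idle process%s (state I) suppressed\n",
                       idle, idle == 1 ? "" : "es");
            if (daemon)
                printf("%d sleeping system daemon%s suppressed\n",
                       daemon, daemon == 1 ? "" : "s");
        }
    }

    int main(void)
    {
        kdb_ps_suppressed(1, 2);
        return 0;
    }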