Cross-reference hits for the identifier "idle" under kernel/, grouped by file. Each entry gives the source line number, the matching code, and the enclosing function; (local), (member), and (argument) mark what kind of symbol matched. Entries ending in [all …] were truncated by the search tool.

/kernel/sched/
fair.c
     442  return tg->idle > 0;   in tg_is_idle()
     447  return cfs_rq->idle > 0;   in cfs_rq_is_idle()
    4961  cfs_b->idle = 0;   in __assign_cfs_rq_runtime()
    5249  if (rq->curr == rq->idle && rq->cfs.nr_running)   in unthrottle_cfs_rq()
    5318  if (cfs_b->idle && !throttled)   in do_sched_cfs_period_timer()
    5323  cfs_b->idle = 1;   in do_sched_cfs_period_timer()
    5348  cfs_b->idle = 0;   in do_sched_cfs_period_timer()
    5547  int idle = 0;   in sched_cfs_period_timer() (local)
    5556  idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);   in sched_cfs_period_timer()
    5589  if (idle)   in sched_cfs_period_timer()
    [all …]
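Read together, the fair.c hits are the CFS bandwidth bookkeeping: cfs_b->idle is cleared whenever runtime is handed out (__assign_cfs_rq_runtime()), set again after each refill, and do_sched_cfs_period_timer() tells its caller to stop the period hrtimer once a full period passed with the pool untouched. A minimal userspace reduction of that decision follows; the struct and functions are hypothetical stand-ins, not the kernel's real cfs_bandwidth API:

```c
#include <stdbool.h>

/* Hypothetical reduction of the kernel's cfs_bandwidth idle tracking. */
struct cfs_bandwidth_sketch {
	bool idle;      /* a full period elapsed with no runtime consumed */
	bool throttled; /* some cfs_rq is still waiting for runtime */
};

/* Runtime-assignment path: any activity clears the idle flag,
 * mirroring "cfs_b->idle = 0" in __assign_cfs_rq_runtime(). */
static void assign_runtime_sketch(struct cfs_bandwidth_sketch *b)
{
	b->idle = false;
}

/*
 * Mirrors do_sched_cfs_period_timer()'s return convention: nonzero means
 * "go idle" (the caller maps that to HRTIMER_NORESTART), zero means rearm.
 */
static int period_timer_sketch(struct cfs_bandwidth_sketch *b)
{
	if (b->idle && !b->throttled)
		return 1;    /* untouched for a whole period: stop the timer */
	b->idle = true;      /* assume idle until runtime gets consumed */
	return 0;
}
```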
idle.c
     446  return rq->idle;   in pick_task_idle()
     452  struct task_struct *next = rq->idle;   in pick_next_task_idle()
     502  DEFINE_SCHED_CLASS(idle) = {
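The idle.c hits show the idle scheduling class simply handing back the per-runqueue idle task: it is the scheduler's guaranteed fallback, so its pick functions never have to search a queue, and line 502 registers the class. A toy sketch of that shape, with stand-in types rather than the kernel's:

```c
/* Stand-in types; the real ones live in kernel/sched/sched.h. */
struct task;
struct rq_sketch {
	struct task *idle; /* this CPU's idle task, created at boot */
};

struct sched_class_sketch {
	struct task *(*pick_task)(struct rq_sketch *rq);
};

/* The idle class never searches: its "queue" is one fixed task. */
static struct task *pick_task_idle_sketch(struct rq_sketch *rq)
{
	return rq->idle;
}

static const struct sched_class_sketch idle_sched_class_sketch = {
	.pick_task = pick_task_idle_sketch,
};
```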
core.c
    1089  if (set_nr_and_not_polling(rq->idle))   in wake_up_idle_cpu()
    3796  if (!set_nr_if_polling(rq->idle))   in send_call_function_single_ipi()
    3828  if (set_nr_if_polling(rq->idle)) {   in wake_up_if_idle()
    5827  return (task_rq(t)->idle == t);   in is_task_rq_idle()
    6029  if (rq_i->idle == p && rq_i->nr_running) {   in pick_next_task()
    6142  if (dst->curr != dst->idle)   in try_steal_cookie()
    6146  if (p == src->idle)   in try_steal_cookie()
    6156  if (p->core_occupation > dst->idle->core_occupation)   in try_steal_cookie()
    7035  if (unlikely(p == rq->idle)) {   in rt_mutex_setprio()
    7245  if (rq->curr != rq->idle)   in idle_cpu()
    [all …]
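The first three core.c hits are the polling-idle wakeup optimization: if the target CPU's idle task is polling its flags word (TIF_POLLING_NRFLAG, e.g. while in mwait), atomically setting TIF_NEED_RESCHED is enough to wake it and the IPI can be skipped. A hedged userspace analogue of that handshake; the flag values and names are illustrative, not the kernel's:

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative flag bits; the kernel uses TIF_NEED_RESCHED and
 * TIF_POLLING_NRFLAG in the task's thread_info flags word. */
#define F_NEED_RESCHED (1u << 0)
#define F_POLLING      (1u << 1)

/*
 * Analogue of set_nr_and_not_polling(): unconditionally set NEED_RESCHED
 * and report whether the target was *not* polling, i.e. whether an IPI
 * is still required for it to notice.
 */
static bool set_nr_and_not_polling_sketch(atomic_uint *flags)
{
	unsigned int old = atomic_fetch_or(flags, F_NEED_RESCHED);
	return !(old & F_POLLING); /* true => send the IPI */
}

/* Idle side: advertise polling, spin until someone needs this CPU. */
static void idle_poll_sketch(atomic_uint *flags)
{
	atomic_fetch_or(flags, F_POLLING);
	while (!(atomic_load(flags) & F_NEED_RESCHED))
		; /* the real idle loop would mwait/halt here */
	atomic_fetch_and(flags, ~(F_POLLING | F_NEED_RESCHED));
}
```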
stats.h
     259  if (prev != rq->idle)   in sched_info_switch()
     262  if (next != rq->idle)   in sched_info_switch()
Makefile
      26  obj-y += idle.o fair.o rt.o deadline.o
rt.c
      25  int idle = 0;   in sched_rt_period_timer() (local)
      35  idle = do_sched_rt_period_timer(rt_b, overrun);   in sched_rt_period_timer()
      38  if (idle)   in sched_rt_period_timer()
      42  return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;   in sched_rt_period_timer()
     869  int i, idle = 1, throttled = 0;   in do_sched_rt_period_timer() (local)
     927  if (rt_rq->rt_nr_running && rq->curr == rq->idle)   in do_sched_rt_period_timer()
     931  idle = 0;   in do_sched_rt_period_timer()
     934  idle = 0;   in do_sched_rt_period_timer()
     949  return idle;   in do_sched_rt_period_timer()
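rt.c line 42 states the convention the CFS sketch above relied on: the period-timer callback returns HRTIMER_NORESTART exactly when the scan in do_sched_rt_period_timer() saw no RT activity anywhere. A reduction of that assume-idle, prove-busy scan; the array parameter is a stand-in for the per-CPU runqueue walk:

```c
/*
 * Stand-in for the per-CPU walk in do_sched_rt_period_timer():
 * start from idle = 1 and clear it the moment any runqueue shows
 * RT activity. The caller rearms the hrtimer only while busy.
 */
static int rt_period_scan_sketch(const unsigned int *rt_nr_running, int ncpu)
{
	int idle = 1;

	for (int i = 0; i < ncpu; i++) {
		if (rt_nr_running[i])
			idle = 0; /* activity: keep the period timer ticking */
	}

	return idle; /* 1 => HRTIMER_NORESTART, 0 => HRTIMER_RESTART */
}
```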
sched.h
     375  u8 idle;   (member)
     401  int idle;   (member)
     521  extern int sched_group_set_idle(struct task_group *tg, long idle);
     620  int idle;   (member)
     995  struct task_struct *idle;   (member)
cputime.c
     405  } else if (p == this_rq()->idle) {   in irqtime_account_process_tick()
     514  else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))   in account_process_tick()
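Both cputime.c hits belong to the tick-accounting classifier: a tick that lands while the idle task is current is charged as idle time, unless it interrupted a hardirq beyond the tick itself, in which case the time belongs to interrupt (system) work. A hedged reduction of that decision; the enum and parameters are hypothetical:

```c
#include <stdbool.h>

/* Hypothetical buckets; the kernel charges into kernel_cpustat fields. */
enum tick_charge { CHARGE_USER, CHARGE_SYSTEM, CHARGE_IDLE };

/*
 * Reduction of account_process_tick(): the idle task's ticks count as
 * idle time, unless the tick interrupted hardirq context, in which case
 * the time really belongs to system (interrupt) work.
 */
static enum tick_charge classify_tick_sketch(bool current_is_idle,
					     bool user_mode,
					     bool interrupted_hardirq)
{
	if (user_mode)
		return CHARGE_USER;
	if (current_is_idle && !interrupted_hardirq)
		return CHARGE_IDLE;
	return CHARGE_SYSTEM;
}
```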
/kernel/time/
Kconfig
     100  bool "Idle dynticks system (tickless idle)"
     103  This option enables a tickless idle system: timer interrupts
     104  will only trigger on an as-needed basis when the system is idle.
     172  This is the old config entry that enables dynticks idle.
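Line 100 is the NO_HZ_IDLE prompt, and line 172 describes the legacy NO_HZ symbol kept so old configs keep working. A typical .config fragment for a tickless-idle build might look like the following; exact symbols vary by kernel version and architecture:

```
# Tickless idle: stop the periodic tick on idle CPUs
CONFIG_NO_HZ_COMMON=y
CONFIG_NO_HZ_IDLE=y
# Legacy alias retained for old configs
CONFIG_NO_HZ=y
```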
tick-sched.c
     718  ktime_t now, idle;   in get_cpu_idle_time_us() (local)
     726  idle = ts->idle_sleeptime;   in get_cpu_idle_time_us()
     731  idle = ktime_add(ts->idle_sleeptime, delta);   in get_cpu_idle_time_us()
     733  idle = ts->idle_sleeptime;   in get_cpu_idle_time_us()
     737  return ktime_to_us(idle);   in get_cpu_idle_time_us()
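get_cpu_idle_time_us() is the accumulator behind the idle column /proc/stat reports on tickless kernels; the ktime_add() branch folds in a still-in-progress idle period before converting to microseconds. A minimal userspace reader of the aggregate figure, assuming the standard "cpu user nice system idle ..." first line:

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/stat", "r");
	unsigned long long user, nice, sys, idle;

	if (!f) {
		perror("/proc/stat");
		return 1;
	}
	/* First line aggregates all CPUs: cpu user nice system idle ... */
	if (fscanf(f, "cpu %llu %llu %llu %llu",
		   &user, &nice, &sys, &idle) != 4) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Values are in USER_HZ ticks (typically 10 ms each). */
	printf("idle ticks since boot: %llu\n", idle);
	return 0;
}
```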
/kernel/rcu/
Kconfig
      84  only voluntary context switch (not preemption!), idle, and
      94  switches on all online CPUs, including idle ones, so use
     103  these readers to appear in the idle loop as well as on the CPU
     105  idle ones, so use with caution.
     174  bool "Accelerate last non-dyntick-idle CPU's grace periods"
     178  This option permits CPUs to enter dynticks-idle state even if
     260  bool "Tasks Trace RCU readers use memory barriers in user and idle"
     265  to CPUs executing in userspace or idle during tasks trace
/kernel/
cpu.c
     591  struct task_struct *idle = idle_thread_get(cpu);   in bringup_cpu() (local)
     597  scs_task_reset(idle);   in bringup_cpu()
     598  kasan_unpoison_task_stack(idle);   in bringup_cpu()
     608  ret = __cpu_up(cpu, idle);   in bringup_cpu()
     617  struct task_struct *idle = idle_thread_get(cpu);   in finish_cpu() (local)
     618  struct mm_struct *mm = idle->active_mm;   in finish_cpu()
     625  idle->active_mm = &init_mm;   in finish_cpu()
    1371  struct task_struct *idle;   in _cpu_up() (local)
    1390  idle = idle_thread_get(cpu);   in _cpu_up()
    1391  if (IS_ERR(idle)) {   in _cpu_up()
    [all …]
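These are the CPU-hotplug paths: each possible CPU's idle thread is created up front, and idle_thread_get() fetches it so __cpu_up() can install it on the incoming CPU. Userspace reaches _cpu_up() through the sysfs online attribute; a minimal trigger, assuming root and a hotpluggable cpu1:

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Writing "1" onlines the CPU (ending in _cpu_up()); "0" offlines it. */
	int fd = open("/sys/devices/system/cpu/cpu1/online", O_WRONLY);

	if (fd < 0) {
		perror("open cpu1/online");
		return 1;
	}
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
```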
smp.c
      74  u64 idle;   (member)
     305  cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);   in csd_lock_print_extended()
     691  cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,   in flush_smp_call_function_from_idle()
Kconfig.preempt
     110  will execute a task from the same 'core group', forcing idle when no
fork.c
    2563  static inline void init_idle_pids(struct task_struct *idle)   in init_idle_pids() (argument)
    2568  INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */   in init_idle_pids()
    2569  init_task_pid(idle, type, &init_struct_pid);   in init_idle_pids()
workqueue.c
    4833  bool idle = true;   in show_one_workqueue() (local)
    4838  idle = false;   in show_one_workqueue()
    4842  if (idle) /* Nothing to print for idle workqueue */   in show_one_workqueue()
/kernel/debug/kdb/
kdb_main.c
    2327  int idle = 0, daemon = 0;   in kdb_ps_suppressed() (local)
    2333  ++idle;   in kdb_ps_suppressed()
    2339  if (idle || daemon) {   in kdb_ps_suppressed()
    2340  if (idle)   in kdb_ps_suppressed()
    2342  idle, idle == 1 ? "" : "es",   in kdb_ps_suppressed()