Searched refs:dl (Results 1 – 12 of 12) sorted by relevance
/kernel/sched/
deadline.c
     26  return container_of(dl_se, struct task_struct, dl);  in dl_task_of()
     31  return container_of(dl_rq, struct rq, dl);  in rq_of_dl_rq()
     39  return &rq->dl;  in dl_rq_of_se()
    126  return &cpu_rq(i)->dl.dl_bw;  in dl_bw_of()
    222  BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);  in dl_change_utilization()
    228  if (p->dl.dl_non_contending) {  in dl_change_utilization()
    229  sub_running_bw(&p->dl, &rq->dl);  in dl_change_utilization()
    230  p->dl.dl_non_contending = 0;  in dl_change_utilization()
    238  if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)  in dl_change_utilization()
    241  __sub_rq_bw(p->dl.dl_bw, &rq->dl);  in dl_change_utilization()
    [all …]
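The dl_task_of() and rq_of_dl_rq() hits above are the standard container_of() pattern: given a pointer to a sched_dl_entity embedded in a larger struct, recover the enclosing struct. A minimal userspace sketch of the same pattern, with illustrative struct names rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    /* simplified form of the kernel macro: member pointer -> enclosing struct */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dl_entity { unsigned long long deadline; };
    struct task { int pid; struct dl_entity dl; };

    static struct task *task_of(struct dl_entity *dl_se)
    {
            return container_of(dl_se, struct task, dl);
    }

    int main(void)
    {
            struct task t = { .pid = 42 };
            printf("pid=%d\n", task_of(&t.dl)->pid);  /* prints pid=42 */
            return 0;
    }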
cpudeadline.c
     31  u64 orig_dl = cp->elements[idx].dl;  in cpudl_heapify_down()
     46  cp->elements[l].dl)) {  in cpudl_heapify_down()
     48  largest_dl = cp->elements[l].dl;  in cpudl_heapify_down()
     51  cp->elements[r].dl))  in cpudl_heapify_down()
     59  cp->elements[idx].dl = cp->elements[largest].dl;  in cpudl_heapify_down()
     65  cp->elements[idx].dl = orig_dl;  in cpudl_heapify_down()
     74  u64 orig_dl = cp->elements[idx].dl;  in cpudl_heapify_up()
     81  if (dl_time_before(orig_dl, cp->elements[p].dl))  in cpudl_heapify_up()
     85  cp->elements[idx].dl = cp->elements[p].dl;  in cpudl_heapify_up()
     91  cp->elements[idx].dl = orig_dl;  in cpudl_heapify_up()
    [all …]
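cpudl_heapify_up() and cpudl_heapify_down() above use a sift-with-hole idiom: the moving key is saved once (orig_dl), parents or children are shifted into the hole, and the key is written back only at its final slot, saving one store per level compared with swapping. A sketch of the sift-up half over a bare array of u64 deadlines; the kernel's heap additionally tracks per-CPU index mappings, which are omitted here:

    #include <stdint.h>
    #include <stdio.h>

    /* wraparound-safe "a is earlier than b", as in the kernel's dl_time_before() */
    static inline int dl_time_before(uint64_t a, uint64_t b)
    {
            return (int64_t)(a - b) < 0;
    }

    static void heapify_up(uint64_t *heap, int idx)
    {
            uint64_t orig_dl = heap[idx];   /* save the moving key once */
            int p;

            while (idx > 0) {
                    p = (idx - 1) / 2;
                    /* max-heap on deadlines: stop once the parent is later */
                    if (dl_time_before(orig_dl, heap[p]))
                            break;
                    heap[idx] = heap[p];    /* pull the parent into the hole */
                    idx = p;
            }
            heap[idx] = orig_dl;            /* single final store */
    }

    int main(void)
    {
            uint64_t heap[4] = { 400, 300, 200, 500 };  /* 500 just pushed at idx 3 */

            heapify_up(heap, 3);
            printf("root=%llu\n", (unsigned long long)heap[0]);  /* prints root=500 */
            return 0;
    }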
cpudeadline.h
      6  u64 dl;  member
     20  void cpudl_set(struct cpudl *cp, int cpu, u64 dl);
sched.h
    341  return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;  in dl_task_fits_capacity()
    955  struct dl_rq dl;  member
   1934  return rq->dl.dl_nr_running > 0;  in sched_dl_runnable()
   2376  rq->dl.extra_bw += bw;  in __dl_update()
   2383  struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);  in __dl_update()  local
   2385  dl->extra_bw += bw;  in __dl_update()
   2652  return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;  in cpu_bw_dl()
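The dl_task_fits_capacity() hit at sched.h:341 scales the task's relative deadline by the CPU's capacity (cap_scale() shifts right by SCHED_CAPACITY_SHIFT, i.e. 10) and checks that the result still covers the runtime; equivalently, it asks whether runtime/deadline <= capacity/1024. A worked example with illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT    10
    #define cap_scale(v, cap)       (((v) * (cap)) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
            uint64_t dl_runtime  =  5000000;  /* 5 ms of budget ...            */
            uint64_t dl_deadline = 10000000;  /* ... every 10 ms => 50% demand */
            unsigned long cap    = 446;       /* small core, ~44% of 1024      */

            /* 10 ms * 446 / 1024 ~= 4.36 ms < 5 ms, so the task does not fit */
            printf("fits: %d\n", cap_scale(dl_deadline, cap) >= dl_runtime);
            return 0;
    }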
core.c
    804  if (rq->dl.dl_nr_running)  in sched_can_stop_tick()
   3325  RB_CLEAR_NODE(&p->dl.rb_node);  in __sched_fork()
   3326  init_dl_task_timer(&p->dl);  in __sched_fork()
   3327  init_dl_inactive_task_timer(&p->dl);  in __sched_fork()
   5197  dl_entity_preempt(&pi_task->dl, &p->dl))) {  in rt_mutex_setprio()
   5198  p->dl.pi_se = pi_task->dl.pi_se;  in rt_mutex_setprio()
   5201  p->dl.pi_se = &p->dl;  in rt_mutex_setprio()
   5205  p->dl.pi_se = &p->dl;  in rt_mutex_setprio()
   5210  p->dl.pi_se = &p->dl;  in rt_mutex_setprio()
   7543  init_dl_rq(&rq->dl);  in sched_init()
debug.c
   1021  P(dl.runtime);  in proc_sched_show_task()
   1022  P(dl.deadline);  in proc_sched_show_task()
/kernel/trace/
trace_probe_tmpl.h
    199  u32 *dl;  /* Data location */  in store_trace_args()  local
    204  dl = data + arg->offset;  in store_trace_args()
    207  *dl = make_data_loc(maxlen, dyndata - base);  in store_trace_args()
    208  ret = process_fetch_insn(arg->code, rec, dl, base);  in store_trace_args()
    211  *dl = make_data_loc(0, dyndata - base);  in store_trace_args()
trace_probe.h
     61  #define get_loc_len(dl) ((u32)(dl) >> 16)  argument
     62  #define get_loc_offs(dl) ((u32)(dl) & 0xffff)  argument
     64  static nokprobe_inline void *get_loc_data(u32 *dl, void *ent)  in get_loc_data()  argument
     66  return (u8 *)ent + get_loc_offs(*dl);  in get_loc_data()
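The macros above decode the "data location" word written by store_trace_args() in trace_probe_tmpl.h: a single u32 packs a 16-bit length in the high half and a 16-bit offset from the trace entry in the low half. A self-contained sketch of the encode/decode round trip, with make_data_loc() mirroring the kernel macro:

    #include <stdint.h>
    #include <stdio.h>

    #define make_data_loc(len, offs)  (((uint32_t)(len) << 16) | ((offs) & 0xffff))
    #define get_loc_len(dl)           ((uint32_t)(dl) >> 16)
    #define get_loc_offs(dl)          ((uint32_t)(dl) & 0xffff)

    int main(void)
    {
            uint32_t dl = make_data_loc(24, 0x40);  /* 24 bytes at entry + 0x40 */

            printf("len=%u offs=0x%x\n", get_loc_len(dl), get_loc_offs(dl));
            return 0;
    }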
/kernel/locking/
lockdep_proc.c
    409  const struct lock_stat_data *dl = l, *dr = r;  in lock_stat_cmp()  local
    412  nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;  in lock_stat_cmp()
rtmutex.c
    231  &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
    682  waiter->deadline = task->dl.deadline;  in rt_mutex_adjust_prio_chain()
    955  waiter->deadline = task->dl.deadline;  in task_blocks_on_rt_mutex()
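The rtmutex.c hit at line 231 wraps a task in a throwaway rt_mutex_waiter built as a C99 compound literal, so a bare task can be compared through the same waiter-ordering code that sorts real waiters by prio and then, for deadline tasks, by deadline. A minimal sketch of the idiom with an illustrative struct and comparison:

    #include <stdio.h>

    struct waiter { int prio; unsigned long long deadline; };

    static int waiter_less(const struct waiter *a, const struct waiter *b)
    {
            if (a->prio != b->prio)
                    return a->prio < b->prio;  /* lower value = higher priority */
            /* equal priority: wraparound-safe earlier-deadline-first tie-break */
            return (long long)(a->deadline - b->deadline) < 0;
    }

    int main(void)
    {
            struct waiter queued = { .prio = 10, .deadline = 2000 };

            /* compare a bare task by wrapping it in a temporary waiter */
            int preempts = waiter_less(
                    &(struct waiter){ .prio = 10, .deadline = 1000 },
                    &queued);
            printf("%d\n", preempts);  /* 1: earlier deadline wins at equal prio */
            return 0;
    }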
/kernel/time/
posix-cpu-timers.c
    808  if (tsk->dl.dl_overrun) {  in check_dl_overrun()
    809  tsk->dl.dl_overrun = 0;  in check_dl_overrun()
   1091  if (dl_task(tsk) && tsk->dl.dl_overrun)  in fastpath_timer_check()
/kernel/cgroup/
cpuset.c
   2247  cs->sum_migrate_dl_bw += task->dl.dl_bw;  in cpuset_can_attach()