Searched refs:sd (Results 1 – 14 of 14) sorted by relevance

/kernel/sched/
topology.c
38 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one() argument
41 struct sched_group *group = sd->groups; in sched_domain_debug_one()
42 unsigned long flags = sd->flags; in sched_domain_debug_one()
49 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
51 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
62 if ((meta_flags & SDF_SHARED_CHILD) && sd->child && in sched_domain_debug_one()
63 !(sd->child->flags & flag)) in sched_domain_debug_one()
67 if ((meta_flags & SDF_SHARED_PARENT) && sd->parent && in sched_domain_debug_one()
68 !(sd->parent->flags & flag)) in sched_domain_debug_one()
87 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
[all …]
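
The checks at 62–68 enforce the SDF_SHARED_CHILD / SDF_SHARED_PARENT contract: a domain flag carrying one of these meta-flags must also be set on the child or parent domain, respectively. A minimal userspace sketch of that walk, with made-up flag values and a simplified stand-in for struct sched_domain:

#include <stdio.h>

#define SDF_SHARED_CHILD  0x1   /* flag must also be set on the child */
#define SDF_SHARED_PARENT 0x2   /* flag must also be set on the parent */

struct dom {
        unsigned long flags;
        struct dom *child, *parent;
};

/* Report flags whose sharing contract is violated, as the debug code does. */
static void check_dom(const struct dom *sd, unsigned long flag,
                      unsigned int meta_flags)
{
        if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
            !(sd->child->flags & flag))
                printf("flag %#lx set here but missing on child\n", flag);

        if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
            !(sd->parent->flags & flag))
                printf("flag %#lx set here but missing on parent\n", flag);
}

int main(void)
{
        struct dom child = { .flags = 0 };
        struct dom top = { .flags = 0x10, .child = &child };

        child.parent = &top;
        check_dom(&top, 0x10, SDF_SHARED_CHILD);  /* fires: child lacks 0x10 */
        return 0;
}
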
stats.c
128 struct sched_domain *sd; in show_schedstat() local
148 for_each_domain(cpu, sd) { in show_schedstat()
152 cpumask_pr_args(sched_domain_span(sd))); in show_schedstat()
156 sd->lb_count[itype], in show_schedstat()
157 sd->lb_balanced[itype], in show_schedstat()
158 sd->lb_failed[itype], in show_schedstat()
159 sd->lb_imbalance[itype], in show_schedstat()
160 sd->lb_gained[itype], in show_schedstat()
161 sd->lb_hot_gained[itype], in show_schedstat()
162 sd->lb_nobusyq[itype], in show_schedstat()
[all …]
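
The lb_* counters at 156–162 are arrays indexed by idle type (the kernel's enum cpu_idle_type), so each domain prints one row of load-balance statistics per type. A simplified sketch of that layout, with an assumed three-value enum standing in for the kernel's:

#include <stdio.h>

enum idle_type { ST_IDLE, ST_BUSY, ST_NEWLY_IDLE, ST_MAX };

struct lb_stats {
        unsigned int lb_count[ST_MAX];
        unsigned int lb_balanced[ST_MAX];
        unsigned int lb_failed[ST_MAX];
};

int main(void)
{
        struct lb_stats sd = { .lb_count = { 3, 7, 1 } };

        /* One output row per idle type, as in the /proc/schedstat dump. */
        for (int itype = 0; itype < ST_MAX; itype++)
                printf("%u %u %u\n", sd.lb_count[itype],
                       sd.lb_balanced[itype], sd.lb_failed[itype]);
        return 0;
}
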
fair.c
2177 struct sched_domain *sd; in task_numa_migrate() local
2192 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
2193 if (sd) { in task_numa_migrate()
2194 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
2195 env.imb_numa_nr = sd->imb_numa_nr; in task_numa_migrate()
2205 if (unlikely(!sd)) { in task_numa_migrate()
6468 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight() argument
6495 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; in wake_affine_weight()
6510 static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine() argument
6519 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); in wake_affine()
[all …]
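
The expression at 2194 (and again at 6495) halves the domain's imbalance allowance: only half of the headroom above 100% is kept. A worked sketch, assuming the common sd_init() default of 117:

#include <stdio.h>

int main(void)
{
        int imbalance_pct = 117;                       /* assumed domain setting */
        int halved = 100 + (imbalance_pct - 100) / 2;  /* 100 + 17/2 = 108 */

        printf("%d%% -> %d%%\n", imbalance_pct, halved);
        return 0;
}
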
sched.h
1060 struct sched_domain __rcu *sd; member
1806 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1820 struct sched_domain *sd, *hsd = NULL; in highest_flag_domain() local
1822 for_each_domain(cpu, sd) { in highest_flag_domain()
1823 if (!(sd->flags & flag)) in highest_flag_domain()
1825 hsd = sd; in highest_flag_domain()
1833 struct sched_domain *sd; in lowest_flag_domain() local
1835 for_each_domain(cpu, sd) { in lowest_flag_domain()
1836 if (sd->flags & flag) in lowest_flag_domain()
1840 return sd; in lowest_flag_domain()
[all …]
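
highest_flag_domain() and lowest_flag_domain() both walk the hierarchy bottom-up via for_each_domain(): the first hit is the lowest matching domain, while remembering the last hit (and stopping at the first miss, which is valid for SDF_SHARED_CHILD flags set contiguously from the bottom) yields the highest. A self-contained sketch with a simplified domain struct:

#include <stdio.h>

struct dom { unsigned long flags; struct dom *parent; const char *name; };

/* First match walking bottom-up = lowest domain with the flag. */
static struct dom *lowest_flag_dom(struct dom *sd, unsigned long flag)
{
        for (; sd; sd = sd->parent)
                if (sd->flags & flag)
                        return sd;
        return NULL;
}

/* Keep the last match; bail at the first miss (SHARED_CHILD semantics). */
static struct dom *highest_flag_dom(struct dom *sd, unsigned long flag)
{
        struct dom *hsd = NULL;

        for (; sd; sd = sd->parent) {
                if (!(sd->flags & flag))
                        break;
                hsd = sd;
        }
        return hsd;
}

int main(void)
{
        struct dom top = { .flags = 0x1, .parent = NULL, .name = "top" };
        struct dom base = { .flags = 0x1, .parent = &top, .name = "base" };

        printf("lowest=%s highest=%s\n",
               lowest_flag_dom(&base, 0x1)->name,
               highest_flag_dom(&base, 0x1)->name);
        return 0;
}
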
debug.c
378 static void register_sd(struct sched_domain *sd, struct dentry *parent) in register_sd() argument
381 debugfs_create_##type(#member, mode, parent, &sd->member) in register_sd()
393 debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops); in register_sd()
417 struct sched_domain *sd; in update_sched_domain_debugfs() local
426 for_each_domain(cpu, sd) { in update_sched_domain_debugfs()
432 register_sd(sd, d_sd); in update_sched_domain_debugfs()
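
register_sd() builds one debugfs file per tunable with a helper macro: #member stringizes the field name for the file, and debugfs_create_##type pastes the field's type into the creator function's name. A userspace sketch of the same token-pasting idiom, with invented helper names:

#include <stdio.h>

static void create_u32(const char *name, unsigned int *val)
{
        printf("u32 file %s -> %u\n", name, *val);
}

static void create_u64(const char *name, unsigned long long *val)
{
        printf("u64 file %s -> %llu\n", name, *val);
}

struct obj { unsigned int min; unsigned long long max; };

/* #member stringizes the field name; create_##type selects the helper. */
#define REGISTER(type, member) create_##type(#member, &o.member)

int main(void)
{
        struct obj o = { .min = 1, .max = 2 };

        REGISTER(u32, min);  /* expands to create_u32("min", &o.min) */
        REGISTER(u64, max);
        return 0;
}
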
deadline.c
2135 struct sched_domain *sd; in find_later_rq() local
2176 for_each_domain(cpu, sd) { in find_later_rq()
2177 if (sd->flags & SD_WAKE_AFFINE) { in find_later_rq()
2185 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { in find_later_rq()
2191 sched_domain_span(sd)); in find_later_rq()
rt.c
1976 struct sched_domain *sd; in find_lowest_rq() local
2035 for_each_domain(cpu, sd) { in find_lowest_rq()
2036 if (sd->flags & SD_WAKE_AFFINE) { in find_lowest_rq()
2044 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { in find_lowest_rq()
2050 sched_domain_span(sd)); in find_lowest_rq()
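
find_later_rq() (deadline.c, above) and find_lowest_rq() here share the same heuristic: among the candidate CPUs, prefer one inside a wake-affine domain that also contains this_cpu, i.e. a topologically close one. A sketch using plain bitmasks in place of sched_domain_span(); the kernel's actual candidate selection differs in detail:

#include <stdio.h>

#define SD_WAKE_AFFINE 0x1

struct dom { int flags; unsigned long span; struct dom *parent; };

static int find_close_cpu(struct dom *sd, int this_cpu, unsigned long candidates)
{
        for (; sd; sd = sd->parent) {
                unsigned long hit;

                if (!(sd->flags & SD_WAKE_AFFINE))
                        continue;
                if (!(sd->span & (1UL << this_cpu)))   /* must contain this_cpu */
                        continue;
                hit = sd->span & candidates;
                if (hit)
                        return __builtin_ctzl(hit);    /* first candidate in span */
        }
        return -1;
}

int main(void)
{
        struct dom pkg  = { SD_WAKE_AFFINE, 0xFUL, NULL };  /* CPUs 0-3 */
        struct dom core = { SD_WAKE_AFFINE, 0x3UL, &pkg };  /* CPUs 0-1 */

        /* Candidates are CPUs 1 and 3; CPU 1 shares the smaller domain. */
        printf("best = %d\n", find_close_cpu(&core, 0, (1UL << 1) | (1UL << 3)));
        return 0;
}
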
core.c
1092 struct sched_domain *sd; in get_nohz_timer_target() local
1109 for_each_domain(cpu, sd) { in get_nohz_timer_target()
1110 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) { in get_nohz_timer_target()
3744 struct sched_domain *sd; in ttwu_stat() local
3748 for_each_domain(rq->cpu, sd) { in ttwu_stat()
3749 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { in ttwu_stat()
3750 __schedstat_inc(sd->ttwu_wake_remote); in ttwu_stat()
6378 static bool steal_cookie_task(int cpu, struct sched_domain *sd) in steal_cookie_task() argument
6382 for_each_cpu_wrap(i, sched_domain_span(sd), cpu) { in steal_cookie_task()
6398 struct sched_domain *sd; in sched_core_balance() local
[all …]
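
steal_cookie_task() at 6382 scans the domain span with for_each_cpu_wrap(), which starts the walk at a given CPU and wraps around, so different callers spread their searches instead of all probing the lowest-numbered CPU first. A bitmask sketch of that iteration order:

#include <stdio.h>

/* Visit every set bit of mask, starting at `start` and wrapping. */
static void for_each_wrap(unsigned long mask, int nbits, int start)
{
        for (int n = 0; n < nbits; n++) {
                int i = (start + n) % nbits;

                if (mask & (1UL << i))
                        printf("%d ", i);   /* visit CPU i */
        }
        printf("\n");
}

int main(void)
{
        /* span = CPUs {0,1,3,4}, starting the walk at CPU 3 */
        for_each_wrap(0x1BUL, 5, 3);        /* prints: 3 4 0 1 */
        return 0;
}
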
/kernel/entry/
syscall_user_dispatch.c
36 struct syscall_user_dispatch *sd = &current->syscall_dispatch; in syscall_user_dispatch() local
39 if (likely(instruction_pointer(regs) - sd->offset < sd->len)) in syscall_user_dispatch()
45 if (likely(sd->selector)) { in syscall_user_dispatch()
50 if (unlikely(__get_user(state, sd->selector))) { in syscall_user_dispatch()
64 sd->on_dispatch = true; in syscall_user_dispatch()
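
The test at 39 is the classic unsigned-wraparound range check: with unsigned arithmetic, (ip - offset < len) is a single comparison equivalent to offset <= ip < offset + len, because an ip below offset wraps to a huge value and fails. A standalone demonstration:

#include <stdio.h>

static int in_range(unsigned long ip, unsigned long offset, unsigned long len)
{
        return ip - offset < len;
}

int main(void)
{
        printf("%d %d %d\n",
               in_range(0x1000, 0x1000, 0x100),   /* 1: at start      */
               in_range(0x10ff, 0x1000, 0x100),   /* 1: last byte     */
               in_range(0x0fff, 0x1000, 0x100));  /* 0: wraps to huge */
        return 0;
}
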
/kernel/
seccomp.c
175 const struct seccomp_data *sd) in seccomp_cache_check_allow() argument
241 static void populate_seccomp_data(struct seccomp_data *sd) in populate_seccomp_data() argument
251 sd->nr = syscall_get_nr(task, regs); in populate_seccomp_data()
252 sd->arch = syscall_get_arch(task); in populate_seccomp_data()
254 sd->args[0] = args[0]; in populate_seccomp_data()
255 sd->args[1] = args[1]; in populate_seccomp_data()
256 sd->args[2] = args[2]; in populate_seccomp_data()
257 sd->args[3] = args[3]; in populate_seccomp_data()
258 sd->args[4] = args[4]; in populate_seccomp_data()
259 sd->args[5] = args[5]; in populate_seccomp_data()
[all …]
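
populate_seccomp_data() fills the same struct seccomp_data that userspace-installed BPF filters read; the layout below mirrors the UAPI definition in <linux/seccomp.h> (the mirror struct and sample values here are illustrative):

#include <stdint.h>
#include <stdio.h>

struct seccomp_data_mirror {
        int32_t  nr;                  /* syscall number */
        uint32_t arch;                /* AUDIT_ARCH_* value */
        uint64_t instruction_pointer; /* userspace IP at syscall entry */
        uint64_t args[6];             /* raw syscall arguments */
};

int main(void)
{
        struct seccomp_data_mirror sd = { .nr = 1 /* write on x86-64 */ };

        sd.args[0] = 2;               /* fd argument, say stderr */
        printf("nr=%d arg0=%llu size=%zu\n", sd.nr,
               (unsigned long long)sd.args[0], sizeof(sd));
        return 0;
}
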
/kernel/irq/
debugfs.c
19 const struct irq_bit_descr *sd, int size) in irq_debug_show_bits() argument
23 for (i = 0; i < size; i++, sd++) { in irq_debug_show_bits()
24 if (state & sd->mask) in irq_debug_show_bits()
25 seq_printf(m, "%*s%s\n", ind + 12, "", sd->name); in irq_debug_show_bits()
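
irq_debug_show_bits() decodes a state word against a table of {mask, name} descriptors, printing the name of each set bit. A compact userspace version of the same decode-table idiom, with invented flag names:

#include <stdio.h>

struct bit_descr { unsigned int mask; const char *name; };

static void show_bits(unsigned int state, const struct bit_descr *sd, int size)
{
        for (int i = 0; i < size; i++, sd++)
                if (state & sd->mask)
                        printf("  %s\n", sd->name);
}

int main(void)
{
        static const struct bit_descr flags[] = {
                { 0x1, "ACTIVE" }, { 0x2, "MASKED" }, { 0x4, "PENDING" },
        };

        show_bits(0x5, flags, 3);   /* prints ACTIVE and PENDING */
        return 0;
}
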
/kernel/trace/
bpf_trace.c
622 u64 flags, struct perf_sample_data *sd) in __bpf_perf_event_output() argument
647 return perf_event_output(event, sd, regs); in __bpf_perf_event_output()
670 struct perf_sample_data *sd; in BPF_CALL_5() local
682 sd = &sds->sds[nest_level - 1]; in BPF_CALL_5()
689 perf_sample_data_init(sd, 0, 0); in BPF_CALL_5()
690 sd->raw = &raw; in BPF_CALL_5()
691 sd->sample_flags |= PERF_SAMPLE_RAW; in BPF_CALL_5()
693 err = __bpf_perf_event_output(regs, map, flags, sd); in BPF_CALL_5()
735 struct perf_sample_data *sd; in bpf_event_output() local
747 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); in bpf_event_output()
[all …]
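
The BPF_CALL_5 body at 682 indexes a per-CPU array of perf_sample_data slots by a nesting counter, so a program that interrupts another (e.g. in NMI context) gets its own slot instead of clobbering the outer one. A single-threaded sketch of that slot discipline, with sizes and names assumed:

#include <stdio.h>

#define MAX_NEST 3

static int nest_level;
static int sds[MAX_NEST];          /* stand-in for the sample-data slots */

static int *get_slot(void)
{
        if (nest_level >= MAX_NEST)
                return NULL;       /* nested too deep: caller must bail out */
        return &sds[nest_level++];
}

static void put_slot(void)
{
        nest_level--;
}

int main(void)
{
        int *outer = get_slot();   /* level 0 */
        int *inner = get_slot();   /* "interrupting" use, level 1 */

        printf("distinct slots: %d\n", outer != inner);
        put_slot();
        put_slot();
        return 0;
}
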
/kernel/rcu/
rcutorture.c
2614 int sd; in rcu_torture_fwd_prog_nr() local
2637 sd = cur_ops->stall_dur() + 1; in rcu_torture_fwd_prog_nr()
2638 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; in rcu_torture_fwd_prog_nr()
2639 dur = sd4 + torture_random(&trs) % (sd - sd4); in rcu_torture_fwd_prog_nr()
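
Lines 2637–2639 pick the forward-progress test duration uniformly from [ceil(sd/div), sd): sd4 is the rounded-up quotient and the random term spans the rest. A worked sketch with assumed inputs sd = 21, div = 4:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int sd = 21, div = 4;
        int sd4 = (sd + div - 1) / div;       /* ceil(21/4) = 6 */
        int dur = sd4 + rand() % (sd - sd4);  /* uniform in [6, 20] */

        printf("dur in [%d, %d): got %d\n", sd4, sd, dur);
        return 0;
}
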
/kernel/events/
core.c
3072 struct stop_event_data *sd = info; in __perf_event_stop() local
3073 struct perf_event *event = sd->event; in __perf_event_stop()
3100 if (sd->restart) in __perf_event_stop()
3108 struct stop_event_data sd = { in perf_event_stop() local
3127 __perf_event_stop, &sd); in perf_event_stop()
7863 struct stop_event_data sd = { in __perf_event_output_stop() local
7884 ro->err = __perf_event_stop(&sd); in __perf_event_output_stop()
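
perf_event_stop() and __perf_event_output_stop() both bundle their arguments into an on-stack struct stop_event_data and pass its address to a remote-CPU call whose callback receives void * and casts back. A local sketch of that argument-bundle pattern, with the cross-CPU call reduced to a direct invocation:

#include <stdio.h>

struct stop_data { const char *event; int restart; };

/* Stand-in for the cross-CPU callback signature: one void * argument. */
static int do_stop(void *info)
{
        struct stop_data *sd = info;

        printf("stopping %s%s\n", sd->event, sd->restart ? " (restart)" : "");
        return 0;
}

/* Stand-in for cpu_function_call(): here we just invoke directly. */
static int remote_call(int (*fn)(void *), void *info)
{
        return fn(info);
}

int main(void)
{
        struct stop_data sd = { .event = "cycles", .restart = 1 };

        return remote_call(do_stop, &sd);
}
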