Searched refs:sd (Results 1 – 13 of 13) sorted by relevance

/kernel/sched/
topology.c
28 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, in sched_domain_debug_one() argument
31 struct sched_group *group = sd->groups; in sched_domain_debug_one()
37 if (!(sd->flags & SD_LOAD_BALANCE)) { in sched_domain_debug_one()
39 if (sd->parent) in sched_domain_debug_one()
45 cpumask_pr_args(sched_domain_span(sd)), sd->name); in sched_domain_debug_one()
47 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one()
68 if (!(sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
81 if ((sd->flags & SD_OVERLAP) && in sched_domain_debug_one()
90 if (group == sd->groups && sd->child && in sched_domain_debug_one()
91 !cpumask_equal(sched_domain_span(sd->child), in sched_domain_debug_one()
[all …]
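
Note: sched_domain_debug_one() above validates one level of the scheduling-domain hierarchy, including that a child domain's CPU span nests inside its parent's. Below is a minimal userspace sketch of that nesting check; the toy_domain type and plain-bitmask spans are illustrative stand-ins, not the kernel's cpumask-based definitions.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's types: a domain's CPU span
     * is modeled as a plain 64-bit mask instead of a struct cpumask. */
    struct toy_domain {
        unsigned long span;          /* which CPUs this domain covers */
        struct toy_domain *child;    /* next lower level, or NULL */
    };

    /* Mirror of the debug check in the snippet above: a child domain's
     * span must be a subset of its parent's span. */
    static int spans_nest(const struct toy_domain *sd)
    {
        if (!sd->child)
            return 1;
        return (sd->child->span & ~sd->span) == 0;
    }

    int main(void)
    {
        struct toy_domain core = { .span = 0x3, .child = NULL };   /* CPUs 0-1 */
        struct toy_domain node = { .span = 0xf, .child = &core };  /* CPUs 0-3 */

        printf("nesting ok: %d\n", spans_nest(&node)); /* prints 1 */
        return 0;
    }
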
stats.c
25 struct sched_domain *sd; in show_schedstat() local
45 for_each_domain(cpu, sd) { in show_schedstat()
49 cpumask_pr_args(sched_domain_span(sd))); in show_schedstat()
53 sd->lb_count[itype], in show_schedstat()
54 sd->lb_balanced[itype], in show_schedstat()
55 sd->lb_failed[itype], in show_schedstat()
56 sd->lb_imbalance[itype], in show_schedstat()
57 sd->lb_gained[itype], in show_schedstat()
58 sd->lb_hot_gained[itype], in show_schedstat()
59 sd->lb_nobusyq[itype], in show_schedstat()
[all …]
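
Note: show_schedstat() prints one column of load-balance counters per idle type (the itype index above). A compilable sketch of that per-idle-type counter layout follows, with a hypothetical stand-in for the kernel's enum cpu_idle_type.

    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's enum cpu_idle_type:
     * load-balance statistics are kept per "idleness at balance time". */
    enum idle_type { TOY_IDLE, TOY_BUSY, TOY_NEWLY_IDLE, TOY_NR_TYPES };

    struct toy_domain_stats {
        unsigned int lb_count[TOY_NR_TYPES];
        unsigned int lb_balanced[TOY_NR_TYPES];
        unsigned int lb_failed[TOY_NR_TYPES];
    };

    int main(void)
    {
        struct toy_domain_stats sd = {
            .lb_count    = { 10, 200, 5 },
            .lb_balanced = {  8, 150, 5 },
            .lb_failed   = {  2,  50, 0 },
        };

        /* One row per idle type, like the schedstat output above. */
        for (int itype = 0; itype < TOY_NR_TYPES; itype++)
            printf("%u %u %u\n", sd.lb_count[itype],
                   sd.lb_balanced[itype], sd.lb_failed[itype]);
        return 0;
    }
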
fair.c
1752 struct sched_domain *sd; in task_numa_migrate() local
1767 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
1768 if (sd) in task_numa_migrate()
1769 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
1778 if (unlikely(!sd)) { in task_numa_migrate()
5494 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight() argument
5521 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; in wake_affine_weight()
5536 static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine() argument
5545 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); in wake_affine()
5551 schedstat_inc(sd->ttwu_move_affine); in wake_affine()
[all …]
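
Note: both task_numa_migrate() and wake_affine_weight() above apply the same arithmetic, 100 + (imbalance_pct - 100) / 2, which halves the domain's imbalance margin; e.g. a 125% threshold becomes 112%. A compilable sketch:

    #include <stdio.h>

    /* The NUMA-migration and wake-affine paths above both halve the
     * domain's imbalance margin: the part of the percentage above 100
     * is divided by two before use. */
    static int halved_imbalance_pct(int imbalance_pct)
    {
        return 100 + (imbalance_pct - 100) / 2;
    }

    int main(void)
    {
        printf("%d\n", halved_imbalance_pct(125)); /* prints 112 */
        printf("%d\n", halved_imbalance_pct(117)); /* prints 108 */
        return 0;
    }
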
debug.c
249 sd_alloc_ctl_domain_table(struct sched_domain *sd) in sd_alloc_ctl_domain_table() argument
256 …set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulo… in sd_alloc_ctl_domain_table()
257 …set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulo… in sd_alloc_ctl_domain_table()
258 …set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointve… in sd_alloc_ctl_domain_table()
259 …set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_doi… in sd_alloc_ctl_domain_table()
260 …set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, pro… in sd_alloc_ctl_domain_table()
261 set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax); in sd_alloc_ctl_domain_table()
262 …set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, pr… in sd_alloc_ctl_domain_table()
263 set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring); in sd_alloc_ctl_domain_table()
272 struct sched_domain *sd; in sd_alloc_ctl_cpu_table() local
[all …]
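
Note: sd_alloc_ctl_domain_table() fills one sysctl table entry per tunable domain field. Below is a simplified sketch of that table-filling pattern; the toy_table_entry type is a stand-in and omits the proc handler argument that the real set_table_entry() takes.

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for a sysctl table entry: name, pointer to the field,
     * its size, and a file mode. */
    struct toy_table_entry {
        const char *name;
        void *data;
        size_t maxlen;
        int mode;
    };

    struct toy_domain {
        unsigned long min_interval;
        unsigned long max_interval;
        int busy_factor;
    };

    static void set_entry(struct toy_table_entry *e, const char *name,
                          void *data, size_t maxlen, int mode)
    {
        e->name = name;
        e->data = data;
        e->maxlen = maxlen;
        e->mode = mode;
    }

    int main(void)
    {
        struct toy_domain sd = { 1, 8, 16 };
        struct toy_table_entry table[3];

        set_entry(&table[0], "min_interval", &sd.min_interval, sizeof(long), 0644);
        set_entry(&table[1], "max_interval", &sd.max_interval, sizeof(long), 0644);
        set_entry(&table[2], "busy_factor", &sd.busy_factor, sizeof(int), 0644);

        for (int i = 0; i < 3; i++)
            printf("%s (mode %o)\n", table[i].name, table[i].mode);
        return 0;
    }
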
sched.h
930 struct sched_domain __rcu *sd; member
1347 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1350 #define for_each_lower_domain(sd) for (; sd; sd = sd->child) argument
1363 struct sched_domain *sd, *hsd = NULL; in highest_flag_domain() local
1365 for_each_domain(cpu, sd) { in highest_flag_domain()
1366 if (!(sd->flags & flag)) in highest_flag_domain()
1368 hsd = sd; in highest_flag_domain()
1376 struct sched_domain *sd; in lowest_flag_domain() local
1378 for_each_domain(cpu, sd) { in lowest_flag_domain()
1379 if (sd->flags & flag) in lowest_flag_domain()
[all …]
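
Note: highest_flag_domain() above walks from the lowest domain upward via for_each_domain() and remembers the last level that still carries the flag, while lowest_flag_domain() returns the first match. A standalone sketch of the former, using an explicit parent pointer in place of the kernel macro:

    #include <stdio.h>

    #define TOY_SD_SHARE 0x1   /* hypothetical flag bit */

    struct toy_domain {
        int flags;
        struct toy_domain *parent;  /* next higher level, or NULL */
    };

    /* Walk upward; keep the last domain that still has the flag set.
     * As in the snippet above, the loop breaks at the first level
     * without the flag. */
    static struct toy_domain *highest_flag_domain(struct toy_domain *sd, int flag)
    {
        struct toy_domain *hsd = NULL;

        for (; sd; sd = sd->parent) {
            if (!(sd->flags & flag))
                break;
            hsd = sd;
        }
        return hsd;
    }

    int main(void)
    {
        struct toy_domain node = { 0, NULL };
        struct toy_domain core = { TOY_SD_SHARE, &node };
        struct toy_domain smt  = { TOY_SD_SHARE, &core };

        /* &core is the highest level still carrying the flag. */
        printf("%d\n", highest_flag_domain(&smt, TOY_SD_SHARE) == &core); /* 1 */
        return 0;
    }
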
rt.c
1684 struct sched_domain *sd; in find_lowest_rq() local
1719 for_each_domain(cpu, sd) { in find_lowest_rq()
1720 if (sd->flags & SD_WAKE_AFFINE) { in find_lowest_rq()
1728 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { in find_lowest_rq()
1734 sched_domain_span(sd)); in find_lowest_rq()
deadline.c
1878 struct sched_domain *sd; in find_later_rq() local
1919 for_each_domain(cpu, sd) { in find_later_rq()
1920 if (sd->flags & SD_WAKE_AFFINE) { in find_later_rq()
1928 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { in find_later_rq()
1934 sched_domain_span(sd)); in find_later_rq()
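
Note: find_lowest_rq() above and find_later_rq() share the same shape: walk the domain hierarchy and, for SD_WAKE_AFFINE domains, prefer a candidate CPU that falls inside the domain's span, i.e. one that is topologically close. A sketch with spans as plain bitmasks (illustrative, not the kernel's cpumask API):

    #include <stdio.h>

    #define TOY_SD_WAKE_AFFINE 0x1

    struct toy_domain {
        int flags;
        unsigned long span;          /* CPUs covered, as a bitmask */
        struct toy_domain *parent;
    };

    /* Mirrors the shared pattern in find_lowest_rq()/find_later_rq():
     * prefer this_cpu if some wake-affine domain contains it. */
    static int prefer_this_cpu(struct toy_domain *sd, int this_cpu)
    {
        for (; sd; sd = sd->parent) {
            if ((sd->flags & TOY_SD_WAKE_AFFINE) &&
                (sd->span & (1UL << this_cpu)))
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct toy_domain node = { TOY_SD_WAKE_AFFINE, 0xf, NULL };  /* CPUs 0-3 */
        struct toy_domain core = { TOY_SD_WAKE_AFFINE, 0x3, &node }; /* CPUs 0-1 */

        printf("%d\n", prefer_this_cpu(&core, 1)); /* 1: CPU 1 is close */
        printf("%d\n", prefer_this_cpu(&core, 5)); /* 0: CPU 5 is outside */
        return 0;
    }
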
core.c
556 struct sched_domain *sd; in get_nohz_timer_target() local
562 for_each_domain(cpu, sd) { in get_nohz_timer_target()
563 for_each_cpu(i, sched_domain_span(sd)) { in get_nohz_timer_target()
2190 struct sched_domain *sd; in ttwu_stat() local
2194 for_each_domain(rq->cpu, sd) { in ttwu_stat()
2195 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { in ttwu_stat()
2196 __schedstat_inc(sd->ttwu_wake_remote); in ttwu_stat()
6665 rq->sd = NULL; in sched_init()
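
Note: get_nohz_timer_target() above scans each domain level outward, and each CPU within its span, until one satisfies a predicate (a non-idle CPU). A minimal sketch of that nested scan, limited to 32 CPUs for brevity; the idle[] array is a stand-in for the kernel's idle_cpu() check.

    #include <stdio.h>

    struct toy_domain {
        unsigned long span;
        struct toy_domain *parent;
    };

    /* Scan each domain level outward, and within it each CPU of the
     * span, until a non-idle CPU is found; otherwise fall back. */
    static int pick_cpu(struct toy_domain *sd, const int *idle, int fallback)
    {
        for (; sd; sd = sd->parent)
            for (int i = 0; i < 32; i++)
                if ((sd->span & (1UL << i)) && !idle[i])
                    return i;
        return fallback;
    }

    int main(void)
    {
        int idle[32] = { [0] = 1, [1] = 1 };     /* CPUs 0-1 idle */
        struct toy_domain node = { 0xf, NULL };  /* CPUs 0-3 */

        printf("%d\n", pick_cpu(&node, idle, 0)); /* prints 2 */
        return 0;
    }
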
/kernel/
seccomp.c
144 static void populate_seccomp_data(struct seccomp_data *sd) in populate_seccomp_data() argument
150 sd->nr = syscall_get_nr(task, regs); in populate_seccomp_data()
151 sd->arch = syscall_get_arch(task); in populate_seccomp_data()
153 sd->args[0] = args[0]; in populate_seccomp_data()
154 sd->args[1] = args[1]; in populate_seccomp_data()
155 sd->args[2] = args[2]; in populate_seccomp_data()
156 sd->args[3] = args[3]; in populate_seccomp_data()
157 sd->args[4] = args[4]; in populate_seccomp_data()
158 sd->args[5] = args[5]; in populate_seccomp_data()
159 sd->instruction_pointer = KSTK_EIP(task); in populate_seccomp_data()
[all …]
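
Note: populate_seccomp_data() copies the syscall number, architecture token, six raw arguments, and instruction pointer out of the task's registers for BPF filters to inspect. A userspace sketch with a stand-in struct mirroring the UAPI seccomp_data layout; the register accessors are replaced by plain parameters.

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in mirroring the UAPI struct seccomp_data layout. */
    struct toy_seccomp_data {
        int nr;                        /* syscall number */
        uint32_t arch;                 /* AUDIT_ARCH_* token */
        uint64_t instruction_pointer;
        uint64_t args[6];              /* raw syscall arguments */
    };

    static void populate(struct toy_seccomp_data *sd, int nr, uint32_t arch,
                         uint64_t ip, const uint64_t args[6])
    {
        sd->nr = nr;
        sd->arch = arch;
        sd->instruction_pointer = ip;
        for (int i = 0; i < 6; i++)
            sd->args[i] = args[i];
    }

    int main(void)
    {
        uint64_t args[6] = { 1, 0x1000, 42, 0, 0, 0 };
        struct toy_seccomp_data sd;

        populate(&sd, 1 /* write on x86_64 */,
                 0xc000003e /* AUDIT_ARCH_X86_64 */, 0x400123, args);
        printf("nr=%d arg2=%llu\n", sd.nr,
               (unsigned long long)sd.args[2]); /* nr=1 arg2=42 */
        return 0;
    }
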
/kernel/trace/
bpf_trace.c
421 u64 flags, struct perf_sample_data *sd) in __bpf_perf_event_output() argument
446 return perf_event_output(event, sd, regs); in __bpf_perf_event_output()
470 struct perf_sample_data *sd; in BPF_CALL_5() local
478 sd = &sds->sds[nest_level - 1]; in BPF_CALL_5()
485 perf_sample_data_init(sd, 0, 0); in BPF_CALL_5()
486 sd->raw = &raw; in BPF_CALL_5()
488 err = __bpf_perf_event_output(regs, map, flags, sd); in BPF_CALL_5()
531 struct perf_sample_data *sd; in bpf_event_output() local
539 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); in bpf_event_output()
543 perf_sample_data_init(sd, 0, 0); in bpf_event_output()
[all …]
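
Note: the BPF_CALL_5 and bpf_event_output() snippets index a per-CPU array of perf_sample_data slots by nesting level, so a handler that re-enters (e.g. from an NMI) gets its own slot. A single-threaded sketch of that slot discipline; the kernel tracks the level with this_cpu_inc_return() rather than a plain counter.

    #include <stdio.h>

    #define MAX_NEST 3

    /* Scratch slots indexed by nesting depth: each re-entrant user
     * gets its own slot instead of clobbering the outer one. */
    struct scratch {
        int nest_level;
        int slot[MAX_NEST];
    };

    static int *get_slot(struct scratch *s)
    {
        if (s->nest_level >= MAX_NEST)
            return NULL;                 /* too deep: caller must bail out */
        return &s->slot[s->nest_level++];
    }

    static void put_slot(struct scratch *s)
    {
        s->nest_level--;
    }

    int main(void)
    {
        struct scratch s = { 0 };
        int *outer = get_slot(&s);       /* slot[0] */
        int *inner = get_slot(&s);       /* slot[1], a nested user */

        printf("distinct slots: %d\n", outer != inner); /* prints 1 */
        put_slot(&s);
        put_slot(&s);
        return 0;
    }
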
/kernel/irq/
debugfs.c
19 const struct irq_bit_descr *sd, int size) in irq_debug_show_bits() argument
23 for (i = 0; i < size; i++, sd++) { in irq_debug_show_bits()
24 if (state & sd->mask) in irq_debug_show_bits()
25 seq_printf(m, "%*s%s\n", ind + 12, "", sd->name); in irq_debug_show_bits()
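
Note: irq_debug_show_bits() above walks a table of {mask, name} descriptors and prints the name of every bit set in the state word. A compilable sketch, with printf() standing in for seq_printf():

    #include <stdio.h>

    /* A table of bit descriptors, and a loop that prints the name of
     * every descriptor whose mask bit is set in the state word. */
    struct bit_descr {
        unsigned int mask;
        const char *name;
    };

    static void show_bits(unsigned int state,
                          const struct bit_descr *sd, int size)
    {
        for (int i = 0; i < size; i++, sd++)
            if (state & sd->mask)
                printf("%s\n", sd->name);
    }

    int main(void)
    {
        static const struct bit_descr flags[] = {
            { 0x1, "ACTIVE" },
            { 0x2, "MASKED" },
            { 0x4, "PENDING" },
        };

        show_bits(0x5, flags, 3); /* prints ACTIVE and PENDING */
        return 0;
    }
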
/kernel/rcu/
rcutorture.c
1774 int sd; in rcu_torture_fwd_prog_nr() local
1794 sd = cur_ops->stall_dur() + 1; in rcu_torture_fwd_prog_nr()
1795 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; in rcu_torture_fwd_prog_nr()
1796 dur = sd4 + torture_random(&trs) % (sd - sd4); in rcu_torture_fwd_prog_nr()
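
Note: the rcutorture lines above pick a test duration uniformly in [sd4, sd), where sd is one more than the stall threshold and sd4 is sd/fwd_progress_div rounded up; e.g. stall_dur = 20 and div = 4 give a range of [6, 21). A sketch with hypothetical values, using rand() in place of torture_random():

    #include <stdio.h>
    #include <stdlib.h>

    /* Pick a forward-progress test duration: long enough to make
     * progress, short enough not to trip the stall detector. */
    int main(void)
    {
        int stall_dur = 20;              /* hypothetical stall threshold */
        int fwd_progress_div = 4;

        int sd  = stall_dur + 1;                                  /* 21 */
        int sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; /* 6, rounded up */
        int dur = sd4 + rand() % (sd - sd4);                      /* in [6, 21) */

        printf("dur = %d (range [%d, %d))\n", dur, sd4, sd);
        return 0;
    }
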
/kernel/events/
core.c
2836 struct stop_event_data *sd = info; in __perf_event_stop() local
2837 struct perf_event *event = sd->event; in __perf_event_stop()
2864 if (sd->restart) in __perf_event_stop()
2872 struct stop_event_data sd = { in perf_event_stop() local
2891 __perf_event_stop, &sd); in perf_event_stop()
6939 struct stop_event_data sd = { in __perf_event_output_stop() local
6960 ro->err = __perf_event_stop(&sd); in __perf_event_output_stop()
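
Note: perf_event_stop() packs its arguments into a stop_event_data and hands them through a void * info pointer to __perf_event_stop(), which runs on the event's CPU. A sketch of that pack-and-callback shape, with an ordinary call standing in for the kernel's cross-CPU invocation:

    #include <stdio.h>

    /* Small argument struct passed through a void * "info" pointer to
     * a callback, as in the perf snippets above. */
    struct stop_data {
        const char *event_name;
        int restart;
    };

    static int do_stop(void *info)
    {
        struct stop_data *sd = info;

        printf("stopping %s\n", sd->event_name);
        if (sd->restart)
            printf("restarting %s\n", sd->event_name);
        return 0;
    }

    int main(void)
    {
        struct stop_data sd = { .event_name = "cycles", .restart = 1 };

        /* In the kernel this would be a cross-call to the event's CPU. */
        return do_stop(&sd);
    }
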