/kernel/linux/linux-5.10/arch/ia64/kernel/ |
D | irq.c |
    82   int irq, new_cpu; in migrate_irqs() local
    108  new_cpu = cpumask_any(cpu_online_mask); in migrate_irqs()
    117  cpumask_of(new_cpu), false); in migrate_irqs()
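The ia64 hits above are the classic CPU-hotplug fallback: when an interrupt's target CPU goes away, migrate_irqs() retargets it at any CPU that is still online. A minimal sketch of that selection step, assuming kernel context (the real function's loop over all IRQs and its affinity checks are omitted):

    #include <linux/cpumask.h>
    #include <linux/irq.h>

    /* Retarget one interrupt at any CPU that is still online. */
    static void retarget_one_irq(struct irq_data *data)
    {
            struct irq_chip *chip = irq_data_get_irq_chip(data);
            int new_cpu;

            /* cpu_online_mask is never empty, so this always finds a CPU. */
            new_cpu = cpumask_any(cpu_online_mask);

            if (chip && chip->irq_set_affinity)
                    chip->irq_set_affinity(data, cpumask_of(new_cpu), false);
    }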
|
/kernel/linux/linux-5.10/drivers/irqchip/ |
D | irq-bcm6345-l1.c |
    199  unsigned int new_cpu; in bcm6345_l1_set_affinity() local
    207  new_cpu = cpumask_any_and(&valid, cpu_online_mask); in bcm6345_l1_set_affinity()
    208  if (new_cpu >= nr_cpu_ids) in bcm6345_l1_set_affinity()
    211  dest = cpumask_of(new_cpu); in bcm6345_l1_set_affinity()
    214  if (old_cpu != new_cpu) { in bcm6345_l1_set_affinity()
    226  irq_data_update_effective_affinity(d, cpumask_of(new_cpu)); in bcm6345_l1_set_affinity()
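irq-bcm6345-l1.c shows the usual shape of an irq_chip .irq_set_affinity callback: intersect the requested mask with the online CPUs, fail if nothing usable is left, and publish the CPU actually chosen as the effective affinity. A hedged sketch of just that selection logic (the real driver additionally restricts the mask to CPUs it owns registers for and moves the enable bit when old_cpu != new_cpu):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/irq.h>

    static int example_set_affinity(struct irq_data *d,
                                    const struct cpumask *mask, bool force)
    {
            unsigned int new_cpu;

            /* Only consider requested CPUs that are actually online. */
            new_cpu = cpumask_any_and(mask, cpu_online_mask);
            if (new_cpu >= nr_cpu_ids)
                    return -EINVAL;

            /* ... reprogram interrupt routing to new_cpu here ... */

            irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
            return IRQ_SET_MASK_OK;
    }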
|
/kernel/linux/linux-5.10/kernel/sched/ |
D | cpudeadline.c |
    176  int old_idx, new_cpu; in cpudl_clear() local
    191  new_cpu = cp->elements[cp->size - 1].cpu; in cpudl_clear()
    193  cp->elements[old_idx].cpu = new_cpu; in cpudl_clear()
    195  cp->elements[new_cpu].idx = old_idx; in cpudl_clear()
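cpudl_clear() removes a CPU from the scheduler's deadline max-heap without shifting the array: the last heap element is copied into the vacated slot, the per-CPU index map is patched, and the slot is re-heapified. A simplified standalone sketch of that removal step (the struct layout and cpudl_heapify() are stand-ins, and the IDX_INVALID and locking handling of the real code is omitted):

    #include <linux/types.h>

    struct cpudl_item {
            u64 dl;         /* deadline of the CPU stored in this slot */
            int cpu;        /* CPU stored in this heap slot */
            int idx;        /* heap slot currently holding this CPU */
    };

    struct cpudl {
            int size;
            struct cpudl_item *elements;    /* doubles as heap and cpu->idx map */
    };

    static void cpudl_heapify(struct cpudl *cp, int idx) { /* sift up/down elided */ }

    static void cpudl_remove(struct cpudl *cp, int cpu)
    {
            int old_idx = cp->elements[cpu].idx;
            int new_cpu = cp->elements[cp->size - 1].cpu;

            /* Fill the hole with the last heap element ... */
            cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
            cp->elements[old_idx].cpu = new_cpu;
            cp->size--;
            /* ... and keep the cpu -> heap-slot map consistent. */
            cp->elements[new_cpu].idx = old_idx;

            cpudl_heapify(cp, old_idx);
    }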
|
D | walt.h |
    151  extern void fixup_busy_time(struct task_struct *p, int new_cpu);
    229  static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } in fixup_busy_time() argument
|
D | walt.c |
    436  (struct task_struct *p, int new_cpu, int task_cpu, bool new_task) in inter_cluster_migration_fixup() argument
    438  struct rq *dest_rq = cpu_rq(new_cpu); in inter_cluster_migration_fixup()
    441  if (same_freq_domain(new_cpu, task_cpu)) in inter_cluster_migration_fixup()
    444  p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window; in inter_cluster_migration_fixup()
    445  p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window; in inter_cluster_migration_fixup()
    475  void fixup_busy_time(struct task_struct *p, int new_cpu) in fixup_busy_time() argument
    478  struct rq *dest_rq = cpu_rq(new_cpu); in fixup_busy_time()
    572  inter_cluster_migration_fixup(p, new_cpu, in fixup_busy_time()
    578  if (!same_freq_domain(new_cpu, task_cpu(p))) in fixup_busy_time()
|
D | fair.c |
    2972  static void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
    2975  int dst_nid = cpu_to_node(new_cpu); in update_scan_period()
    3019  static inline void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
    6284  int new_cpu = cpu; in find_idlest_cpu() local
    6312  new_cpu = find_idlest_group_cpu(group, p, cpu); in find_idlest_cpu()
    6313  if (new_cpu == cpu) { in find_idlest_cpu()
    6320  cpu = new_cpu; in find_idlest_cpu()
    6331  return new_cpu; in find_idlest_cpu()
    7134  int new_cpu = prev_cpu; in select_task_rq_fair() local
    7148  new_cpu = find_energy_efficient_cpu(p, prev_cpu); in select_task_rq_fair()
    [all …]
|
D | core.c |
    1799  struct task_struct *p, int new_cpu) in move_queued_task() argument
    1805  double_lock_balance(rq, cpu_rq(new_cpu)); in move_queued_task()
    1809  set_task_cpu(p, new_cpu); in move_queued_task()
    1811  double_rq_unlock(cpu_rq(new_cpu), rq); in move_queued_task()
    1816  rq = cpu_rq(new_cpu); in move_queued_task()
    1819  BUG_ON(task_cpu(p) != new_cpu); in move_queued_task()
    2051  void set_task_cpu(struct task_struct *p, unsigned int new_cpu) in set_task_cpu() argument
    2087  WARN_ON_ONCE(!cpu_online(new_cpu)); in set_task_cpu()
    2090  trace_sched_migrate_task(p, new_cpu); in set_task_cpu()
    2092  if (task_cpu(p) != new_cpu) { in set_task_cpu()
    [all …]
|
D | sched.h | 1941 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
|
D | deadline.c | 1730 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused) in migrate_task_rq_dl()
|
/kernel/linux/linux-5.10/arch/x86/hyperv/ |
D | hv_init.c |
    214  unsigned int new_cpu; in hv_cpu_die() local
    239  new_cpu = cpumask_any_but(cpu_online_mask, cpu); in hv_cpu_die()
    241  if (new_cpu < nr_cpu_ids) in hv_cpu_die()
    242  re_ctrl.target_vp = hv_vp_index[new_cpu]; in hv_cpu_die()
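hv_cpu_die() is the hotplug-notifier variant of the same idea: when the CPU that receives Hyper-V reenlightenment notifications goes down, pick any other online CPU and repoint the control structure at it. A minimal sketch of that choice, with retarget_to() standing in as a hypothetical helper for the hypervisor-specific reprogramming:

    #include <linux/cpumask.h>

    void retarget_to(unsigned int cpu);     /* hypothetical, defined elsewhere */

    static int example_cpu_die(unsigned int cpu)
    {
            unsigned int new_cpu;

            /* Any online CPU except the one going down. */
            new_cpu = cpumask_any_but(cpu_online_mask, cpu);

            /* nr_cpu_ids means "none left": the last CPU is going offline. */
            if (new_cpu < nr_cpu_ids)
                    retarget_to(new_cpu);

            return 0;
    }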
|
/kernel/linux/linux-5.10/tools/perf/scripts/python/ |
D | sched-migration.py |
    191  def migrate(self, ts_list, new, old_cpu, new_cpu): argument
    192  if old_cpu == new_cpu:
    199  new_rq = self.prev.rqs[new_cpu]
    201  self.rqs[new_cpu] = in_rq
    208  self.event_cpus.append(new_cpu)
|
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_device.c |
    931  int cpu, new_cpu; in kfd_queue_work() local
    933  cpu = new_cpu = smp_processor_id(); in kfd_queue_work()
    935  new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids; in kfd_queue_work()
    936  if (cpu_to_node(new_cpu) == numa_node_id()) in kfd_queue_work()
    938  } while (cpu != new_cpu); in kfd_queue_work()
    940  queue_work_on(new_cpu, wq, work); in kfd_queue_work()
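kfd_queue_work() takes a different approach: rather than accepting the first online CPU, it walks the online mask round-robin from the current CPU and stops at the first one on the local NUMA node, falling back to the current CPU if the walk wraps all the way around. A sketch that mirrors the loop shown above (assumed to run with preemption disabled so smp_processor_id() is stable):

    #include <linux/cpumask.h>
    #include <linux/smp.h>
    #include <linux/topology.h>
    #include <linux/workqueue.h>

    static void queue_work_numa_local(struct workqueue_struct *wq,
                                      struct work_struct *work)
    {
            int cpu, new_cpu;

            cpu = new_cpu = smp_processor_id();
            do {
                    /* Next online CPU, wrapping past the end of the mask. */
                    new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
                    if (cpu_to_node(new_cpu) == numa_node_id())
                            break;
            } while (cpu != new_cpu);

            queue_work_on(new_cpu, wq, work);
    }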
|
/kernel/linux/linux-5.10/drivers/hv/ |
D | hyperv_vmbus.h |
    441  unsigned int new_cpu) in hv_update_alloced_cpus() argument
    443  hv_set_alloced_cpu(new_cpu); in hv_update_alloced_cpus()
|
/kernel/linux/linux-5.10/arch/x86/events/intel/ |
D | uncore.c |
    1318  int new_cpu) in uncore_change_type_ctx() argument
    1324  die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu); in uncore_change_type_ctx()
    1332  box->cpu = new_cpu; in uncore_change_type_ctx()
    1338  if (new_cpu < 0) in uncore_change_type_ctx()
    1342  perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); in uncore_change_type_ctx()
    1343  box->cpu = new_cpu; in uncore_change_type_ctx()
    1348  int old_cpu, int new_cpu) in uncore_change_context() argument
    1351  uncore_change_type_ctx(*uncores, old_cpu, new_cpu); in uncore_change_context()
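The x86 uncore driver, like thunderx2_pmu.c and imc-pmu.c below, ends with the same hand-off: once a replacement CPU has been chosen, active perf events are moved over with perf_pmu_migrate_context() and the new owner is recorded. A minimal sketch of that hand-off, with the pmu pointer and owner field standing in for the driver-specific structures:

    #include <linux/perf_event.h>

    static void example_migrate_pmu_owner(struct pmu *pmu, int *owner_cpu,
                                          int old_cpu, int new_cpu)
    {
            /* Nothing to move if either side is "no CPU". */
            if (old_cpu < 0 || new_cpu < 0)
                    return;

            /* Re-home all active events of this PMU onto the new CPU ... */
            perf_pmu_migrate_context(pmu, old_cpu, new_cpu);
            /* ... and remember which CPU owns the counters from now on. */
            *owner_cpu = new_cpu;
    }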
|
/kernel/linux/linux-5.10/drivers/perf/ |
D | thunderx2_pmu.c |
    939  int new_cpu; in tx2_uncore_pmu_offline_cpu() local
    954  new_cpu = cpumask_any_and( in tx2_uncore_pmu_offline_cpu()
    958  tx2_pmu->cpu = new_cpu; in tx2_uncore_pmu_offline_cpu()
    959  if (new_cpu >= nr_cpu_ids) in tx2_uncore_pmu_offline_cpu()
    961  perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu); in tx2_uncore_pmu_offline_cpu()
|
/kernel/linux/linux-5.10/drivers/scsi/lpfc/ |
D | lpfc_init.c |
    10942  int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; in lpfc_cpu_affinity_check() local
    11010  new_cpu = start_cpu; in lpfc_cpu_affinity_check()
    11012  new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; in lpfc_cpu_affinity_check()
    11017  new_cpu = cpumask_next( in lpfc_cpu_affinity_check()
    11018  new_cpu, cpu_present_mask); in lpfc_cpu_affinity_check()
    11019  if (new_cpu == nr_cpumask_bits) in lpfc_cpu_affinity_check()
    11020  new_cpu = first_cpu; in lpfc_cpu_affinity_check()
    11032  start_cpu = cpumask_next(new_cpu, cpu_present_mask); in lpfc_cpu_affinity_check()
    11040  cpu, cpup->eq, new_cpu, in lpfc_cpu_affinity_check()
    11061  new_cpu = start_cpu; in lpfc_cpu_affinity_check()
    [all …]
|
/kernel/linux/linux-5.10/arch/arm64/kvm/vgic/ |
D | vgic.c |
    697  struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu; in vgic_prune_ap_list() local
    701  list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); in vgic_prune_ap_list()
|
/kernel/linux/linux-5.10/kernel/ |
D | workqueue.c |
    1383  int new_cpu; in wq_select_unbound_cpu() local
    1396  new_cpu = __this_cpu_read(wq_rr_cpu_last); in wq_select_unbound_cpu()
    1397  new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); in wq_select_unbound_cpu()
    1398  if (unlikely(new_cpu >= nr_cpu_ids)) { in wq_select_unbound_cpu()
    1399  new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); in wq_select_unbound_cpu()
    1400  if (unlikely(new_cpu >= nr_cpu_ids)) in wq_select_unbound_cpu()
    1403  __this_cpu_write(wq_rr_cpu_last, new_cpu); in wq_select_unbound_cpu()
    1405  return new_cpu; in wq_select_unbound_cpu()
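wq_select_unbound_cpu() adds per-CPU round-robin state on top of the mask arithmetic seen elsewhere in this list: resume the walk where this CPU last stopped, wrap around once, and fall back to the caller's CPU if the unbound mask and the online mask no longer intersect. A hedged sketch of that selection, with example_rr_cpu_last and the allowed mask standing in for wq_rr_cpu_last and wq_unbound_cpumask:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, example_rr_cpu_last);

    static int example_select_unbound_cpu(int cpu, const struct cpumask *allowed)
    {
            int new_cpu;

            /* Continue the round-robin walk from where we left off last time. */
            new_cpu = __this_cpu_read(example_rr_cpu_last);
            new_cpu = cpumask_next_and(new_cpu, allowed, cpu_online_mask);
            if (unlikely(new_cpu >= nr_cpu_ids)) {
                    /* Wrapped: start again from the beginning of the mask. */
                    new_cpu = cpumask_first_and(allowed, cpu_online_mask);
                    if (unlikely(new_cpu >= nr_cpu_ids))
                            return cpu;     /* no usable CPU, keep the caller's */
            }
            __this_cpu_write(example_rr_cpu_last, new_cpu);

            return new_cpu;
    }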
|
/kernel/linux/linux-5.10/arch/powerpc/perf/ |
D | imc-pmu.c |
    331  static void nest_change_cpu_context(int old_cpu, int new_cpu) in nest_change_cpu_context() argument
    335  if (old_cpu < 0 || new_cpu < 0) in nest_change_cpu_context()
    339  perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu); in nest_change_cpu_context()
|
/kernel/linux/linux-5.10/tools/perf/ |
D | builtin-sched.c |
    1540  bool new_cpu = false; in map_switch_event() local
    1553  new_cpu = true; in map_switch_event()
    1658  if (sched->map.comp && new_cpu) in map_switch_event()
|