/kernel/ |
padata.c
     54  target_cpu = cpumask_first(pd->cpumask.pcpu);    in padata_index_to_cpu()
     56  target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);    in padata_index_to_cpu()
     67  int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);    in padata_cpu_hash()
    183  if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {    in padata_do_parallel()
    184  if (!cpumask_weight(pd->cpumask.cbcpu))    in padata_do_parallel()
    188  cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);    in padata_do_parallel()
    190  cpu = cpumask_first(pd->cpumask.cbcpu);    in padata_do_parallel()
    192  cpu = cpumask_next(cpu, pd->cpumask.cbcpu);    in padata_do_parallel()
    269  pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);    in padata_find_next()
    426  cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);    in padata_setup_cpumasks()
    [all …]
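The padata.c hits above revolve around one idiom: treating a cpumask as an ordered set and walking it with cpumask_first()/cpumask_next() until a computed index is reached, with cpumask_weight() bounding that index. A minimal sketch of the walk, assuming a non-empty mask; the example_* names are illustrative and not taken from padata.c:

#include <linux/cpumask.h>

/*
 * Map a small integer index onto the Nth set bit of a cpumask by
 * starting at the first CPU and stepping forward, mirroring the
 * cpumask_first()/cpumask_next() walk in padata_index_to_cpu().
 */
static int example_index_to_cpu(const struct cpumask *mask, int index)
{
	int cpu, i;

	cpu = cpumask_first(mask);
	for (i = 0; i < index; i++)
		cpu = cpumask_next(cpu, mask);

	return cpu;
}

/* Reduce an arbitrary sequence number to a valid index for the walk. */
static int example_cpu_hash(const struct cpumask *mask, unsigned int seq_nr)
{
	return seq_nr % cpumask_weight(mask);
}

cpumask_next() returns nr_cpu_ids once the mask is exhausted, which is why the index is reduced modulo cpumask_weight() before the walk.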
|
stop_machine.c
    176  const struct cpumask *active_cpus;
    198  notrace void __weak stop_machine_yield(const struct cpumask *cpumask)    in stop_machine_yield() argument
    209  const struct cpumask *cpumask;    in multi_cpu_stop() local
    220  cpumask = cpu_online_mask;    in multi_cpu_stop()
    221  is_active = cpu == cpumask_first(cpumask);    in multi_cpu_stop()
    223  cpumask = msdata->active_cpus;    in multi_cpu_stop()
    224  is_active = cpumask_test_cpu(cpu, cpumask);    in multi_cpu_stop()
    230  stop_machine_yield(cpumask);    in multi_cpu_stop()
    393  static bool queue_stop_cpus_work(const struct cpumask *cpumask,    in queue_stop_cpus_work() argument
    409  for_each_cpu(cpu, cpumask) {    in queue_stop_cpus_work()
    [all …]
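The multi_cpu_stop() lines show how the stopper decides whether the local CPU does the real work: with no explicit mask it elects the first online CPU, otherwise it tests membership in msdata->active_cpus. A hedged sketch of that check; example_is_active_cpu() is an illustrative helper, not the kernel function:

#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * With no explicit mask, elect the first online CPU; otherwise any
 * CPU in the supplied mask is "active" and performs the work.
 */
static bool example_is_active_cpu(const struct cpumask *active_cpus)
{
	unsigned int cpu = smp_processor_id();

	if (!active_cpus)
		return cpu == cpumask_first(cpu_online_mask);

	return cpumask_test_cpu(cpu, active_cpus);
}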
|
workqueue.c
   1904  set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);    in worker_attach_to_pool()
   1991  kthread_bind_mask(worker->task, pool->attrs->cpumask);    in create_worker()
   3413  free_cpumask_var(attrs->cpumask);    in free_workqueue_attrs()
   3433  if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))    in alloc_workqueue_attrs()
   3436  cpumask_copy(attrs->cpumask, cpu_possible_mask);    in alloc_workqueue_attrs()
   3447  cpumask_copy(to->cpumask, from->cpumask);    in copy_workqueue_attrs()
   3462  hash = jhash(cpumask_bits(attrs->cpumask),    in wqattrs_hash()
   3473  if (!cpumask_equal(a->cpumask, b->cpumask))    in wqattrs_equal()
   3688  if (cpumask_subset(attrs->cpumask,    in get_unbound_pool()
   3916  int cpu_going_down, cpumask_t *cpumask)    in wq_calc_node_cpumask() argument
   [all …]
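workqueue.c keeps a cpumask inside each attribute set and has to allocate, copy, compare and hash it so that equivalent attribute sets can share an unbound pool. A rough sketch of that lifecycle, assuming an illustrative struct example_attrs rather than the real workqueue_attrs; in particular the hash length below is an assumption, the kernel computes its own bit count:

#include <linux/cpumask.h>
#include <linux/jhash.h>
#include <linux/slab.h>

struct example_attrs {
	int		nice;
	cpumask_var_t	cpumask;
};

static struct example_attrs *example_attrs_alloc(gfp_t gfp)
{
	struct example_attrs *attrs;

	attrs = kzalloc(sizeof(*attrs), gfp);
	if (!attrs)
		return NULL;
	if (!alloc_cpumask_var(&attrs->cpumask, gfp)) {
		kfree(attrs);
		return NULL;
	}
	/* Default to every possible CPU, as alloc_workqueue_attrs() does. */
	cpumask_copy(attrs->cpumask, cpu_possible_mask);
	return attrs;
}

static void example_attrs_free(struct example_attrs *attrs)
{
	if (attrs) {
		free_cpumask_var(attrs->cpumask);
		kfree(attrs);
	}
}

static bool example_attrs_equal(const struct example_attrs *a,
				const struct example_attrs *b)
{
	return a->nice == b->nice && cpumask_equal(a->cpumask, b->cpumask);
}

static u32 example_attrs_hash(const struct example_attrs *attrs)
{
	u32 hash = jhash_1word(attrs->nice, 0);

	/* Fold the raw bitmap into the hash, like wqattrs_hash(). */
	return jhash(cpumask_bits(attrs->cpumask),
		     BITS_TO_LONGS(nr_cpu_ids) * sizeof(long), hash);
}

Hashing the bitmap directly works because cpumask_copy() and cpumask_equal() guarantee that equal masks have identical backing storage.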
|
smp.c
     91  cpumask_var_t cpumask;    member
    105  if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,    in smpcfd_prepare_cpu()
    110  free_cpumask_var(cfd->cpumask);    in smpcfd_prepare_cpu()
    115  free_cpumask_var(cfd->cpumask);    in smpcfd_prepare_cpu()
    127  free_cpumask_var(cfd->cpumask);    in smpcfd_dead_cpu()
    826  int smp_call_function_any(const struct cpumask *mask,    in smp_call_function_any()
    830  const struct cpumask *nodemask;    in smp_call_function_any()
    864  static void smp_call_function_many_cond(const struct cpumask *mask,    in smp_call_function_many_cond()
    909  cpumask_and(cfd->cpumask, mask, cpu_online_mask);    in smp_call_function_many_cond()
    910  __cpumask_clear_cpu(this_cpu, cfd->cpumask);    in smp_call_function_many_cond()
    [all …]
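In smp.c the cross-call path keeps a per-CPU scratch cpumask that is allocated NUMA-locally when the CPU is prepared, narrowed to online targets before use, and freed when the CPU dies. A condensed sketch of that allocate / restrict / free pattern; the struct and function names are made up for illustration:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/topology.h>

struct example_cfd {
	cpumask_var_t cpumask;		/* CPUs still to be signalled */
};

/* Allocate the scratch mask on the CPU's own NUMA node. */
static int example_prepare_cpu(struct example_cfd *cfd, unsigned int cpu)
{
	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	return 0;
}

static void example_dead_cpu(struct example_cfd *cfd)
{
	free_cpumask_var(cfd->cpumask);
}

/* Target set for a cross-call: requested CPUs that are online, minus ourselves. */
static void example_build_call_mask(struct example_cfd *cfd,
				    const struct cpumask *requested)
{
	cpumask_and(cfd->cpumask, requested, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), cfd->cpumask);
}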
|
cpu.c
   2676  struct cpumask __cpu_possible_mask __read_mostly
   2679  struct cpumask __cpu_possible_mask __read_mostly;
   2683  struct cpumask __cpu_online_mask __read_mostly;
   2686  struct cpumask __cpu_present_mask __read_mostly;
   2689  struct cpumask __cpu_active_mask __read_mostly;
   2692  struct cpumask __cpu_dying_mask __read_mostly;
   2698  void init_cpu_present(const struct cpumask *src)    in init_cpu_present()
   2703  void init_cpu_possible(const struct cpumask *src)    in init_cpu_possible()
   2708  void init_cpu_online(const struct cpumask *src)    in init_cpu_online()
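The cpu.c lines are the definitions of the boot-time masks: plain, statically allocated struct cpumask globals rather than cpumask_var_t, seeded by the init_cpu_*() helpers with cpumask_copy(). A tiny illustrative sketch of that shape; the example_* names are not the real symbols:

#include <linux/cache.h>
#include <linux/cpumask.h>

static struct cpumask example_present_mask __read_mostly;

/* Architectures hand over their detected CPU set once, early in boot. */
void example_init_present(const struct cpumask *src)
{
	cpumask_copy(&example_present_mask, src);
}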
|
/kernel/time/ |
tick-common.c
    209  const struct cpumask *cpumask)    in tick_setup_device() argument
    259  if (!cpumask_equal(newdev->cpumask, cpumask))    in tick_setup_device()
    260  irq_set_affinity(newdev->irq, cpumask);    in tick_setup_device()
    292  if (!cpumask_test_cpu(cpu, newdev->cpumask))    in tick_check_percpu()
    294  if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))    in tick_check_percpu()
    300  if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))    in tick_check_percpu()
    322  !cpumask_equal(curdev->cpumask, newdev->cpumask);    in tick_check_preferred()
|
tick-broadcast.c
     62  struct cpumask *tick_get_broadcast_mask(void)    in tick_get_broadcast_mask()
    132  if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu)))    in tick_set_oneshot_wakeup_device()
    227  static void err_broadcast(const struct cpumask *mask)    in err_broadcast()
    346  static bool tick_do_broadcast(struct cpumask *mask)    in tick_do_broadcast()
    614  struct cpumask *tick_get_broadcast_oneshot_mask(void)    in tick_get_broadcast_oneshot_mask()
    635  const struct cpumask *cpumask)    in tick_broadcast_set_affinity() argument
    640  if (cpumask_equal(bc->cpumask, cpumask))    in tick_broadcast_set_affinity()
    643  bc->cpumask = cpumask;    in tick_broadcast_set_affinity()
    644  irq_set_affinity(bc->irq, bc->cpumask);    in tick_broadcast_set_affinity()
    988  static void tick_broadcast_init_next_event(struct cpumask *mask,    in tick_broadcast_init_next_event()
|
clockevents.c
    450  if (!dev->cpumask) {    in clockevents_register_device()
    452  dev->cpumask = cpumask_of(smp_processor_id());    in clockevents_register_device()
    455  if (dev->cpumask == cpu_all_mask) {    in clockevents_register_device()
    458  dev->cpumask = cpu_possible_mask;    in clockevents_register_device()
    650  if (cpumask_test_cpu(cpu, dev->cpumask) &&    in tick_cleanup_dead_cpu()
    651  cpumask_weight(dev->cpumask) == 1 &&    in tick_cleanup_dead_cpu()
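clockevents_register_device() normalizes a device's affinity mask: no mask means "the registering CPU", and the catch-all cpu_all_mask is treated as "any possible CPU"; tick_cleanup_dead_cpu() later identifies strictly per-CPU devices by weight. A hedged sketch of both checks with an illustrative struct standing in for the clock event device:

#include <linux/cpumask.h>
#include <linux/smp.h>

struct example_dev {
	const struct cpumask *cpumask;
};

static void example_default_cpumask(struct example_dev *dev)
{
	if (!dev->cpumask)
		dev->cpumask = cpumask_of(smp_processor_id());
	else if (dev->cpumask == cpu_all_mask)
		dev->cpumask = cpu_possible_mask;
}

/* A device is private to @cpu if it can only ever fire there. */
static bool example_is_percpu_dev(struct example_dev *dev, int cpu)
{
	return cpumask_test_cpu(cpu, dev->cpumask) &&
	       cpumask_weight(dev->cpumask) == 1;
}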
|
tick-internal.h
     73  extern struct cpumask *tick_get_broadcast_mask(void);
    132  extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
|
/kernel/irq/ |
ipi.c
     24  const struct cpumask *dest)    in irq_reserve_ipi()
    115  int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)    in irq_destroy_ipi()
    118  struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;    in irq_destroy_ipi()
    165  struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;    in ipi_get_hwirq()
    187  const struct cpumask *dest, unsigned int cpu)    in ipi_send_verify()
    189  struct cpumask *ipimask = irq_data_get_affinity_mask(data);    in ipi_send_verify()
    263  int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)    in __ipi_send_mask()
    328  int ipi_send_mask(unsigned int virq, const struct cpumask *dest)    in ipi_send_mask()
|
affinity.c
     12  static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,    in irq_spread_init_one()
     15  const struct cpumask *siblmsk;    in irq_spread_init_one()
     84  const struct cpumask *mask, nodemask_t *nodemsk)    in get_nodes_in_cpumask()
    130  const struct cpumask *cpu_mask,    in alloc_nodes_vectors()
    132  struct cpumask *nmsk,    in alloc_nodes_vectors()
    251  const struct cpumask *cpu_mask,    in __irq_build_affinity_masks()
    252  struct cpumask *nmsk,    in __irq_build_affinity_masks()
|
manage.c
    200  const struct cpumask *m = irq_data_get_effective_affinity_mask(data);    in irq_validate_effective_affinity()
    210  const struct cpumask *mask)    in irq_init_effective_affinity()
    217  const struct cpumask *mask) { }    in irq_init_effective_affinity()
    220  int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,    in irq_do_set_affinity()
    225  const struct cpumask *prog_mask;    in irq_do_set_affinity()
    229  static struct cpumask tmp_mask;    in irq_do_set_affinity()
    256  const struct cpumask *hk_mask;    in irq_do_set_affinity()
    301  const struct cpumask *dest)    in irq_set_affinity_pending()
    311  const struct cpumask *dest)    in irq_set_affinity_pending()
    318  const struct cpumask *dest, bool force)    in irq_try_set_affinity()
    [all …]
|
internals.h
    138  const struct cpumask *dest, bool force);
    419  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)    in irq_copy_pending()
    424  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)    in irq_get_pending()
    428  static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)    in irq_desc_get_pending_mask()
    447  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)    in irq_copy_pending()
    451  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)    in irq_get_pending()
    454  static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)    in irq_desc_get_pending_mask()
|
cpuhotplug.c
     22  const struct cpumask *m = irq_data_get_effective_affinity_mask(d);    in irq_needs_fixup()
     58  const struct cpumask *affinity;    in migrate_one_irq()
    177  const struct cpumask *hk_mask;    in hk_should_isolate()
    192  const struct cpumask *affinity = irq_data_get_affinity_mask(data);    in irq_restore_affinity_of_irq()
|
matrix.c
    130  const struct cpumask *msk)    in matrix_find_best_cpu()
    151  const struct cpumask *msk)    in matrix_find_best_cpu_managed()
    210  int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)    in irq_matrix_reserve_managed()
    251  void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)    in irq_matrix_remove_managed()
    286  int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,    in irq_matrix_alloc_managed()
    377  int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,    in irq_matrix_alloc()
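matrix_find_best_cpu() style selection scans the supplied mask with for_each_cpu() and keeps the CPU with the most free vectors. A simplified sketch of that scan, assuming the per-CPU accounting is flattened into a plain array purely for illustration (the kernel keeps it in per-CPU data):

#include <linux/cpumask.h>
#include <linux/limits.h>

/*
 * Return the CPU in @msk with the most free slots, or UINT_MAX if the
 * mask is empty.
 */
static unsigned int example_find_best_cpu(const struct cpumask *msk,
					  const unsigned int *available)
{
	unsigned int cpu, best_cpu = UINT_MAX, maxavl = 0;

	for_each_cpu(cpu, msk) {
		if (available[cpu] <= maxavl)
			continue;
		best_cpu = cpu;
		maxavl = available[cpu];
	}
	return best_cpu;
}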
|
irqdesc.c
     82  const struct cpumask *affinity)    in desc_smp_init()
    100  desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }    in desc_smp_init()
    104  const struct cpumask *affinity, struct module *owner)    in desc_set_defaults()
    391  const struct cpumask *affinity,    in alloc_desc()
    486  const struct cpumask *mask = NULL;    in alloc_descs()
    882  const struct cpumask *affinity)    in irq_set_percpu_devid_partition()
    911  int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)    in irq_get_percpu_devid_partition()
|
/kernel/sched/ |
topology.c
     40  struct cpumask *groupmask)    in sched_domain_debug_one()
    289  static void perf_domain_debug(const struct cpumask *cpu_map,    in perf_domain_debug()
    355  static bool build_perf_domains(const struct cpumask *cpu_map)    in build_perf_domains()
    863  build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)    in build_balance_mask()
    865  const struct cpumask *sg_span = sched_group_span(sg);    in build_balance_mask()
    903  struct cpumask *sg_span;    in build_group_from_child_sched_domain()
    924  struct cpumask *mask = sched_domains_tmpmask2;    in init_overlap_sched_group()
    926  struct cpumask *sg_span;    in init_overlap_sched_group()
    978  const struct cpumask *span = sched_domain_span(sd);    in build_overlap_sched_groups()
    979  struct cpumask *covered = sched_domains_tmpmask;    in build_overlap_sched_groups()
    [all …]
|
cpupri.c
     74  drop_nopreempt_cpus(struct cpumask *lowest_mask)    in drop_nopreempt_cpus()
     89  struct cpumask *lowest_mask, int idx,    in __cpupri_find()
    149  struct cpumask *lowest_mask)    in cpupri_find()
    172  struct cpumask *lowest_mask,    in cpupri_find_fitness()
|
cpupri.h
     22  struct cpumask *lowest_mask);
     24  struct cpumask *lowest_mask,
|
sched.h
    353  extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
    878  extern int sched_init_domains(const struct cpumask *cpu_map);
   1188  static inline struct cpumask *sched_group_span(struct sched_group *sg);
   1720  extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
   1725  static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)    in sched_numa_find_closest()
   1853  unsigned long cpumask[]; /* Balance mask */    member
   1871  unsigned long cpumask[];    member
   1874  static inline struct cpumask *sched_group_span(struct sched_group *sg)    in sched_group_span()
   1876  return to_cpumask(sg->cpumask);    in sched_group_span()
   1882  static inline struct cpumask *group_balance_mask(struct sched_group *sg)    in group_balance_mask()
   [all …]
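sched_group stores its span as a trailing unsigned long cpumask[] flexible array and converts it on access with to_cpumask(), so the group and its mask come from a single allocation. A stripped-down sketch of that layout; struct example_group and its allocator are illustrative, not the scheduler's code:

#include <linux/cpumask.h>
#include <linux/slab.h>

struct example_group {
	unsigned int	weight;
	unsigned long	cpumask[];	/* bitmap storage, sized at allocation */
};

/* Reinterpret the trailing bitmap as a struct cpumask, like sched_group_span(). */
static inline struct cpumask *example_group_span(struct example_group *g)
{
	return to_cpumask(g->cpumask);
}

static struct example_group *example_group_alloc(const struct cpumask *span)
{
	struct example_group *g;

	/* One allocation covers the struct plus cpumask_size() bytes of bitmap. */
	g = kzalloc(sizeof(*g) + cpumask_size(), GFP_KERNEL);
	if (!g)
		return NULL;

	cpumask_copy(example_group_span(g), span);
	g->weight = cpumask_weight(span);
	return g;
}

Embedding the bitmap this way avoids a second allocation and keeps the mask cache-adjacent to the structure that owns it.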
|
core.c
    253  static struct cpumask sched_core_mask;
    257  const struct cpumask *smt_mask = cpu_smt_mask(cpu);    in sched_core_lock()
    267  const struct cpumask *smt_mask = cpu_smt_mask(cpu);    in sched_core_unlock()
    287  const struct cpumask *smt_mask = cpu_smt_mask(cpu);    in __sched_core_flip()
   1035  const struct cpumask *hk_mask;    in get_nohz_timer_target()
   2158  __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
   2161  const struct cpumask *new_mask,
   2512  void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)    in set_cpus_allowed_common()
   2525  __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)    in __do_set_cpus_allowed()
   2569  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)    in do_set_cpus_allowed()
   [all …]
|
/kernel/events/ |
hw_breakpoint.c
    121  static const struct cpumask *cpumask_of_bp(struct perf_event *bp)    in cpumask_of_bp()
    136  const struct cpumask *cpumask = cpumask_of_bp(bp);    in fetch_bp_busy_slots() local
    139  for_each_cpu(cpu, cpumask) {    in fetch_bp_busy_slots()
    194  const struct cpumask *cpumask = cpumask_of_bp(bp);    in toggle_bp_slot() local
    207  for_each_cpu(cpu, cpumask)    in toggle_bp_slot()
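cpumask_of_bp() picks the set of CPUs an event has to account breakpoint slots on: one CPU for a CPU-bound event, every possible CPU for a task-bound one, and callers then iterate with for_each_cpu(). A hedged sketch of the idea with a plain integer standing in for the perf_event:

#include <linux/cpumask.h>

/*
 * A CPU-bound event (bound_cpu >= 0) only accounts slots on its own
 * CPU; a task-bound event has to account on every possible CPU.
 */
static const struct cpumask *example_mask_of_event(int bound_cpu)
{
	if (bound_cpu >= 0)
		return cpumask_of(bound_cpu);
	return cpu_possible_mask;
}

/* Typical caller: walk whichever mask came back. */
static unsigned int example_count_cpus(int bound_cpu)
{
	const struct cpumask *mask = example_mask_of_event(bound_cpu);
	unsigned int cpu, n = 0;

	for_each_cpu(cpu, mask)
		n++;		/* real code reads per-CPU slot counters here */

	return n;
}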
|
/kernel/trace/ |
ring_buffer.c
    281  for_each_cpu(cpu, buffer->cpumask)
    284  for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
    540  cpumask_var_t cpumask;    member
    954  if (!cpumask_test_cpu(cpu, buffer->cpumask))    in ring_buffer_wait()
   1063  if (!cpumask_test_cpu(cpu, buffer->cpumask))    in ring_buffer_poll_wait()
   1762  if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))    in __ring_buffer_alloc()
   1786  cpumask_set_cpu(cpu, buffer->cpumask);    in __ring_buffer_alloc()
   1807  free_cpumask_var(buffer->cpumask);    in __ring_buffer_alloc()
   1832  free_cpumask_var(buffer->cpumask);    in ring_buffer_free()
   2099  !cpumask_test_cpu(cpu_id, buffer->cpumask))    in ring_buffer_resize()
   [all …]
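The ring buffer records which CPUs actually have a per-CPU buffer in a cpumask_var_t and tests that mask before touching a CPU's data. A hedged sketch of the allocate / populate / test / free lifecycle with an illustrative struct; the real code sets the bit per successfully allocated buffer, the loop below simply uses the online CPUs:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_buffer {
	cpumask_var_t cpumask;		/* CPUs with an allocated buffer */
};

static int example_buffer_init(struct example_buffer *buf)
{
	int cpu;

	if (!zalloc_cpumask_var(&buf->cpumask, GFP_KERNEL))
		return -ENOMEM;

	/* Mark every CPU we managed to set up. */
	for_each_online_cpu(cpu)
		cpumask_set_cpu(cpu, buf->cpumask);

	return 0;
}

static bool example_buffer_has_cpu(struct example_buffer *buf, int cpu)
{
	return cpumask_test_cpu(cpu, buf->cpumask);
}

static void example_buffer_free(struct example_buffer *buf)
{
	free_cpumask_var(buf->cpumask);
}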
|
trace_hwlat.c
    312  static struct cpumask save_cpumask;
    316  struct cpumask *current_mask = &save_cpumask;    in move_to_next_cpu()
    424  struct cpumask *current_mask = &save_cpumask;    in start_single_kthread()
    581  struct cpumask *current_mask = &save_cpumask;    in start_per_cpu_kthreads()
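trace_hwlat keeps a static struct cpumask scratch area (save_cpumask) and uses it to rotate its sampling kthread across the allowed, online CPUs. A sketch of that rotate-with-wraparound pattern, assuming the kthread moves itself with set_cpus_allowed_ptr(); the example_* names are illustrative:

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/smp.h>

static struct cpumask example_saved_mask;	/* scratch, like save_cpumask */

static void example_move_to_next_cpu(const struct cpumask *allowed)
{
	struct cpumask *mask = &example_saved_mask;
	unsigned int next;

	/* Only ever target CPUs that are both allowed and online. */
	cpumask_and(mask, allowed, cpu_online_mask);

	next = cpumask_next(smp_processor_id(), mask);
	if (next >= nr_cpu_ids)			/* past the end: wrap around */
		next = cpumask_first(mask);
	if (next >= nr_cpu_ids)			/* mask is empty: stay put */
		return;

	set_cpus_allowed_ptr(current, cpumask_of(next));
}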
|
/kernel/cgroup/ |
cpuset.c
    429  struct cpumask *pmask)    in guarantee_online_cpus()
    431  const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);    in guarantee_online_cpus()
    923  struct cpumask *dp;    in generate_sched_domains()
   1119  const struct cpumask *new_mask)    in update_cpus_allowed()
   1168  static void compute_effective_cpumask(struct cpumask *new_cpus,    in compute_effective_cpumask()
   1233  struct cpumask *newmask,    in update_parent_subparts_cpumask()
   3135  struct cpumask *new_cpus, nodemask_t *new_mems,    in hotplug_update_tasks_legacy()
   3174  struct cpumask *new_cpus, nodemask_t *new_mems,    in hotplug_update_tasks()
   3487  void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)    in cpuset_cpus_allowed()
   3504  const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);    in cpuset_cpus_allowed()
   [all …]
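guarantee_online_cpus() intersects what a task could ever run on (task_cpu_possible_mask()) with what is currently online to produce a usable mask. A simplified, hedged sketch of that intersection with a fallback when it comes up empty; the real function also walks up the cpuset hierarchy:

#include <linux/cpumask.h>

/*
 * cpumask_and() returns false when the destination ends up empty,
 * which is used here to trigger the fallback to the full online mask.
 */
static void example_guarantee_online(const struct cpumask *task_possible,
				     struct cpumask *pmask)
{
	if (!cpumask_and(pmask, task_possible, cpu_online_mask))
		cpumask_copy(pmask, cpu_online_mask);
}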
|