
Searched refs:cpumask (Results 1 – 25 of 49) sorted by relevance

/kernel/
padata.c 54 target_cpu = cpumask_first(pd->cpumask.pcpu); in padata_index_to_cpu()
56 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); in padata_index_to_cpu()
67 int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
183 if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) { in padata_do_parallel()
184 if (cpumask_empty(pd->cpumask.cbcpu)) in padata_do_parallel()
188 cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu); in padata_do_parallel()
190 cpu = cpumask_first(pd->cpumask.cbcpu); in padata_do_parallel()
192 cpu = cpumask_next(cpu, pd->cpumask.cbcpu); in padata_do_parallel()
269 pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); in padata_find_next()
426 cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu); in padata_setup_cpumasks()
[all …]
stop_machine.c 176 const struct cpumask *active_cpus;
198 notrace void __weak stop_machine_yield(const struct cpumask *cpumask) in stop_machine_yield() argument
209 const struct cpumask *cpumask; in multi_cpu_stop() local
220 cpumask = cpu_online_mask; in multi_cpu_stop()
221 is_active = cpu == cpumask_first(cpumask); in multi_cpu_stop()
223 cpumask = msdata->active_cpus; in multi_cpu_stop()
224 is_active = cpumask_test_cpu(cpu, cpumask); in multi_cpu_stop()
230 stop_machine_yield(cpumask); in multi_cpu_stop()
393 static bool queue_stop_cpus_work(const struct cpumask *cpumask, in queue_stop_cpus_work() argument
409 for_each_cpu(cpu, cpumask) { in queue_stop_cpus_work()
[all …]
workqueue.c 1886 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in worker_attach_to_pool()
1965 kthread_bind_mask(worker->task, pool->attrs->cpumask); in create_worker()
3387 free_cpumask_var(attrs->cpumask); in free_workqueue_attrs()
3407 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) in alloc_workqueue_attrs()
3410 cpumask_copy(attrs->cpumask, cpu_possible_mask); in alloc_workqueue_attrs()
3421 cpumask_copy(to->cpumask, from->cpumask); in copy_workqueue_attrs()
3436 hash = jhash(cpumask_bits(attrs->cpumask), in wqattrs_hash()
3447 if (!cpumask_equal(a->cpumask, b->cpumask)) in wqattrs_equal()
3662 if (cpumask_subset(attrs->cpumask, in get_unbound_pool()
3890 int cpu_going_down, cpumask_t *cpumask) in wq_calc_node_cpumask() argument
[all …]
smp.c 91 cpumask_var_t cpumask; member
105 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, in smpcfd_prepare_cpu()
110 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
115 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
127 free_cpumask_var(cfd->cpumask); in smpcfd_dead_cpu()
855 int smp_call_function_any(const struct cpumask *mask, in smp_call_function_any()
859 const struct cpumask *nodemask; in smp_call_function_any()
893 static void smp_call_function_many_cond(const struct cpumask *mask, in smp_call_function_many_cond()
938 cpumask_and(cfd->cpumask, mask, cpu_online_mask); in smp_call_function_many_cond()
939 __cpumask_clear_cpu(this_cpu, cfd->cpumask); in smp_call_function_many_cond()
[all …]
cpu.c 2687 struct cpumask __cpu_possible_mask __read_mostly
2690 struct cpumask __cpu_possible_mask __read_mostly;
2694 struct cpumask __cpu_online_mask __read_mostly;
2697 struct cpumask __cpu_present_mask __read_mostly;
2700 struct cpumask __cpu_active_mask __read_mostly;
2703 struct cpumask __cpu_dying_mask __read_mostly;
2709 void init_cpu_present(const struct cpumask *src) in init_cpu_present()
2714 void init_cpu_possible(const struct cpumask *src) in init_cpu_possible()
2719 void init_cpu_online(const struct cpumask *src) in init_cpu_online()
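
The padata.c, smp.c and cpu.c hits above all lean on the same small set of cpumask primitives: test membership with cpumask_test_cpu(), count set bits with cpumask_weight(), and walk the mask with cpumask_first()/cpumask_next(). A minimal sketch of that pattern follows; pick_cpu_from_mask() is a hypothetical helper name, not a kernel function.

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Map a sequence number onto the Nth set CPU of a mask, padata-style. */
static int pick_cpu_from_mask(const struct cpumask *mask, unsigned int seq)
{
	unsigned int cpu, i, index;

	if (cpumask_empty(mask))
		return -EINVAL;

	/* seq % cpumask_weight() gives a stable slot inside the mask */
	index = seq % cpumask_weight(mask);

	cpu = cpumask_first(mask);
	for (i = 0; i < index; i++)
		cpu = cpumask_next(cpu, mask);	/* advance to the index-th set bit */

	return cpu;
}
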
/kernel/time/
tick-common.c 209 const struct cpumask *cpumask) in tick_setup_device() argument
259 if (!cpumask_equal(newdev->cpumask, cpumask)) in tick_setup_device()
260 irq_set_affinity(newdev->irq, cpumask); in tick_setup_device()
292 if (!cpumask_test_cpu(cpu, newdev->cpumask)) in tick_check_percpu()
294 if (cpumask_equal(newdev->cpumask, cpumask_of(cpu))) in tick_check_percpu()
300 if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) in tick_check_percpu()
322 !cpumask_equal(curdev->cpumask, newdev->cpumask); in tick_check_preferred()
tick-broadcast.c 62 struct cpumask *tick_get_broadcast_mask(void) in tick_get_broadcast_mask()
132 if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) in tick_set_oneshot_wakeup_device()
227 static void err_broadcast(const struct cpumask *mask) in err_broadcast()
346 static bool tick_do_broadcast(struct cpumask *mask) in tick_do_broadcast()
614 struct cpumask *tick_get_broadcast_oneshot_mask(void) in tick_get_broadcast_oneshot_mask()
635 const struct cpumask *cpumask) in tick_broadcast_set_affinity() argument
640 if (cpumask_equal(bc->cpumask, cpumask)) in tick_broadcast_set_affinity()
643 bc->cpumask = cpumask; in tick_broadcast_set_affinity()
644 irq_set_affinity(bc->irq, bc->cpumask); in tick_broadcast_set_affinity()
988 static void tick_broadcast_init_next_event(struct cpumask *mask, in tick_broadcast_init_next_event()
clockevents.c 450 if (!dev->cpumask) { in clockevents_register_device()
452 dev->cpumask = cpumask_of(smp_processor_id()); in clockevents_register_device()
455 if (dev->cpumask == cpu_all_mask) { in clockevents_register_device()
458 dev->cpumask = cpu_possible_mask; in clockevents_register_device()
650 if (cpumask_test_cpu(cpu, dev->cpumask) && in tick_cleanup_dead_cpu()
651 cpumask_weight(dev->cpumask) == 1 && in tick_cleanup_dead_cpu()
tick-internal.h 73 extern struct cpumask *tick_get_broadcast_mask(void);
132 extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
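
The tick-common.c and clockevents.c results show the usual test for a per-CPU clock event device: its cpumask must contain the CPU and, ideally, be exactly cpumask_of(cpu). A minimal sketch under assumed names (struct demo_event_dev and demo_dev_is_percpu are illustrative, not kernel types):

#include <linux/cpumask.h>
#include <linux/types.h>

struct demo_event_dev {
	const struct cpumask *cpumask;	/* CPUs this device can serve */
	int irq;
};

/* True only if the device serves exactly this one CPU. */
static bool demo_dev_is_percpu(const struct demo_event_dev *dev, int cpu)
{
	if (!cpumask_test_cpu(cpu, dev->cpumask))
		return false;

	return cpumask_equal(dev->cpumask, cpumask_of(cpu));
}
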
/kernel/irq/
ipi.c 24 const struct cpumask *dest) in irq_reserve_ipi()
115 int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest) in irq_destroy_ipi()
118 const struct cpumask *ipimask; in irq_destroy_ipi()
166 const struct cpumask *ipimask; in ipi_get_hwirq()
189 const struct cpumask *dest, unsigned int cpu) in ipi_send_verify()
191 const struct cpumask *ipimask; in ipi_send_verify()
269 int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest) in __ipi_send_mask()
334 int ipi_send_mask(unsigned int virq, const struct cpumask *dest) in ipi_send_mask()
manage.c 200 const struct cpumask *m = irq_data_get_effective_affinity_mask(data); in irq_validate_effective_affinity()
212 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, in irq_do_set_affinity()
217 const struct cpumask *prog_mask; in irq_do_set_affinity()
221 static struct cpumask tmp_mask; in irq_do_set_affinity()
248 const struct cpumask *hk_mask; in irq_do_set_affinity()
293 const struct cpumask *dest) in irq_set_affinity_pending()
303 const struct cpumask *dest) in irq_set_affinity_pending()
310 const struct cpumask *dest, bool force) in irq_try_set_affinity()
325 const struct cpumask *mask, bool force) in irq_set_affinity_deactivated()
348 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, in irq_set_affinity_locked()
[all …]
internals.h 138 const struct cpumask *dest, bool force);
419 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) in irq_copy_pending()
424 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) in irq_get_pending()
428 static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) in irq_desc_get_pending_mask()
447 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) in irq_copy_pending()
451 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) in irq_get_pending()
454 static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) in irq_desc_get_pending_mask()
cpuhotplug.c 22 const struct cpumask *m = irq_data_get_effective_affinity_mask(d); in irq_needs_fixup()
58 const struct cpumask *affinity; in migrate_one_irq()
177 const struct cpumask *hk_mask; in hk_should_isolate()
192 const struct cpumask *affinity = irq_data_get_affinity_mask(data); in irq_restore_affinity_of_irq()
matrix.c 130 const struct cpumask *msk) in matrix_find_best_cpu()
151 const struct cpumask *msk) in matrix_find_best_cpu_managed()
210 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk) in irq_matrix_reserve_managed()
251 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk) in irq_matrix_remove_managed()
286 int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, in irq_matrix_alloc_managed()
377 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, in irq_matrix_alloc()
irqdesc.c 82 const struct cpumask *affinity) in desc_smp_init()
100 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } in desc_smp_init()
104 const struct cpumask *affinity, struct module *owner) in desc_set_defaults()
391 const struct cpumask *affinity, in alloc_desc()
486 const struct cpumask *mask = NULL; in alloc_descs()
873 const struct cpumask *affinity) in irq_set_percpu_devid_partition()
902 int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity) in irq_get_percpu_devid_partition()
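
Throughout the irq hits above, affinity requests travel as const struct cpumask * and are intersected with an allowed mask before being programmed into hardware. A minimal sketch of that pattern (demo_set_affinity is a hypothetical helper, not the kernel's irq_do_set_affinity()):

#include <linux/cpumask.h>
#include <linux/errno.h>

static int demo_set_affinity(const struct cpumask *requested,
			     struct cpumask *effective)
{
	/* Keep only the requested CPUs that are actually online */
	if (!cpumask_and(effective, requested, cpu_online_mask))
		return -EINVAL;	/* request contained no online CPU */

	return 0;
}
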
/kernel/sched/
topology.c 39 struct cpumask *groupmask) in sched_domain_debug_one()
309 static void perf_domain_debug(const struct cpumask *cpu_map, in perf_domain_debug()
375 static bool build_perf_domains(const struct cpumask *cpu_map) in build_perf_domains()
891 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) in build_balance_mask()
893 const struct cpumask *sg_span = sched_group_span(sg); in build_balance_mask()
931 struct cpumask *sg_span; in build_group_from_child_sched_domain()
954 struct cpumask *mask = sched_domains_tmpmask2; in init_overlap_sched_group()
956 struct cpumask *sg_span; in init_overlap_sched_group()
1008 const struct cpumask *span = sched_domain_span(sd); in build_overlap_sched_groups()
1009 struct cpumask *covered = sched_domains_tmpmask; in build_overlap_sched_groups()
[all …]
cpupri.c 68 struct cpumask *lowest_mask, int idx) in __cpupri_find()
122 struct cpumask *lowest_mask) in cpupri_find()
145 struct cpumask *lowest_mask, in cpupri_find_fitness()
cpupri.h 22 struct cpumask *lowest_mask);
24 struct cpumask *lowest_mask,
sched.h 335 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
905 extern int sched_init_domains(const struct cpumask *cpu_map);
1238 static inline struct cpumask *sched_group_span(struct sched_group *sg);
1739 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
1745 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) in sched_numa_find_closest()
1873 unsigned long cpumask[]; /* Balance mask */ member
1892 unsigned long cpumask[]; member
1895 static inline struct cpumask *sched_group_span(struct sched_group *sg) in sched_group_span()
1897 return to_cpumask(sg->cpumask); in sched_group_span()
1903 static inline struct cpumask *group_balance_mask(struct sched_group *sg) in group_balance_mask()
[all …]
core.c 324 static struct cpumask sched_core_mask;
328 const struct cpumask *smt_mask = cpu_smt_mask(cpu); in sched_core_lock()
338 const struct cpumask *smt_mask = cpu_smt_mask(cpu); in sched_core_unlock()
358 const struct cpumask *smt_mask = cpu_smt_mask(cpu); in __sched_core_flip()
1093 const struct cpumask *hk_mask; in get_nohz_timer_target()
2399 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
2402 const struct cpumask *new_mask,
2757 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags) in set_cpus_allowed_common()
2770 __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags) in __do_set_cpus_allowed()
2814 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) in do_set_cpus_allowed()
[all …]
cpudeadline.h 19 int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask);
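
The sched.h hits show another common representation: instead of a cpumask_var_t, a structure ends with a flexible unsigned long cpumask[] array that is allocated together with its owner and converted on access with to_cpumask(), which is what sched_group_span() does. A minimal sketch with an illustrative struct demo_group (not a kernel type):

#include <linux/cpumask.h>
#include <linux/slab.h>

struct demo_group {
	int weight;
	unsigned long cpumask[];	/* mask storage appended to the struct */
};

static struct demo_group *demo_group_alloc(void)
{
	/* one allocation covers the struct plus the trailing mask */
	return kzalloc(sizeof(struct demo_group) + cpumask_size(), GFP_KERNEL);
}

static struct cpumask *demo_group_span(struct demo_group *g)
{
	return to_cpumask(g->cpumask);	/* same conversion as sched_group_span() */
}
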
/kernel/trace/
ring_buffer.c 273 for_each_cpu(cpu, buffer->cpumask)
276 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
509 cpumask_var_t cpumask; member
959 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
1068 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1767 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1791 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1812 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1859 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
2132 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
[all …]
trace_hwlat.c 312 static struct cpumask save_cpumask;
316 struct cpumask *current_mask = &save_cpumask; in move_to_next_cpu()
424 struct cpumask *current_mask = &save_cpumask; in start_single_kthread()
581 struct cpumask *current_mask = &save_cpumask; in start_per_cpu_kthreads()
/kernel/events/
hw_breakpoint.c 358 static const struct cpumask *cpumask_of_bp(struct perf_event *bp) in cpumask_of_bp()
372 const struct cpumask *cpumask = cpumask_of_bp(bp); in max_bp_pinned_slots() local
389 for_each_cpu(cpu, cpumask) { in max_bp_pinned_slots()
504 const struct cpumask *cpumask = cpumask_of_bp(bp); in toggle_bp_slot() local
506 for_each_cpu(cpu, cpumask) { in toggle_bp_slot()
/kernel/cgroup/
Dcpuset.c531 struct cpumask *pmask) in guarantee_online_cpus()
533 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); in guarantee_online_cpus()
1050 struct cpumask *dp; in generate_sched_domains()
1246 const struct cpumask *new_mask) in update_cpus_allowed()
1266 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) in update_tasks_cpumask()
1299 static void compute_effective_cpumask(struct cpumask *new_cpus, in compute_effective_cpumask()
1363 struct cpumask *newmask, in update_parent_subparts_cpumask()
3549 struct cpumask *new_cpus, nodemask_t *new_mems, in hotplug_update_tasks_legacy()
3588 struct cpumask *new_cpus, nodemask_t *new_mems, in hotplug_update_tasks()
3918 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) in cpuset_cpus_allowed()
[all …]
