/kernel/

padata.c
     42  target_cpu = cpumask_first(pd->cpumask.pcpu);  in padata_index_to_cpu()
     44  target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);  in padata_index_to_cpu()
     55  int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);  in padata_cpu_hash()
    116  if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {  in padata_do_parallel()
    117  if (!cpumask_weight(pd->cpumask.cbcpu))  in padata_do_parallel()
    121  cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);  in padata_do_parallel()
    123  cpu = cpumask_first(pd->cpumask.cbcpu);  in padata_do_parallel()
    125  cpu = cpumask_next(cpu, pd->cpumask.cbcpu);  in padata_do_parallel()
    204  pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);  in padata_find_next()
    345  const struct cpumask *pcpumask,  in padata_setup_cpumasks()
    [all …]

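The padata matches show the kernel's stock recipe for mapping a flat index onto the Nth set bit of a cpumask: reduce the index modulo cpumask_weight(), then walk from cpumask_first() with cpumask_next() (the cpumask_next_wrap() call at line 204 is the wrap-around variant of the same walk). A minimal sketch; index_to_nth_cpu() is a hypothetical helper, not a kernel API:

    #include <linux/cpumask.h>

    /*
     * Return the idx-th CPU set in @mask, wrapping idx into the mask's
     * population count the way padata_cpu_hash() does.  @mask must have
     * at least one bit set or the modulo divides by zero.
     */
    static int index_to_nth_cpu(const struct cpumask *mask, unsigned int idx)
    {
            int cpu = cpumask_first(mask);

            idx %= cpumask_weight(mask);
            while (idx--)
                    cpu = cpumask_next(cpu, mask);
            return cpu;
    }
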
stop_machine.c
    159  const struct cpumask *active_cpus;
    181  void __weak stop_machine_yield(const struct cpumask *cpumask)  in stop_machine_yield() (argument)
    192  const struct cpumask *cpumask;  in multi_cpu_stop() (local)
    203  cpumask = cpu_online_mask;  in multi_cpu_stop()
    204  is_active = cpu == cpumask_first(cpumask);  in multi_cpu_stop()
    206  cpumask = msdata->active_cpus;  in multi_cpu_stop()
    207  is_active = cpumask_test_cpu(cpu, cpumask);  in multi_cpu_stop()
    213  stop_machine_yield(cpumask);  in multi_cpu_stop()
    373  static bool queue_stop_cpus_work(const struct cpumask *cpumask,  in queue_stop_cpus_work() (argument)
    389  for_each_cpu(cpu, cpumask) {  in queue_stop_cpus_work()
    [all …]

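multi_cpu_stop() (lines 203-207 above) treats a NULL active set as "every online CPU participates, and the lowest-numbered one does the work". A condensed restatement of just that selection logic, under that reading of the matches:

    #include <linux/cpumask.h>

    /*
     * Decide whether @cpu is one of the "active" stoppers: with no
     * explicit mask, only the first online CPU is; otherwise membership
     * in the mask decides.  Mirrors the branch in multi_cpu_stop().
     */
    static bool cpu_is_active_stopper(int cpu, const struct cpumask *active_cpus)
    {
            if (!active_cpus)
                    return cpu == cpumask_first(cpu_online_mask);
            return cpumask_test_cpu(cpu, active_cpus);
    }
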
smp.c
     33  cpumask_var_t cpumask;  (member)
     47  if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,  in smpcfd_prepare_cpu()
     52  free_cpumask_var(cfd->cpumask);  in smpcfd_prepare_cpu()
     57  free_cpumask_var(cfd->cpumask);  in smpcfd_prepare_cpu()
     69  free_cpumask_var(cfd->cpumask);  in smpcfd_dead_cpu()
    369  int smp_call_function_any(const struct cpumask *mask,  in smp_call_function_any()
    373  const struct cpumask *nodemask;  in smp_call_function_any()
    412  void smp_call_function_many(const struct cpumask *mask,  in smp_call_function_many()
    457  cpumask_and(cfd->cpumask, mask, cpu_online_mask);  in smp_call_function_many()
    458  __cpumask_clear_cpu(this_cpu, cfd->cpumask);  in smp_call_function_many()
    [all …]

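Line 457 is the key move in smp_call_function_many(): the caller's mask is ANDed with cpu_online_mask before any IPI is sent. A sketch of the same clamping around on_each_cpu_mask(); ping_cpus() and remote_tick() are hypothetical names, and the call assumes process context:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>
    #include <linux/smp.h>

    static void remote_tick(void *info)
    {
            /* Runs on every CPU left in the mask, in IPI context. */
            pr_info("callback on CPU %d\n", smp_processor_id());
    }

    /*
     * Clamp @requested to the CPUs that are actually online, then run
     * remote_tick() on each of them and wait for completion.
     */
    static int ping_cpus(const struct cpumask *requested)
    {
            cpumask_var_t mask;

            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_and(mask, requested, cpu_online_mask);
            on_each_cpu_mask(mask, remote_tick, NULL, true);

            free_cpumask_var(mask);
            return 0;
    }
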
workqueue.c
   1850  set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);  in worker_attach_to_pool()
   1936  kthread_bind_mask(worker->task, pool->attrs->cpumask);  in create_worker()
   3342  free_cpumask_var(attrs->cpumask);  in free_workqueue_attrs()
   3362  if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))  in alloc_workqueue_attrs()
   3365  cpumask_copy(attrs->cpumask, cpu_possible_mask);  in alloc_workqueue_attrs()
   3376  cpumask_copy(to->cpumask, from->cpumask);  in copy_workqueue_attrs()
   3391  hash = jhash(cpumask_bits(attrs->cpumask),  in wqattrs_hash()
   3402  if (!cpumask_equal(a->cpumask, b->cpumask))  in wqattrs_equal()
   3605  if (cpumask_subset(attrs->cpumask,  in get_unbound_pool()
   3820  int cpu_going_down, cpumask_t *cpumask)  in wq_calc_node_cpumask() (argument)
   [all …]

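wqattrs_hash()/wqattrs_equal() (lines 3391 and 3402) show how to key a hash table by cpumask when deduplicating unbound-pool attributes: jhash() over the raw bitmap words for bucketing, cpumask_equal() for the exact check, since hashes can collide. The same pair in isolation, with hypothetical names:

    #include <linux/bitops.h>
    #include <linux/cpumask.h>
    #include <linux/jhash.h>

    /* Bucket hash over the raw bitmap words, as wqattrs_hash() does. */
    static u32 cpumask_hash(const struct cpumask *mask, u32 seed)
    {
            return jhash(cpumask_bits(mask),
                         BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), seed);
    }

    /* Exact comparison for the collision case, as in wqattrs_equal(). */
    static bool cpumask_same(const struct cpumask *a, const struct cpumask *b)
    {
            return cpumask_equal(a, b);
    }
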
up.c
     54  void on_each_cpu_mask(const struct cpumask *mask,  in on_each_cpu_mask()
     73  gfp_t gfp_flags, const struct cpumask *mask)  in on_each_cpu_cond_mask()

cpu.c
   2300  struct cpumask __cpu_possible_mask __read_mostly
   2303  struct cpumask __cpu_possible_mask __read_mostly;
   2307  struct cpumask __cpu_online_mask __read_mostly;
   2310  struct cpumask __cpu_present_mask __read_mostly;
   2313  struct cpumask __cpu_active_mask __read_mostly;
   2319  void init_cpu_present(const struct cpumask *src)  in init_cpu_present()
   2324  void init_cpu_possible(const struct cpumask *src)  in init_cpu_possible()
   2329  void init_cpu_online(const struct cpumask *src)  in init_cpu_online()

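The four globals above back the cpu_possible_mask, cpu_online_mask, cpu_present_mask, and cpu_active_mask accessors that the rest of the kernel reads. A trivial consumer, assuming it runs after SMP bring-up:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /*
     * Report the size of each global CPU mask.  The invariant is
     * active <= online <= present <= possible (as subsets).
     */
    static void report_cpu_masks(void)
    {
            unsigned int cpu;

            pr_info("possible=%u online=%u present=%u active=%u\n",
                    cpumask_weight(cpu_possible_mask),
                    cpumask_weight(cpu_online_mask),
                    cpumask_weight(cpu_present_mask),
                    cpumask_weight(cpu_active_mask));

            for_each_cpu(cpu, cpu_online_mask)
                    pr_info("CPU%u online\n", cpu);
    }
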
/kernel/time/

tick-common.c
    203  const struct cpumask *cpumask)  in tick_setup_device() (argument)
    255  if (!cpumask_equal(newdev->cpumask, cpumask))  in tick_setup_device()
    256  irq_set_affinity(newdev->irq, cpumask);  in tick_setup_device()
    288  if (!cpumask_test_cpu(cpu, newdev->cpumask))  in tick_check_percpu()
    290  if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))  in tick_check_percpu()
    296  if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))  in tick_check_percpu()
    318  !cpumask_equal(curdev->cpumask, newdev->cpumask);  in tick_check_preferred()

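tick_setup_device() (lines 255-256) only reprograms interrupt affinity when the device is not already limited to the target CPU, avoiding a redundant irq_set_affinity() call. The guard in isolation, with a hypothetical device struct standing in for the clock event device:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    struct demo_tick_dev {
            const struct cpumask *cpumask;  /* CPUs the device may interrupt */
            unsigned int irq;
    };

    /*
     * Steer the device's interrupt to @cpu, skipping the call when the
     * device already targets exactly that CPU.
     */
    static void steer_tick_irq(struct demo_tick_dev *dev, int cpu)
    {
            if (!cpumask_equal(dev->cpumask, cpumask_of(cpu)))
                    irq_set_affinity(dev->irq, cpumask_of(cpu));
    }
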
tick-broadcast.c
     59  struct cpumask *tick_get_broadcast_mask(void)  in tick_get_broadcast_mask()
    143  static void err_broadcast(const struct cpumask *mask)  in err_broadcast()
    264  static bool tick_do_broadcast(struct cpumask *mask)  in tick_do_broadcast()
    532  struct cpumask *tick_get_broadcast_oneshot_mask(void)  in tick_get_broadcast_oneshot_mask()
    553  const struct cpumask *cpumask)  in tick_broadcast_set_affinity() (argument)
    558  if (cpumask_equal(bc->cpumask, cpumask))  in tick_broadcast_set_affinity()
    561  bc->cpumask = cpumask;  in tick_broadcast_set_affinity()
    562  irq_set_affinity(bc->irq, bc->cpumask);  in tick_broadcast_set_affinity()
    867  static void tick_broadcast_init_next_event(struct cpumask *mask,  in tick_broadcast_init_next_event()

clockevents.c
    451  if (!dev->cpumask) {  in clockevents_register_device()
    453  dev->cpumask = cpumask_of(smp_processor_id());  in clockevents_register_device()
    456  if (dev->cpumask == cpu_all_mask) {  in clockevents_register_device()
    459  dev->cpumask = cpu_possible_mask;  in clockevents_register_device()
    651  if (cpumask_test_cpu(cpu, dev->cpumask) &&  in tick_cleanup_dead_cpu()
    652  cpumask_weight(dev->cpumask) == 1 &&  in tick_cleanup_dead_cpu()

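Lines 451-459 are a defensive normalization at registration time: a NULL ->cpumask becomes "the CPU registering this device", and the lazy cpu_all_mask shorthand is rewritten to cpu_possible_mask. Condensed, on a hypothetical stand-in struct (the real code also warns in both branches):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    struct demo_clkevt {
            const struct cpumask *cpumask;
    };

    /*
     * Normalize a driver-supplied mask as clockevents_register_device()
     * does.  Assumes the caller cannot migrate, so the CPU id is stable.
     */
    static void normalize_clkevt_mask(struct demo_clkevt *dev)
    {
            if (!dev->cpumask)
                    dev->cpumask = cpumask_of(smp_processor_id());
            else if (dev->cpumask == cpu_all_mask)
                    dev->cpumask = cpu_possible_mask;
    }
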
tick-internal.h
     74  extern struct cpumask *tick_get_broadcast_mask(void);
    132  extern struct cpumask *tick_get_broadcast_oneshot_mask(void);

tick-broadcast-hrtimer.c
     96  .cpumask = cpu_possible_mask,

/kernel/irq/

ipi.c
     24  const struct cpumask *dest)  in irq_reserve_ipi()
    115  int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)  in irq_destroy_ipi()
    118  struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;  in irq_destroy_ipi()
    165  struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;  in ipi_get_hwirq()
    187  const struct cpumask *dest, unsigned int cpu)  in ipi_send_verify()
    189  struct cpumask *ipimask = irq_data_get_affinity_mask(data);  in ipi_send_verify()
    263  int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)  in __ipi_send_mask()
    328  int ipi_send_mask(unsigned int virq, const struct cpumask *dest)  in ipi_send_mask()

affinity.c
     12  static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,  in irq_spread_init_one()
     15  const struct cpumask *siblmsk;  in irq_spread_init_one()
     84  const struct cpumask *mask, nodemask_t *nodemsk)  in get_nodes_in_cpumask()
    130  const struct cpumask *cpu_mask,  in alloc_nodes_vectors()
    132  struct cpumask *nmsk,  in alloc_nodes_vectors()
    251  const struct cpumask *cpu_mask,  in __irq_build_affinity_masks()
    252  struct cpumask *nmsk,  in __irq_build_affinity_masks()

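irq_spread_init_one() and friends distribute interrupt vectors over nodes while keeping SMT siblings apart. Stripped of the NUMA and sibling handling, the core move is a round-robin walk over a mask; this simplified sketch (spread_vectors() and vec_to_cpu are hypothetical) assumes a non-empty mask:

    #include <linux/cpumask.h>

    /*
     * Assign each of @nvec vectors the next CPU in @mask, wrapping back
     * to the first set bit when the walk falls off the end.
     */
    static void spread_vectors(const struct cpumask *mask,
                               unsigned int *vec_to_cpu, unsigned int nvec)
    {
            unsigned int v, cpu = cpumask_first(mask);

            for (v = 0; v < nvec; v++) {
                    vec_to_cpu[v] = cpu;
                    cpu = cpumask_next(cpu, mask);
                    if (cpu >= nr_cpu_ids)
                            cpu = cpumask_first(mask);
            }
    }
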
internals.h
    136  const struct cpumask *dest, bool force);
    417  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)  in irq_copy_pending()
    422  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)  in irq_get_pending()
    426  static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)  in irq_desc_get_pending_mask()
    441  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)  in irq_copy_pending()
    445  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)  in irq_get_pending()
    448  static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)  in irq_desc_get_pending_mask()

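The duplicated irq_copy_pending()/irq_get_pending() hits above (lines 417/441 and 422/445) are the usual config-stub idiom: real bodies when CONFIG_GENERIC_PENDING_IRQ is set, empty inlines otherwise, so call sites stay #ifdef-free. Schematically (a restatement of the header's shape, not the full source):

    #ifdef CONFIG_GENERIC_PENDING_IRQ
    static inline void
    irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
    {
            cpumask_copy(desc->pending_mask, mask);
    }
    #else /* !CONFIG_GENERIC_PENDING_IRQ */
    static inline void
    irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
    {
            /* No pending mask in this configuration: nothing to record. */
    }
    #endif
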
matrix.c
    130  const struct cpumask *msk)  in matrix_find_best_cpu()
    151  const struct cpumask *msk)  in matrix_find_best_cpu_managed()
    210  int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)  in irq_matrix_reserve_managed()
    251  void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)  in irq_matrix_remove_managed()
    285  int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,  in irq_matrix_alloc_managed()
    377  int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,  in irq_matrix_alloc()

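matrix_find_best_cpu() scans a mask for the CPU with the most free vectors. The generic shape of that search, with a hypothetical load() callback standing in for the matrix's per-CPU accounting (least-loaded equals most-available):

    #include <linux/cpumask.h>
    #include <linux/kernel.h>

    /*
     * Return the CPU in @msk with the smallest load, or UINT_MAX when
     * the mask is empty.
     */
    static unsigned int least_loaded_cpu(const struct cpumask *msk,
                                         unsigned int (*load)(unsigned int cpu))
    {
            unsigned int cpu, best = UINT_MAX, min_load = UINT_MAX;

            for_each_cpu(cpu, msk) {
                    unsigned int l = load(cpu);

                    if (l < min_load) {
                            min_load = l;
                            best = cpu;
                    }
            }
            return best;
    }
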
cpuhotplug.c
     21  const struct cpumask *m = irq_data_get_effective_affinity_mask(d);  in irq_needs_fixup()
     57  const struct cpumask *affinity;  in migrate_one_irq()
    177  const struct cpumask *affinity = irq_data_get_affinity_mask(data);  in irq_restore_affinity_of_irq()

manage.c
    200  const struct cpumask *m = irq_data_get_effective_affinity_mask(data);  in irq_validate_effective_affinity()
    210  int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,  in irq_do_set_affinity()
    237  const struct cpumask *dest)  in irq_set_affinity_pending()
    247  const struct cpumask *dest)  in irq_set_affinity_pending()
    254  const struct cpumask *dest, bool force)  in irq_try_set_affinity()
    268  int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,  in irq_set_affinity_locked()
    294  int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)  in __irq_set_affinity()
    309  int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)  in irq_set_affinity_hint()
    330  cpumask_var_t cpumask;  in irq_affinity_notify() (local)
    333  if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))  in irq_affinity_notify()
    [all …]

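irq_affinity_notify() (lines 330-333) shows why cpumask_var_t exists: with CONFIG_CPUMASK_OFFSTACK a mask is too large for the stack, so it is heap-allocated, filled from the shared state under the lock, and only used after the lock is dropped. The pattern with a hypothetical lock and source mask:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>
    #include <linux/spinlock.h>

    /*
     * Take a private snapshot of @shared under @lock, then work on the
     * copy without holding the lock.
     */
    static void use_mask_snapshot(spinlock_t *lock, const struct cpumask *shared)
    {
            cpumask_var_t snap;
            unsigned long flags;
            unsigned int cpu;

            if (!alloc_cpumask_var(&snap, GFP_KERNEL))
                    return;

            spin_lock_irqsave(lock, flags);
            cpumask_copy(snap, shared);
            spin_unlock_irqrestore(lock, flags);

            for_each_cpu(cpu, snap)
                    pr_debug("snapshot contains CPU %u\n", cpu);

            free_cpumask_var(snap);
    }
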
irqdesc.c
     82  const struct cpumask *affinity)  in desc_smp_init()
    100  desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }  in desc_smp_init()
    104  const struct cpumask *affinity, struct module *owner)  in desc_set_defaults()
    388  const struct cpumask *affinity,  in alloc_desc()
    482  const struct cpumask *mask = NULL;  in alloc_descs()
    901  const struct cpumask *affinity)  in irq_set_percpu_devid_partition()
    930  int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)  in irq_get_percpu_devid_partition()

/kernel/sched/

topology.c
     29  struct cpumask *groupmask)  in sched_domain_debug_one()
    276  static void perf_domain_debug(const struct cpumask *cpu_map,  in perf_domain_debug()
    342  static bool build_perf_domains(const struct cpumask *cpu_map)  in build_perf_domains()
    843  build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)  in build_balance_mask()
    845  const struct cpumask *sg_span = sched_group_span(sg);  in build_balance_mask()
    883  struct cpumask *sg_span;  in build_group_from_child_sched_domain()
    904  struct cpumask *mask = sched_domains_tmpmask2;  in init_overlap_sched_group()
    906  struct cpumask *sg_span;  in init_overlap_sched_group()
    933  const struct cpumask *span = sched_domain_span(sd);  in build_overlap_sched_groups()
    934  struct cpumask *covered = sched_domains_tmpmask;  in build_overlap_sched_groups()
    [all …]

sched.h
    317  extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
    318  extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
    798  extern int sched_init_domains(const struct cpumask *cpu_map);
   1285  extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
   1290  static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)  in sched_numa_find_closest()
   1411  unsigned long cpumask[0]; /* Balance mask */  (member)
   1429  unsigned long cpumask[0];  (member)
   1432  static inline struct cpumask *sched_group_span(struct sched_group *sg)  in sched_group_span()
   1434  return to_cpumask(sg->cpumask);  in sched_group_span()
   1440  static inline struct cpumask *group_balance_mask(struct sched_group *sg)  in group_balance_mask()
   [all …]

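The `unsigned long cpumask[0]` members at lines 1411 and 1429 are trailing bitmap storage: one allocation holds the struct and its mask, and to_cpumask() reinterprets the array, exactly as sched_group_span() does at line 1434. A stand-alone version of that layout; demo_group is hypothetical, and modern code spells the member `[]`:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct demo_group {
            unsigned int weight;
            unsigned long cpumask[];        /* trailing bitmap storage */
    };

    /*
     * Reinterpret the trailing array as a struct cpumask, like
     * sched_group_span().
     */
    static inline struct cpumask *demo_group_span(struct demo_group *g)
    {
            return to_cpumask(g->cpumask);
    }

    /* One allocation covers the struct plus cpumask_size() bytes of mask. */
    static struct demo_group *demo_group_alloc(void)
    {
            return kzalloc(sizeof(struct demo_group) + cpumask_size(),
                           GFP_KERNEL);
    }
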
cpupri.h
     22  struct cpumask *lowest_mask,

cpudeadline.h
     19  int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask);

/kernel/events/

hw_breakpoint.c
    121  static const struct cpumask *cpumask_of_bp(struct perf_event *bp)  in cpumask_of_bp()
    136  const struct cpumask *cpumask = cpumask_of_bp(bp);  in fetch_bp_busy_slots() (local)
    139  for_each_cpu(cpu, cpumask) {  in fetch_bp_busy_slots()
    194  const struct cpumask *cpumask = cpumask_of_bp(bp);  in toggle_bp_slot() (local)
    207  for_each_cpu(cpu, cpumask)  in toggle_bp_slot()

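cpumask_of_bp() (line 121) folds two cases into one iteration: a breakpoint pinned to a CPU yields the one-bit cpumask_of() mask, a per-task breakpoint yields cpu_possible_mask, and callers simply for_each_cpu() over the result. Schematic version with a plain int instead of the perf_event fields:

    #include <linux/cpumask.h>

    /*
     * Return the CPUs a breakpoint-like object must account slots on:
     * exactly one CPU when pinned (@pinned_cpu >= 0), else all of them.
     */
    static const struct cpumask *target_cpus(int pinned_cpu)
    {
            if (pinned_cpu >= 0)
                    return cpumask_of(pinned_cpu);
            return cpu_possible_mask;
    }
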
/kernel/trace/

ring_buffer.c
    271  for_each_cpu(cpu, buffer->cpumask)
    490  cpumask_var_t cpumask;  (member)
    593  if (!cpumask_test_cpu(cpu, buffer->cpumask))  in ring_buffer_wait()
    696  if (!cpumask_test_cpu(cpu, buffer->cpumask))  in ring_buffer_poll_wait()
   1392  if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))  in __ring_buffer_alloc()
   1416  cpumask_set_cpu(cpu, buffer->cpumask);  in __ring_buffer_alloc()
   1437  free_cpumask_var(buffer->cpumask);  in __ring_buffer_alloc()
   1460  free_cpumask_var(buffer->cpumask);  in ring_buffer_free()
   1730  !cpumask_test_cpu(cpu_id, buffer->cpumask))  in ring_buffer_resize()
   1812  if (!cpumask_test_cpu(cpu_id, buffer->cpumask))  in ring_buffer_resize()
   [all …]

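The ring buffer uses its cpumask as a validity bitmap: __ring_buffer_alloc() zeroes it, sets a bit per successfully created per-CPU buffer, and every entry point (ring_buffer_wait(), ring_buffer_resize(), …) bails out unless the bit is set. The same guard pattern on a hypothetical struct:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    struct demo_buffer {
            cpumask_var_t cpumask;          /* bit set => per-CPU state exists */
    };

    /* Record which CPUs got per-CPU state at allocation time. */
    static int demo_buffer_init(struct demo_buffer *buf)
    {
            unsigned int cpu;

            if (!zalloc_cpumask_var(&buf->cpumask, GFP_KERNEL))
                    return -ENOMEM;
            for_each_online_cpu(cpu)
                    cpumask_set_cpu(cpu, buf->cpumask);
            return 0;
    }

    /* Guard every per-CPU operation the way ring_buffer_wait() does. */
    static bool demo_buffer_cpu_valid(struct demo_buffer *buf, int cpu)
    {
            return cpumask_test_cpu(cpu, buf->cpumask);
    }
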
trace_hwlat.c
    267  static struct cpumask save_cpumask;
    272  struct cpumask *current_mask = &save_cpumask;  in move_to_next_cpu()
    353  struct cpumask *current_mask = &save_cpumask;  in start_kthread()

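move_to_next_cpu() rotates the single hwlat sampling kthread across the online CPUs: advance with cpumask_next(), wrap with cpumask_first(), then migrate the thread. A sketch of that rotation using set_cpus_allowed_ptr(); the real code additionally pins CPU hotplug while it reads cpu_online_mask:

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    /*
     * Move @task to the CPU after *@last_cpu in cpu_online_mask,
     * wrapping to the first online CPU at the end of the mask.
     */
    static void rotate_to_next_cpu(struct task_struct *task, int *last_cpu)
    {
            int cpu = cpumask_next(*last_cpu, cpu_online_mask);

            if (cpu >= nr_cpu_ids)
                    cpu = cpumask_first(cpu_online_mask);

            set_cpus_allowed_ptr(task, cpumask_of(cpu));
            *last_cpu = cpu;
    }
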