Searched refs:cpumask (Results 1 – 21 of 21) sorted by relevance

/kernel/
padata.c
40 target_cpu = cpumask_first(pd->cpumask.pcpu); in padata_index_to_cpu()
42 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); in padata_index_to_cpu()
57 cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
122 if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) in padata_do_parallel()
178 num_cpus = cpumask_weight(pd->cpumask.pcpu); in padata_get_next()
352 const struct cpumask *pcpumask, in padata_setup_cpumasks()
353 const struct cpumask *cbcpumask) in padata_setup_cpumasks()
355 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) in padata_setup_cpumasks()
358 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); in padata_setup_cpumasks()
359 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { in padata_setup_cpumasks()
[all …]
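
A minimal sketch of the allocate-then-restrict pattern the padata hits show; the names my_ctx and my_setup_cpumasks are illustrative, not from padata.c:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct my_ctx {
	struct {
		cpumask_var_t pcpu;	/* CPUs doing parallel work */
		cpumask_var_t cbcpu;	/* CPUs running serial callbacks */
	} cpumask;
};

static int my_setup_cpumasks(struct my_ctx *ctx,
			     const struct cpumask *pcpumask,
			     const struct cpumask *cbcpumask)
{
	/* with CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t needs allocation */
	if (!alloc_cpumask_var(&ctx->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&ctx->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(ctx->cpumask.pcpu);
		return -ENOMEM;
	}
	/* keep only CPUs that are actually online */
	cpumask_and(ctx->cpumask.pcpu, pcpumask, cpu_online_mask);
	cpumask_and(ctx->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

The cpumask_first()/cpumask_next() pair at lines 40-42 walks to the Nth set bit, with cpumask_weight() at line 57 bounding the index.
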
stop_machine.c
142 static void queue_stop_cpus_work(const struct cpumask *cpumask, in queue_stop_cpus_work() argument
150 for_each_cpu(cpu, cpumask) { in queue_stop_cpus_work()
163 for_each_cpu(cpu, cpumask) in queue_stop_cpus_work()
168 static int __stop_cpus(const struct cpumask *cpumask, in __stop_cpus() argument
173 cpu_stop_init_done(&done, cpumask_weight(cpumask)); in __stop_cpus()
174 queue_stop_cpus_work(cpumask, fn, arg, &done); in __stop_cpus()
207 int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) in stop_cpus() argument
213 ret = __stop_cpus(cpumask, fn, arg); in stop_cpus()
236 int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) in try_stop_cpus() argument
243 ret = __stop_cpus(cpumask, fn, arg); in try_stop_cpus()
[all …]
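
A hedged caller-side sketch of the stop_cpus() interface seen above; my_quiesce_fn and its use are assumptions for illustration:

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

static int my_quiesce_fn(void *arg)
{
	/* runs on every CPU in the mask via that CPU's stopper thread */
	return 0;
}

static int my_quiesce_all(void)
{
	/* blocks until my_quiesce_fn has completed everywhere; internally
	 * this is cpu_stop_init_done() + queue_stop_cpus_work() above */
	return stop_cpus(cpu_online_mask, my_quiesce_fn, NULL);
}
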
smp.c
25 cpumask_var_t cpumask; member
47 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, in hotplug_cfd()
55 free_cpumask_var(cfd->cpumask); in hotplug_cfd()
66 free_cpumask_var(cfd->cpumask); in hotplug_cfd()
289 int smp_call_function_any(const struct cpumask *mask, in smp_call_function_any()
293 const struct cpumask *nodemask; in smp_call_function_any()
369 void smp_call_function_many(const struct cpumask *mask, in smp_call_function_many()
406 cpumask_and(cfd->cpumask, mask, cpu_online_mask); in smp_call_function_many()
407 cpumask_clear_cpu(this_cpu, cfd->cpumask); in smp_call_function_many()
410 if (unlikely(!cpumask_weight(cfd->cpumask))) in smp_call_function_many()
[all …]
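
The smp.c hits center on smp_call_function_many(); a minimal caller sketch, with my_ipi_fn hypothetical:

#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>

static void my_ipi_fn(void *info)
{
	/* runs in IPI context on each targeted CPU */
}

static void my_kick_other_cpus(void)
{
	/* preemption must stay disabled across the call; the current CPU is
	 * skipped, matching the cpumask_clear_cpu(this_cpu, ...) hit above */
	preempt_disable();
	smp_call_function_many(cpu_online_mask, my_ipi_fn, NULL, true);
	preempt_enable();
}
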
workqueue.c
1632 set_cpus_allowed_ptr(current, pool->attrs->cpumask); in worker_maybe_bind_and_lock()
1638 cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask)) in worker_maybe_bind_and_lock()
1726 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in create_worker()
3183 written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask); in wq_cpumask_show()
3202 ret = cpumask_parse(buf, attrs->cpumask); in wq_cpumask_store()
3248 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
3367 free_cpumask_var(attrs->cpumask); in free_workqueue_attrs()
3386 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) in alloc_workqueue_attrs()
3389 cpumask_copy(attrs->cpumask, cpu_possible_mask); in alloc_workqueue_attrs()
3400 cpumask_copy(to->cpumask, from->cpumask); in copy_workqueue_attrs()
[all …]
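
The workqueue hits trace the attrs life cycle (alloc, copy, parse, free). A sketch of restricting an unbound workqueue, assuming this kernel's alloc_workqueue_attrs()/apply_workqueue_attrs() signatures; my_restrict_wq is illustrative:

#include <linux/workqueue.h>
#include <linux/cpumask.h>

static int my_restrict_wq(struct workqueue_struct *wq,
			  const struct cpumask *allowed)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	/* alloc_workqueue_attrs() starts from cpu_possible_mask (line 3389) */
	cpumask_copy(attrs->cpumask, allowed);
	ret = apply_workqueue_attrs(wq, attrs);	/* WQ_UNBOUND queues only */
	free_workqueue_attrs(attrs);
	return ret;
}
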
cpu.c
696 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
700 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
704 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
708 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
743 void init_cpu_present(const struct cpumask *src) in init_cpu_present()
748 void init_cpu_possible(const struct cpumask *src) in init_cpu_possible()
753 void init_cpu_online(const struct cpumask *src) in init_cpu_online()
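
cpu.c derives the global masks by casting static bitmaps through to_cpumask(); the same idiom in miniature, with my_cpu_bits as a stand-in name:

#include <linux/cpumask.h>

static DECLARE_BITMAP(my_cpu_bits, CONFIG_NR_CPUS);
/* read-only view for consumers: a pointer cast, no copy */
const struct cpumask *const my_cpu_mask = to_cpumask(my_cpu_bits);

static void my_init_mask(const struct cpumask *src)
{
	/* writers cast the other way, as init_cpu_online() does above */
	cpumask_copy(to_cpumask(my_cpu_bits), src);
}
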
taskstats.c
288 static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) in add_del_listener()
345 static int parse(struct nlattr *na, struct cpumask *mask) in parse()
cpuset.c
319 struct cpumask *pmask) in guarantee_online_cpus()
695 struct cpumask *dp; in generate_sched_domains()
2241 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) in cpuset_cpus_allowed()
compat.c
586 unsigned len, struct cpumask *new_mask) in compat_get_user_cpu_mask()
/kernel/time/
tick-common.c
152 const struct cpumask *cpumask) in tick_setup_device() argument
190 if (!cpumask_equal(newdev->cpumask, cpumask)) in tick_setup_device()
191 irq_set_affinity(newdev->irq, cpumask); in tick_setup_device()
221 if (!cpumask_test_cpu(cpu, newdev->cpumask)) in tick_check_new_device()
228 if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) { in tick_check_new_device()
241 if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) in tick_check_new_device()
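
tick_setup_device() pins a per-cpu tick device by comparing its mask against cpumask_of(cpu); reduced to its core, assuming a struct clock_event_device with a valid irq:

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static void my_pin_tick_device(struct clock_event_device *dev, int cpu)
{
	/* already exclusively serving this CPU: nothing to do */
	if (cpumask_equal(dev->cpumask, cpumask_of(cpu)))
		return;
	/* otherwise steer the device interrupt to the CPU it ticks for */
	irq_set_affinity(dev->irq, cpumask_of(cpu));
}
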
tick-broadcast.c
50 struct cpumask *tick_get_broadcast_mask(void) in tick_get_broadcast_mask()
104 static void err_broadcast(const struct cpumask *mask) in err_broadcast()
181 static void tick_do_broadcast(struct cpumask *mask) in tick_do_broadcast()
416 struct cpumask *tick_get_broadcast_oneshot_mask(void) in tick_get_broadcast_oneshot_mask()
437 const struct cpumask *cpumask) in tick_broadcast_set_affinity() argument
442 if (cpumask_equal(bc->cpumask, cpumask)) in tick_broadcast_set_affinity()
445 bc->cpumask = cpumask; in tick_broadcast_set_affinity()
446 irq_set_affinity(bc->irq, bc->cpumask); in tick_broadcast_set_affinity()
678 static void tick_broadcast_init_next_event(struct cpumask *mask, in tick_broadcast_init_next_event()
clockevents.c
285 if (!dev->cpumask) { in clockevents_register_device()
287 dev->cpumask = cpumask_of(smp_processor_id()); in clockevents_register_device()
451 if (cpumask_test_cpu(cpu, dev->cpumask) && in clockevents_notify()
452 cpumask_weight(dev->cpumask) == 1 && in clockevents_notify()
/kernel/irq/
manage.c
128 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) in irq_copy_pending()
133 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) in irq_get_pending()
141 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } in irq_copy_pending()
143 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } in irq_get_pending()
146 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, in irq_do_set_affinity()
165 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) in __irq_set_affinity_locked()
196 int irq_set_affinity(unsigned int irq, const struct cpumask *mask) in irq_set_affinity()
211 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) in irq_set_affinity_hint()
229 cpumask_var_t cpumask; in irq_affinity_notify() local
232 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) in irq_affinity_notify()
[all …]
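
irq_set_affinity() is the public entry point among the hits above; a small caller sketch, with the CPU choice as an assumption:

#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/cpumask.h>

static int my_route_irq(unsigned int irq, int cpu)
{
	if (!cpu_online(cpu))
		return -EINVAL;
	/* cpumask_of() yields a const single-CPU mask; no allocation,
	 * unlike the alloc_cpumask_var() path in irq_affinity_notify() */
	return irq_set_affinity(irq, cpumask_of(cpu));
}
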
internals.h
100 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
105 const struct cpumask *dest, bool force);
proc.c
25 const struct cpumask *mask = desc->irq_data.affinity; in show_irq_affinity()
/kernel/trace/
ring_buffer.c
328 for_each_cpu(cpu, buffer->cpumask)
499 cpumask_var_t cpumask; member
623 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1283 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1305 cpumask_copy(buffer->cpumask, cpu_online_mask); in __ring_buffer_alloc()
1307 cpumask_copy(buffer->cpumask, cpu_possible_mask); in __ring_buffer_alloc()
1343 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1373 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1630 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
1720 if (!cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
[all …]
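
__ring_buffer_alloc() sizes its mask by hotplug support, then every per-cpu entry point validates against it; roughly, with my_-prefixed names illustrative:

#include <linux/cpumask.h>

static void my_buffer_init_mask(struct cpumask *mask)
{
#ifdef CONFIG_HOTPLUG_CPU
	/* track online CPUs; a hotplug notifier adds buffers later */
	cpumask_copy(mask, cpu_online_mask);
#else
	/* no hotplug: provision every possible CPU up front */
	cpumask_copy(mask, cpu_possible_mask);
#endif
}

static bool my_buffer_has_cpu(const struct cpumask *mask, int cpu)
{
	/* the guard used by ring_buffer_poll_wait() and ring_buffer_resize() */
	return cpumask_test_cpu(cpu, mask);
}
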
/kernel/sched/
sched.h
600 unsigned long cpumask[0]; /* iteration mask */ member
617 unsigned long cpumask[0]; member
620 static inline struct cpumask *sched_group_cpus(struct sched_group *sg) in sched_group_cpus()
622 return to_cpumask(sg->cpumask); in sched_group_cpus()
629 static inline struct cpumask *sched_group_mask(struct sched_group *sg) in sched_group_mask()
631 return to_cpumask(sg->sgp->cpumask); in sched_group_mask()
991 const struct cpumask *newmask);
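
sched_group keeps its mask as a trailing zero-length array and converts it with to_cpumask(); the allocation half is not in the hits, but under that layout it would look roughly like this (my_group is a stand-in):

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

struct my_group {
	atomic_t refcount;
	unsigned long cpumask[0];	/* mask bits share the allocation */
};

static inline struct cpumask *my_group_cpus(struct my_group *g)
{
	return to_cpumask(g->cpumask);
}

static struct my_group *my_group_alloc(void)
{
	/* cpumask_size() is the byte footprint of one full cpumask */
	return kzalloc(sizeof(struct my_group) + cpumask_size(), GFP_KERNEL);
}
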
core.c
1179 const struct cpumask *nodemask = NULL; in select_fallback_rq()
4145 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) in sched_setaffinity()
4219 struct cpumask *new_mask) in get_user_cpu_mask()
4251 long sched_getaffinity(pid_t pid, struct cpumask *mask) in sched_getaffinity()
4768 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) in do_set_cpus_allowed()
4800 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr()
5324 struct cpumask *groupmask) in sched_domain_debug_one()
5722 static const struct cpumask *cpu_cpu_mask(int cpu) in cpu_cpu_mask()
5748 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
5775 const struct cpumask *span = sched_domain_span(sd); in build_group_mask()
[all …]
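
sched_setaffinity() and set_cpus_allowed_ptr() are the two affinity entry points above; a kernel-side caller sketch (the task and CPU choice are assumptions):

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/cpumask.h>

static int my_pin_task(struct task_struct *p, int cpu)
{
	if (!cpu_online(cpu))
		return -EINVAL;
	/* migrates p away immediately if it is running elsewhere */
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}
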
cpupri.h
25 struct task_struct *p, struct cpumask *lowest_mask);
cpupri.c
68 struct cpumask *lowest_mask) in cpupri_find()
rt.c
473 static inline const struct cpumask *sched_rt_period_mask(void) in sched_rt_period_mask()
478 static inline const struct cpumask *sched_rt_period_mask(void) in sched_rt_period_mask()
546 static inline const struct cpumask *sched_rt_period_mask(void) in sched_rt_period_mask()
791 const struct cpumask *span; in do_sched_rt_period_timer()
1480 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); in find_lowest_rq()
1820 const struct cpumask *new_mask) in set_cpus_allowed_rt()
fair.c
3833 struct cpumask *dst_grpmask;
3838 struct cpumask *cpus;
5018 struct cpumask *cpus = __get_cpu_var(load_balance_mask); in load_balance()
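
rt.c and fair.c both rely on a preallocated per-cpu scratch mask (local_cpu_mask, load_balance_mask) fetched with __get_cpu_var() in the hot path; the setup half, sketched with an illustrative name:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/init.h>

static DEFINE_PER_CPU(cpumask_var_t, my_scratch_mask);

static int __init my_scratch_init(void)
{
	int cpu;

	/* allocate once per CPU, on that CPU's memory node */
	for_each_possible_cpu(cpu)
		if (!zalloc_cpumask_var_node(&per_cpu(my_scratch_mask, cpu),
					     GFP_KERNEL, cpu_to_node(cpu)))
			return -ENOMEM;
	return 0;
}

static void my_hot_path(void)
{
	/* preemption disabled by the caller: the per-cpu pointer is stable
	 * and needs no allocation or locking, as in find_lowest_rq() */
	struct cpumask *scratch = __get_cpu_var(my_scratch_mask);

	cpumask_copy(scratch, cpu_online_mask);
}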