/kernel/irq/

D | generic-chip.c |
    39  u32 mask = d->mask;    in irq_gc_mask_disable_reg() local
    42  irq_reg_writel(gc, mask, ct->regs.disable);    in irq_gc_mask_disable_reg()
    43  *ct->mask_cache &= ~mask;    in irq_gc_mask_disable_reg()
    58  u32 mask = d->mask;    in irq_gc_mask_set_bit() local
    61  *ct->mask_cache |= mask;    in irq_gc_mask_set_bit()
    62  irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);    in irq_gc_mask_set_bit()
    78  u32 mask = d->mask;    in irq_gc_mask_clr_bit() local
    81  *ct->mask_cache &= ~mask;    in irq_gc_mask_clr_bit()
    82  irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);    in irq_gc_mask_clr_bit()
    98  u32 mask = d->mask;    in irq_gc_unmask_enable_reg() local
    [all …]

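The entries above show the mask-cache idiom of the generic irq chip: the driver keeps a software shadow (*ct->mask_cache) of the hardware mask register and always writes the whole cache back, so the register never has to be read. A minimal user-space C sketch of that idiom, with plain variables standing in for the MMIO register (all names here are illustrative, not kernel API):

    #include <stdint.h>

    static uint32_t hw_mask_reg;   /* stand-in for the MMIO mask register */
    static uint32_t mask_cache;    /* driver's shadow copy of that register */

    /* Mask an interrupt: update the shadow, then write the whole cache
     * back, mirroring irq_gc_mask_set_bit() above. */
    static void gc_mask_set_bit(uint32_t mask)
    {
        mask_cache |= mask;
        hw_mask_reg = mask_cache;
    }

    /* Unmask: clear the bit in the shadow and write back
     * (cf. irq_gc_mask_clr_bit). */
    static void gc_mask_clr_bit(uint32_t mask)
    {
        mask_cache &= ~mask;
        hw_mask_reg = mask_cache;
    }
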
D | manage.c |
   158  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)    in irq_copy_pending() argument
   160  cpumask_copy(desc->pending_mask, mask);    in irq_copy_pending()
   163  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)    in irq_get_pending() argument
   165  cpumask_copy(mask, desc->pending_mask);    in irq_get_pending()
   171  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }    in irq_copy_pending() argument
   173  irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }    in irq_get_pending() argument
   176  int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,    in irq_do_set_affinity() argument
   183  ret = chip->irq_set_affinity(data, mask, force);    in irq_do_set_affinity()
   186  cpumask_copy(data->affinity, mask);    in irq_do_set_affinity()
   195  int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,    in irq_set_affinity_locked() argument
   [all …]

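irq_do_set_affinity() applies the new mask through the chip's irq_set_affinity() hook and copies it into data->affinity only afterwards. A hedged sketch of that call-then-cache pattern, with an invented callback type and a 64-bit mask in place of struct cpumask (the real code distinguishes several success return codes):

    #include <stdint.h>

    typedef int (*set_affinity_fn)(uint64_t mask);

    static uint64_t cached_affinity;   /* stand-in for data->affinity */

    /* Ask the hardware to move the interrupt; update the cached copy
     * only if the callback accepted the new mask. */
    static int do_set_affinity(set_affinity_fn set_hw, uint64_t mask)
    {
        int ret = set_hw(mask);

        if (ret == 0)
            cached_affinity = mask;
        return ret;
    }
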
D | autoprobe.c |
    34  unsigned long mask = 0;    in probe_irq_on() local
    97  mask |= 1 << i;    in probe_irq_on()
   102  return mask;    in probe_irq_on()
   120  unsigned int mask = 0;    in probe_irq_mask() local
   128  mask |= 1 << i;    in probe_irq_mask()
   137  return mask & val;    in probe_irq_mask()

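Both probe functions accumulate a bitmask one candidate IRQ line at a time, and probe_irq_mask() finally intersects it with the caller's set. A small self-contained sketch of the same accumulation, with a dummy predicate standing in for the real probe logic:

    #include <stdint.h>

    /* Hypothetical predicate: did IRQ line i fire during the probe window? */
    static int irq_fired(int i)
    {
        return i == 5;   /* dummy answer, for illustration only */
    }

    /* Collect candidate lines into a bitmask, then intersect with the
     * caller-supplied set, as probe_irq_mask() does with `mask & val`. */
    static unsigned int probe_mask(unsigned int val)
    {
        unsigned int mask = 0;

        for (int i = 0; i < 16; i++)   /* only IRQs 0..15 fit the mask */
            if (irq_fired(i))
                mask |= 1u << i;

        return mask & val;
    }
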
D | internals.h |
   113  extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
   181  static inline void irqd_clear(struct irq_data *d, unsigned int mask)    in irqd_clear() argument
   183  d->state_use_accessors &= ~mask;    in irqd_clear()
   186  static inline void irqd_set(struct irq_data *d, unsigned int mask)    in irqd_set() argument
   188  d->state_use_accessors |= mask;    in irqd_set()
   191  static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)    in irqd_has_set() argument
   193  return d->state_use_accessors & mask;    in irqd_has_set()

D | proc.c |
    43  const struct cpumask *mask = desc->irq_data.affinity;    in show_irq_affinity() local
    47  mask = desc->pending_mask;    in show_irq_affinity()
    50  seq_cpumask_list(m, mask);    in show_irq_affinity()
    52  seq_cpumask(m, mask);    in show_irq_affinity()
    61  cpumask_var_t mask;    in irq_affinity_hint_proc_show() local
    63  if (!zalloc_cpumask_var(&mask, GFP_KERNEL))    in irq_affinity_hint_proc_show()
    68  cpumask_copy(mask, desc->affinity_hint);    in irq_affinity_hint_proc_show()
    71  seq_cpumask(m, mask);    in irq_affinity_hint_proc_show()
    73  free_cpumask_var(mask);    in irq_affinity_hint_proc_show()

D | settings.h |
    72  irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)    in irq_settings_set_trigger_mask() argument
    75  desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;    in irq_settings_set_trigger_mask()

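The helper replaces only the trigger bits inside the status word: the source clears the IRQ_TYPE_SENSE_MASK field just before the OR shown above, so the new value lands in a clean field. A generic clear-then-set sketch of that read-modify-write, with a hypothetical 4-bit field:

    #include <stdint.h>

    #define SENSE_MASK 0x0fu   /* hypothetical 4-bit trigger-type field */

    /* Replace one bitfield inside a larger status word: clear the field,
     * then OR in the new value restricted to that field. */
    static void set_trigger_bits(uint32_t *status, uint32_t mask)
    {
        *status &= ~SENSE_MASK;
        *status |= mask & SENSE_MASK;
    }
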
/kernel/

D | smp.c |
   345  int smp_call_function_any(const struct cpumask *mask,    in smp_call_function_any() argument
   354  if (cpumask_test_cpu(cpu, mask))    in smp_call_function_any()
   359  for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;    in smp_call_function_any()
   360  cpu = cpumask_next_and(cpu, nodemask, mask)) {    in smp_call_function_any()
   366  cpu = cpumask_any_and(mask, cpu_online_mask);    in smp_call_function_any()
   388  void smp_call_function_many(const struct cpumask *mask,    in smp_call_function_many() argument
   404  cpu = cpumask_first_and(mask, cpu_online_mask);    in smp_call_function_many()
   406  cpu = cpumask_next_and(cpu, mask, cpu_online_mask);    in smp_call_function_many()
   413  next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);    in smp_call_function_many()
   415  next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);    in smp_call_function_many()
   [all …]

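smp_call_function_any() picks a target CPU in locality order: the current CPU if it is in the mask, otherwise a CPU on the same NUMA node, otherwise any online CPU in the mask. A toy model of that selection order, with 64-bit masks standing in for struct cpumask:

    #include <stdint.h>

    /* Returns the chosen CPU number, or -1 if the mask has no online CPU.
     * `self` is the calling CPU; `nodemask` is its NUMA node's CPU set. */
    static int pick_cpu(uint64_t mask, uint64_t nodemask, uint64_t online, int self)
    {
        if (mask & (1ull << self))
            return self;                        /* 1. current CPU */

        uint64_t local = mask & nodemask & online;
        if (local)
            return __builtin_ctzll(local);      /* 2. same-node CPU */

        uint64_t any = mask & online;
        return any ? __builtin_ctzll(any) : -1; /* 3. any online CPU */
    }
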
D | taskstats.c |
   288  static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)    in add_del_listener() argument
   295  if (!cpumask_subset(mask, cpu_possible_mask))    in add_del_listener()
   305  for_each_cpu(cpu, mask) {    in add_del_listener()
   332  for_each_cpu(cpu, mask) {    in add_del_listener()
   347  static int parse(struct nlattr *na, struct cpumask *mask)    in parse() argument
   364  ret = cpulist_parse(data, mask);    in parse()
   477  cpumask_var_t mask;    in cmd_attr_register_cpumask() local
   480  if (!alloc_cpumask_var(&mask, GFP_KERNEL))    in cmd_attr_register_cpumask()
   482  rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);    in cmd_attr_register_cpumask()
   485  rc = add_del_listener(info->snd_portid, mask, REGISTER);    in cmd_attr_register_cpumask()
   [all …]

D | signal.c |
   173  int next_signal(struct sigpending *pending, sigset_t *mask)    in next_signal() argument
   179  m = mask->sig;    in next_signal()
   250  bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)    in task_set_jobctl_pending() argument
   252  BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |    in task_set_jobctl_pending()
   254  BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));    in task_set_jobctl_pending()
   259  if (mask & JOBCTL_STOP_SIGMASK)    in task_set_jobctl_pending()
   262  task->jobctl |= mask;    in task_set_jobctl_pending()
   302  void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)    in task_clear_jobctl_pending() argument
   304  BUG_ON(mask & ~JOBCTL_PENDING_MASK);    in task_clear_jobctl_pending()
   306  if (mask & JOBCTL_STOP_PENDING)    in task_clear_jobctl_pending()
   [all …]

D | auditfilter.c |
   207  static inline int audit_match_class_bits(int class, u32 *mask)    in audit_match_class_bits() argument
   213  if (mask[i] & classes[class][i])    in audit_match_class_bits()
   227  entry->rule.mask) &&    in audit_match_signal()
   229  entry->rule.mask));    in audit_match_signal()
   235  entry->rule.mask));    in audit_match_signal()
   238  entry->rule.mask));    in audit_match_signal()
   288  entry->rule.mask[i] = rule->mask[i];    in audit_to_entry_common()
   292  __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];    in audit_to_entry_common()
   302  entry->rule.mask[j] |= class[j];    in audit_to_entry_common()
   632  for (i = 0; i < AUDIT_BITMASK_SIZE; i++) data->mask[i] = krule->mask[i];    in audit_krule_to_data()
   [all …]

D | up.c |
    53  void on_each_cpu_mask(const struct cpumask *mask,    in on_each_cpu_mask() argument
    58  if (cpumask_test_cpu(0, mask)) {    in on_each_cpu_mask()

D | audit_watch.c |
   160  parent->mark.mask = AUDIT_FS_WATCH;    in audit_init_parent()
   475  u32 mask, void *data, int data_type,    in audit_watch_handle_event() argument
   498  if (mask & (FS_CREATE|FS_MOVED_TO) && inode)    in audit_watch_handle_event()
   500  else if (mask & (FS_DELETE|FS_MOVED_FROM))    in audit_watch_handle_event()
   502  else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))    in audit_watch_handle_event()

D | padata.c |
   710  int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)    in padata_add_cpu() argument
   714  if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))    in padata_add_cpu()
   720  if (mask & PADATA_CPU_SERIAL)    in padata_add_cpu()
   722  if (mask & PADATA_CPU_PARALLEL)    in padata_add_cpu()
   769  int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)    in padata_remove_cpu() argument
   773  if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))    in padata_remove_cpu()
   779  if (mask & PADATA_CPU_SERIAL)    in padata_remove_cpu()
   781  if (mask & PADATA_CPU_PARALLEL)    in padata_remove_cpu()

D | compat.c |
   640  cpumask_var_t mask;    in COMPAT_SYSCALL_DEFINE3() local
   647  if (!alloc_cpumask_var(&mask, GFP_KERNEL))    in COMPAT_SYSCALL_DEFINE3()
   650  ret = sched_getaffinity(pid, mask);    in COMPAT_SYSCALL_DEFINE3()
   654  if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))    in COMPAT_SYSCALL_DEFINE3()
   659  free_cpumask_var(mask);    in COMPAT_SYSCALL_DEFINE3()
   890  long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,    in compat_get_bitmap() argument
   925  *mask++ = m;    in compat_get_bitmap()
   931  long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,    in compat_put_bitmap() argument
   948  m = *mask++;    in compat_put_bitmap()

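compat_get_bitmap()/compat_put_bitmap() translate between the 32-bit words a compat task sees and the native unsigned long layout. A simplified sketch of the get direction on a 64-bit host, low word first, without the uaccess and partial-word handling of the real code:

    #include <stdint.h>
    #include <stddef.h>

    /* Pack pairs of 32-bit words, as laid out by a 32-bit process,
     * into native 64-bit bitmap words (nr64 = number of 64-bit words). */
    static void get_bitmap64(uint64_t *mask, const uint32_t *umask, size_t nr64)
    {
        for (size_t i = 0; i < nr64; i++) {
            uint64_t m = umask[2 * i];              /* low half   */
            m |= (uint64_t)umask[2 * i + 1] << 32;  /* high half  */
            *mask++ = m;
        }
    }
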
D | kthread.c |
   328  static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)    in __kthread_bind_mask() argument
   339  do_set_cpus_allowed(p, mask);    in __kthread_bind_mask()
   349  void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)    in kthread_bind_mask() argument
   351  __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);    in kthread_bind_mask()

D | audit.c |
   149  .mask = -1,
   763  if (!(feature & uaf->mask))    in audit_set_feature()
   784  if (!(feature & uaf->mask))    in audit_set_feature()
   855  if (s.mask & AUDIT_STATUS_ENABLED) {    in audit_receive_msg()
   860  if (s.mask & AUDIT_STATUS_FAILURE) {    in audit_receive_msg()
   865  if (s.mask & AUDIT_STATUS_PID) {    in audit_receive_msg()
   882  if (s.mask & AUDIT_STATUS_RATE_LIMIT) {    in audit_receive_msg()
   887  if (s.mask & AUDIT_STATUS_BACKLOG_LIMIT) {    in audit_receive_msg()
   892  if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) {    in audit_receive_msg()

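The AUDIT_STATUS_* bits in s.mask act as a validity mask: each set bit declares that one field of the status message carries a value to apply. A sketch of that convention, with illustrative stand-in names for the constants and fields:

    #include <stdint.h>
    #include <stdio.h>

    #define STATUS_ENABLED 0x01u   /* illustrative stand-ins for the */
    #define STATUS_PID     0x04u   /* AUDIT_STATUS_* request bits    */

    struct status_msg {
        uint32_t mask;     /* which fields of this message are valid */
        uint32_t enabled;
        uint32_t pid;
    };

    /* Apply only the fields the sender marked valid, mirroring the
     * chain of `if (s.mask & AUDIT_STATUS_*)` tests in the listing. */
    static void apply_status(const struct status_msg *s)
    {
        if (s->mask & STATUS_ENABLED)
            printf("set enabled = %u\n", s->enabled);
        if (s->mask & STATUS_PID)
            printf("set pid = %u\n", s->pid);
    }
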
/kernel/sched/

D | cpupri.c |
   106  if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)    in cpupri_find()
   110  cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);    in cpupri_find()
   162  cpumask_set_cpu(cpu, vec->mask);    in cpupri_set()
   196  cpumask_clear_cpu(cpu, vec->mask);    in cpupri_set()
   218  if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))    in cpupri_init()
   233  free_cpumask_var(cp->pri_to_cpu[i].mask);    in cpupri_init()
   247  free_cpumask_var(cp->pri_to_cpu[i].mask);    in cpupri_cleanup()

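cpupri_find() treats a priority level as usable only when the task's allowed CPUs intersect that level's vector, and the intersection becomes the candidate set. A toy version with plain bitmasks in place of cpumasks:

    #include <stdint.h>
    #include <stdbool.h>

    /* One priority level is usable only if `allowed` overlaps its CPU
     * vector; on success the intersection is returned via *lowest. */
    static bool find_lowest(uint64_t allowed, uint64_t vec, uint64_t *lowest)
    {
        if ((allowed & vec) == 0)
            return false;            /* no overlap at this priority */
        *lowest = allowed & vec;
        return true;
    }
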
/kernel/time/

D | timekeeping_internal.h |
    16  static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)    in clocksource_delta() argument
    18  cycle_t ret = (now - last) & mask;    in clocksource_delta()
    23  static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)    in clocksource_delta() argument
    25  return (now - last) & mask;    in clocksource_delta()

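clocksource_delta() is the wraparound-safe delta idiom: with unsigned arithmetic, now - last is correct across a single counter wrap as long as the result is masked to the counter width. Restated as self-contained C with a worked example:

    #include <stdint.h>

    /* Masked subtraction: correct even after the counter wraps, provided
     * at most one wrap occurred between the two readings. */
    static uint64_t counter_delta(uint64_t now, uint64_t last, uint64_t mask)
    {
        return (now - last) & mask;
    }

    /* e.g. a 24-bit counter (mask = 0xffffff) that wrapped:
     * now = 0x000010, last = 0xfffff0 => delta = 0x20, the true count. */
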
D | timecounter.c |
    51  cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;    in timecounter_read_delta()
    78  u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;    in timecounter_cyc2time()
    86  if (cycle_delta > tc->cc->mask / 2) {    in timecounter_cyc2time()
    87  cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;    in timecounter_cyc2time()

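timecounter_cyc2time() adds a half-range heuristic on top of the masked delta: a delta larger than half the counter range is taken to mean the timestamp lies before cycle_last rather than almost a full wrap after it. A sketch of just that test:

    #include <stdint.h>
    #include <stdbool.h>

    /* True if `tstamp` should be interpreted as lying before `last`:
     * a masked delta above half the counter range signals the past. */
    static bool cycle_is_in_past(uint64_t tstamp, uint64_t last, uint64_t mask)
    {
        uint64_t delta = (tstamp - last) & mask;

        return delta > mask / 2;
    }
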
D | tick-broadcast.c |
   136  static void err_broadcast(const struct cpumask *mask)    in err_broadcast() argument
   256  static void tick_do_broadcast(struct cpumask *mask)    in tick_do_broadcast() argument
   264  if (cpumask_test_cpu(cpu, mask)) {    in tick_do_broadcast()
   265  cpumask_clear_cpu(cpu, mask);    in tick_do_broadcast()
   270  if (!cpumask_empty(mask)) {    in tick_do_broadcast()
   277  td = &per_cpu(tick_cpu_device, cpumask_first(mask));    in tick_do_broadcast()
   278  td->evtdev->broadcast(mask);    in tick_do_broadcast()
   823  static void tick_broadcast_init_next_event(struct cpumask *mask,    in tick_broadcast_init_next_event() argument
   829  for_each_cpu(cpu, mask) {    in tick_broadcast_init_next_event()

D | clocksource.c |
   210  delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);    in clocksource_watchdog()
   214  delta = clocksource_delta(csnow, cs->cs_last, cs->mask);    in clocksource_watchdog()
   473  u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)    in clocks_calc_max_nsecs() argument
   499  max_cycles = min(max_cycles, mask);    in clocks_calc_max_nsecs()
   515  cs->mask);    in clocksource_max_deferment()
   674  sec = (cs->mask - (cs->mask >> 3));    in __clocksource_updatefreq_scale()
   679  else if (sec > 600 && cs->mask > UINT_MAX)    in __clocksource_updatefreq_scale()

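Line 674 embodies a safety margin: __clocksource_updatefreq_scale() sizes its interval from only mask - mask/8, i.e. 87.5% of the counter range, leaving headroom before a wrap could be misread. The margin computation in isolation:

    #include <stdint.h>

    /* Use only 7/8 of the counter range when sizing the maximum
     * interval, mirroring `cs->mask - (cs->mask >> 3)` above. */
    static uint64_t max_cycles_with_margin(uint64_t mask)
    {
        return mask - (mask >> 3);
    }
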
D | timekeeping.c |
   145  tk->tkr_mono.mask = clock->mask;    in tk_setup_internals()
   150  tk->tkr_raw.mask = clock->mask;    in tk_setup_internals()
   217  delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);    in timekeeping_get_ns()
   468  delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);    in timekeeping_forward_now()
  1163  tk->tkr_mono.mask);    in timekeeping_resume()
  1543  tk->tkr_mono.cycle_last, tk->tkr_mono.mask);    in update_wall_time()

/kernel/debug/kdb/

D | kdb_bt.c |
    80  kdb_bt1(struct task_struct *p, unsigned long mask,    in kdb_bt1() argument
    87  if (!kdb_task_state(p, mask))    in kdb_bt1()
   120  unsigned long mask = kdb_task_state_string(argc ? argv[1] :    in kdb_bt() local
   127  if (kdb_bt1(p, mask, argcount, btaprompt))    in kdb_bt()
   136  if (kdb_bt1(p, mask, argcount, btaprompt))    in kdb_bt()

/kernel/rcu/

D | tree.c |
  1957  rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,    in rcu_report_qs_rnp() argument
  1965  if (!(rnp->qsmask & mask)) {    in rcu_report_qs_rnp()
  1971  rnp->qsmask &= ~mask;    in rcu_report_qs_rnp()
  1973  mask, rnp->qsmask, rnp->level,    in rcu_report_qs_rnp()
  1982  mask = rnp->grpmask;    in rcu_report_qs_rnp()
  2018  unsigned long mask;    in rcu_report_qs_rdp() local
  2038  mask = rdp->grpmask;    in rcu_report_qs_rdp()
  2039  if ((rnp->qsmask & mask) == 0) {    in rcu_report_qs_rdp()
  2050  rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */    in rcu_report_qs_rdp()
  2199  RCU_TRACE(unsigned long mask);    in rcu_cleanup_dying_cpu()
  [all …]

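rcu_report_qs_rnp() propagates a quiescent state up the rcu_node tree: clear our bit in this level's qsmask, and only when the level's mask empties does rnp->grpmask become the bit to clear one level up. A toy, lock-free model of that walk (the real code holds rnp->lock throughout and has extra exit conditions):

    #include <stdint.h>

    struct rcu_node_toy {
        uint64_t qsmask;              /* CPUs/children still owing a QS */
        uint64_t grpmask;             /* this node's bit in its parent  */
        struct rcu_node_toy *parent;
    };

    /* Clear `mask` at each level; move up only when a level empties. */
    static void report_qs(struct rcu_node_toy *rnp, uint64_t mask)
    {
        while (rnp) {
            if (!(rnp->qsmask & mask))
                return;               /* already reported here */
            rnp->qsmask &= ~mask;
            if (rnp->qsmask)
                return;               /* siblings still pending */
            mask = rnp->grpmask;      /* our bit one level up */
            rnp = rnp->parent;
        }
    }
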
/kernel/trace/

D | trace_output.c |
    76  unsigned long mask;    in ftrace_print_flags_seq() local
    83  mask = flag_array[i].mask;    in ftrace_print_flags_seq()
    84  if ((flags & mask) != mask)    in ftrace_print_flags_seq()
    88  flags &= ~mask;    in ftrace_print_flags_seq()
   118  if (val != symbol_array[i].mask)    in ftrace_print_symbols_seq()
   144  if (val != symbol_array[i].mask)    in ftrace_print_symbols_seq_u64()

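ftrace_print_flags_seq() decodes a flags word against a table: entries whose bits are not all set are skipped, matched bits are consumed with flags &= ~mask, and any undecoded remainder is printed raw. A self-contained version of the same loop:

    #include <stdio.h>

    struct flag_name { unsigned long mask; const char *name; };

    /* Print "A|B|0x..." for every table entry fully present in `flags`,
     * consuming matched bits so leftovers can be reported as raw hex. */
    static void print_flags(unsigned long flags, const struct flag_name *tbl, int n)
    {
        const char *sep = "";

        for (int i = 0; i < n; i++) {
            unsigned long mask = tbl[i].mask;

            if ((flags & mask) != mask)
                continue;                /* not all bits of this flag set */

            flags &= ~mask;              /* consume the matched bits */
            printf("%s%s", sep, tbl[i].name);
            sep = "|";
        }
        if (flags)
            printf("%s0x%lx", sep, flags);  /* undecoded leftover bits */
        printf("\n");
    }
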