
Searched refs:mask (Results 1 – 25 of 75) sorted by relevance


/kernel/bpf/
tnum.c
12 #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
14 const struct tnum tnum_unknown = { .value = 0, .mask = -1 };
39 return TNUM(a.value << shift, a.mask << shift); in tnum_lshift()
44 return TNUM(a.value >> shift, a.mask >> shift); in tnum_rshift()
56 (u32)(((s32)a.mask) >> min_shift)); in tnum_arshift()
59 (s64)a.mask >> min_shift); in tnum_arshift()
66 sm = a.mask + b.mask; in tnum_add()
70 mu = chi | a.mask | b.mask; in tnum_add()
79 alpha = dv + a.mask; in tnum_sub()
80 beta = dv - b.mask; in tnum_sub()
[all …]
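
A tnum ("tristate number") tracks partially-known values in the BPF verifier: a bit set in .mask is unknown, while a bit clear in .mask takes its value from .value. A minimal standalone sketch of the tnum_add() logic visible in the hits above (a userspace re-derivation for illustration, not the kernel source):

#include <stdint.h>
#include <stdio.h>

/* A bit set in .mask is unknown; known bits come from .value. */
struct tnum { uint64_t value; uint64_t mask; };

/* Addition of two partially-known values, mirroring the sm/mu
 * lines shown in the tnum_add() hits above. */
static struct tnum tnum_add(struct tnum a, struct tnum b)
{
        uint64_t sm = a.mask + b.mask;          /* carries among unknown bits */
        uint64_t sv = a.value + b.value;        /* sum of the known parts */
        uint64_t sigma = sm + sv;
        uint64_t chi = sigma ^ sv;              /* bits a carry can disturb */
        uint64_t mu = chi | a.mask | b.mask;    /* all possibly-unknown bits */

        return (struct tnum){ .value = sv & ~mu, .mask = mu };
}

int main(void)
{
        struct tnum a = { .value = 0x8, .mask = 0x2 };  /* binary 10?0: 8 or 10 */
        struct tnum b = { .value = 0x1, .mask = 0x0 };  /* exactly 1 */
        struct tnum s = tnum_add(a, b);

        /* prints value=0x9 mask=0x2, i.e. the result is 9 or 11 */
        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)s.value, (unsigned long long)s.mask);
        return 0;
}
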
/kernel/irq/
generic-chip.c
41 u32 mask = d->mask; in irq_gc_mask_disable_reg() local
44 irq_reg_writel(gc, mask, ct->regs.disable); in irq_gc_mask_disable_reg()
45 *ct->mask_cache &= ~mask; in irq_gc_mask_disable_reg()
61 u32 mask = d->mask; in irq_gc_mask_set_bit() local
64 *ct->mask_cache |= mask; in irq_gc_mask_set_bit()
65 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); in irq_gc_mask_set_bit()
81 u32 mask = d->mask; in irq_gc_mask_clr_bit() local
84 *ct->mask_cache &= ~mask; in irq_gc_mask_clr_bit()
85 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); in irq_gc_mask_clr_bit()
101 u32 mask = d->mask; in irq_gc_unmask_enable_reg() local
[all …]
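
Every irq_gc_mask_* helper above follows one pattern: update *ct->mask_cache, then write either the changed bit or the whole cache to the hardware register. A hedged standalone sketch of that cache-then-write shape (reg_write() and struct fake_chip are illustrative stand-ins, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a memory-mapped mask register. */
static uint32_t hw_mask_reg;
static void reg_write(uint32_t val) { hw_mask_reg = val; }

struct fake_chip { uint32_t mask_cache; };

/* Shape of irq_gc_mask_set_bit(): flip the cached copy first,
 * then push the whole cache to the register. */
static void mask_set_bit(struct fake_chip *gc, uint32_t mask)
{
        gc->mask_cache |= mask;
        reg_write(gc->mask_cache);
}

static void mask_clr_bit(struct fake_chip *gc, uint32_t mask)
{
        gc->mask_cache &= ~mask;
        reg_write(gc->mask_cache);
}

int main(void)
{
        struct fake_chip gc = { .mask_cache = 0 };

        mask_set_bit(&gc, 1u << 3);     /* mask interrupt line 3 */
        mask_clr_bit(&gc, 1u << 3);     /* unmask it again */
        printf("reg=%#x\n", hw_mask_reg);
        return 0;
}

The cache matters because many mask registers are write-only: the driver cannot read back the current state, so it must remember it.
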
internals.h
227 static inline void irqd_clear(struct irq_data *d, unsigned int mask) in irqd_clear() argument
229 __irqd_to_state(d) &= ~mask; in irqd_clear()
232 static inline void irqd_set(struct irq_data *d, unsigned int mask) in irqd_set() argument
234 __irqd_to_state(d) |= mask; in irqd_set()
237 static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) in irqd_has_set() argument
239 return __irqd_to_state(d) & mask; in irqd_has_set()
419 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) in irq_copy_pending() argument
421 cpumask_copy(desc->pending_mask, mask); in irq_copy_pending()
424 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) in irq_get_pending() argument
426 cpumask_copy(mask, desc->pending_mask); in irq_get_pending()
[all …]
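
irqd_set(), irqd_clear() and irqd_has_set() are the standard flag-word trio: OR to set, AND-NOT to clear, AND to test. The same idiom standalone (flag names and values invented; only the shape matches internals.h):

#include <stdbool.h>
#include <stdio.h>

enum { ST_DISABLED = 1 << 0, ST_MASKED = 1 << 1 };

static unsigned int state;

static void state_set(unsigned int mask)    { state |= mask; }
static void state_clear(unsigned int mask)  { state &= ~mask; }
static bool state_has(unsigned int mask)    { return state & mask; }

int main(void)
{
        state_set(ST_MASKED);
        state_clear(ST_DISABLED);
        printf("masked=%d disabled=%d\n",
               state_has(ST_MASKED), state_has(ST_DISABLED));
        return 0;
}
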
autoprobe.c
33 unsigned long mask = 0; in probe_irq_on() local
96 mask |= 1 << i; in probe_irq_on()
101 return mask; in probe_irq_on()
119 unsigned int mask = 0; in probe_irq_mask() local
127 mask |= 1 << i; in probe_irq_mask()
136 return mask & val; in probe_irq_mask()
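
probe_irq_on() and probe_irq_mask() accumulate one bit per interrupt line that responded. A sketch of the accumulation; line_fired() is a made-up stand-in for the descriptor checks in autoprobe.c, and 1UL keeps the shift within unsigned long range:

#include <stdio.h>

static int line_fired(int i) { return i == 3 || i == 5; }

int main(void)
{
        unsigned long mask = 0;

        for (int i = 0; i < 16; i++)
                if (line_fired(i))
                        mask |= 1UL << i;

        printf("probe mask: %#lx\n", mask);     /* 0x28: lines 3 and 5 */
        return 0;
}
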
proc.c
49 const struct cpumask *mask; in show_irq_affinity() local
54 mask = desc->irq_common_data.affinity; in show_irq_affinity()
57 mask = desc->pending_mask; in show_irq_affinity()
63 mask = irq_data_get_effective_affinity_mask(&desc->irq_data); in show_irq_affinity()
73 seq_printf(m, "%*pbl\n", cpumask_pr_args(mask)); in show_irq_affinity()
77 seq_printf(m, "%*pb\n", cpumask_pr_args(mask)); in show_irq_affinity()
87 cpumask_var_t mask; in irq_affinity_hint_proc_show() local
89 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) in irq_affinity_hint_proc_show()
94 cpumask_copy(mask, desc->affinity_hint); in irq_affinity_hint_proc_show()
97 seq_printf(m, "%*pb\n", cpumask_pr_args(mask)); in irq_affinity_hint_proc_show()
[all …]
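
show_irq_affinity() prints a cpumask either as a hex bitmap (%*pb) or as a range list (%*pbl). Those printk extensions are kernel-only, so this sketch reimplements the list form for a single 64-bit mask:

#include <stdint.h>
#include <stdio.h>

/* List form of a 64-bit mask: "0-3,8" style, like %*pbl. */
static void print_cpulist(uint64_t mask)
{
        const char *sep = "";

        for (int cpu = 0; cpu < 64; cpu++) {
                if (!(mask & (1ULL << cpu)))
                        continue;

                int end = cpu;
                while (end + 1 < 64 && (mask & (1ULL << (end + 1))))
                        end++;

                if (end > cpu)
                        printf("%s%d-%d", sep, cpu, end);
                else
                        printf("%s%d", sep, cpu);
                sep = ",";
                cpu = end;              /* skip the run we just printed */
        }
        putchar('\n');
}

int main(void)
{
        print_cpulist(0x10f);           /* prints 0-3,8 */
        return 0;
}
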
manage.c
212 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, in irq_do_set_affinity() argument
252 cpumask_and(&tmp_mask, mask, hk_mask); in irq_do_set_affinity()
254 prog_mask = mask; in irq_do_set_affinity()
258 prog_mask = mask; in irq_do_set_affinity()
270 ret = chip->irq_set_affinity(data, mask, force); in irq_do_set_affinity()
279 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity()
325 const struct cpumask *mask, bool force) in irq_set_affinity_deactivated() argument
342 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_set_affinity_deactivated()
343 irq_data_update_effective_affinity(data, mask); in irq_set_affinity_deactivated()
348 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, in irq_set_affinity_locked() argument
[all …]
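
The manage.c hits intersect the requested affinity mask with the housekeeping CPUs before programming the chip, and fall back to the caller's mask when the intersection is empty. Flat-64-bit-mask sketch (the CPU numbers are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t requested = 0x0c;      /* CPUs 2-3 */
        uint64_t hk_mask   = 0x03;      /* housekeeping CPUs 0-1 */
        uint64_t prog_mask = requested & hk_mask;

        if (!prog_mask)
                prog_mask = requested;  /* no overlap: honor the request */

        printf("programmed mask: %#llx\n", (unsigned long long)prog_mask);
        return 0;
}
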
affinity.c
65 cpumask_copy(&masks[curvec].mask, irq_default_affinity); in irq_create_affinity_masks()
82 cpumask_copy(&masks[curvec + j].mask, &result[j]); in irq_create_affinity_masks()
95 cpumask_copy(&masks[curvec].mask, irq_default_affinity); in irq_create_affinity_masks()
/kernel/time/
timecounter.c
15 tc->mask = (1ULL << cc->shift) - 1; in timecounter_init()
40 cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask; in timecounter_read_delta()
44 tc->mask, &tc->frac); in timecounter_read_delta()
70 u64 cycles, u64 mask, u64 frac) in cc_cyc2ns_backwards() argument
82 u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; in timecounter_cyc2time()
90 if (delta > tc->cc->mask / 2) { in timecounter_cyc2time()
91 delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask; in timecounter_cyc2time()
92 nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac); in timecounter_cyc2time()
94 nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac); in timecounter_cyc2time()
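
timecounter_cyc2time() decides direction by comparing the masked delta against half the counter range: a delta above mask/2 means the timestamp lies before cycle_last, so the delta is recomputed the other way and subtracted. A sketch that drops the fractional-nanosecond bookkeeping; cyc2ns() is a stand-in for cyclecounter_cyc2ns():

#include <stdint.h>
#include <stdio.h>

/* Stand-in conversion: 1 cycle == 1 ns here. */
static uint64_t cyc2ns(uint64_t cycles) { return cycles; }

static uint64_t cyc2time(uint64_t nsec_base, uint64_t cycle_last,
                         uint64_t cycle_tstamp, uint64_t mask)
{
        uint64_t delta = (cycle_tstamp - cycle_last) & mask;

        /* A delta above half the range means the timestamp is in
         * the past relative to cycle_last. */
        if (delta > mask / 2)
                return nsec_base - cyc2ns((cycle_last - cycle_tstamp) & mask);
        return nsec_base + cyc2ns(delta);
}

int main(void)
{
        uint64_t mask = (1ULL << 32) - 1;       /* a 32-bit counter */

        printf("%llu\n", (unsigned long long)cyc2time(1000, 100, 150, mask)); /* 1050 */
        printf("%llu\n", (unsigned long long)cyc2time(1000, 150, 100, mask)); /* 950 */
        return 0;
}
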
timekeeping_internal.h
19 static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) in clocksource_delta() argument
21 u64 ret = (now - last) & mask; in clocksource_delta()
27 return ret & ~(mask >> 1) ? 0 : ret; in clocksource_delta()
30 static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) in clocksource_delta() argument
32 return (now - last) & mask; in clocksource_delta()
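
clocksource_delta() subtracts modulo the counter width via the mask; the guarded variant above additionally treats a result in the upper half of the range (top masked bit set) as the clock having moved backwards and clamps it to zero. Standalone and runnable as-is:

#include <stdint.h>
#include <stdio.h>

static uint64_t clocksource_delta(uint64_t now, uint64_t last, uint64_t mask)
{
        uint64_t ret = (now - last) & mask;

        /* Deltas in the upper half of the range look like time
         * going backwards; report 0 instead. */
        return (ret & ~(mask >> 1)) ? 0 : ret;
}

int main(void)
{
        uint64_t mask = (1ULL << 32) - 1;       /* a 32-bit counter */

        /* normal forward motion across the wrap point: prints 32 */
        printf("%llu\n", (unsigned long long)
               clocksource_delta(0x00000010, 0xfffffff0, mask));
        /* small backwards motion is clamped: prints 0 */
        printf("%llu\n", (unsigned long long)
               clocksource_delta(0xfffffff0, 0x00000010, mask));
        return 0;
}
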
clocksource.c
236 wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask); in cs_watchdog_read()
256 wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask); in cs_watchdog_read()
362 delta = (s64)((csnow_mid - csnow_begin) & cs->mask); in clocksource_verify_percpu()
365 delta = (csnow_end - csnow_mid) & cs->mask; in clocksource_verify_percpu()
368 delta = clocksource_delta(csnow_end, csnow_begin, cs->mask); in clocksource_verify_percpu()
458 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask); in clocksource_watchdog()
462 delta = clocksource_delta(csnow, cs->cs_last, cs->mask); in clocksource_watchdog()
499 watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask); in clocksource_watchdog()
501 cs->name, cs_nsec, csnow, cslast, cs->mask); in clocksource_watchdog()
843 suspend_clocksource->mask); in clocksource_stop_suspend_timing()
[all …]
tick-broadcast.c
227 static void err_broadcast(const struct cpumask *mask) in err_broadcast() argument
346 static bool tick_do_broadcast(struct cpumask *mask) in tick_do_broadcast() argument
355 if (cpumask_test_cpu(cpu, mask)) { in tick_do_broadcast()
358 cpumask_clear_cpu(cpu, mask); in tick_do_broadcast()
374 if (!cpumask_empty(mask)) { in tick_do_broadcast()
381 td = &per_cpu(tick_cpu_device, cpumask_first(mask)); in tick_do_broadcast()
382 td->evtdev->broadcast(mask); in tick_do_broadcast()
988 static void tick_broadcast_init_next_event(struct cpumask *mask, in tick_broadcast_init_next_event() argument
994 for_each_cpu(cpu, mask) { in tick_broadcast_init_next_event()
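
tick_do_broadcast() peels the current CPU out of the mask and handles its tick locally, then hands whatever remains to one tick device's broadcast() hook. Flat-mask sketch (handle_local() and broadcast() are illustrative stand-ins):

#include <stdint.h>
#include <stdio.h>

static void handle_local(void)       { puts("local tick"); }
static void broadcast(uint64_t mask) { printf("broadcast to %#llx\n",
                                              (unsigned long long)mask); }

int main(void)
{
        int this_cpu = 2;
        uint64_t mask = 0x0d;                   /* CPUs 0, 2, 3 */

        if (mask & (1ULL << this_cpu)) {
                mask &= ~(1ULL << this_cpu);    /* avoid an IPI to ourselves */
                handle_local();
        }
        if (mask)
                broadcast(mask);                /* CPUs 0 and 3 remain */
        return 0;
}
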
vsyscall.c
25 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; in update_vdso_data()
29 vdata[CS_RAW].mask = tk->tkr_raw.mask; in update_vdso_data()
/kernel/rcu/
tree_exp.h
79 unsigned long mask; in sync_exp_reset_tree_hotplug() local
111 mask = rnp->grpmask; in sync_exp_reset_tree_hotplug()
118 rnp_up->expmaskinit |= mask; in sync_exp_reset_tree_hotplug()
122 mask = rnp_up->grpmask; in sync_exp_reset_tree_hotplug()
187 unsigned long mask; in __rcu_report_exp_rnp() local
206 mask = rnp->grpmask; in __rcu_report_exp_rnp()
210 WARN_ON_ONCE(!(rnp->expmask & mask)); in __rcu_report_exp_rnp()
211 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); in __rcu_report_exp_rnp()
232 unsigned long mask, bool wake) in rcu_report_exp_cpu_mult() argument
239 if (!(rnp->expmask & mask)) { in rcu_report_exp_cpu_mult()
[all …]
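
The tree_exp.h hits clear a node's grpmask bit out of its parent's expmask as subtrees report quiescence; an empty mask at the root ends the expedited grace period. Minimal two-level sketch, without the locking the real code needs:

#include <stdint.h>
#include <stdio.h>

struct node { uint64_t expmask; };

static void report_child(struct node *parent, uint64_t child_bit)
{
        parent->expmask &= ~child_bit;
        if (!parent->expmask)
                printf("all children reported: grace period complete\n");
}

int main(void)
{
        struct node root = { .expmask = 0x3 };  /* two children pending */

        report_child(&root, 0x1);
        report_child(&root, 0x2);
        return 0;
}
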
tree.c
146 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1475 unsigned long mask; in rcu_gp_init() local
1594 mask = rnp->qsmask & ~rnp->qsmaskinitnext; in rcu_gp_init()
1595 rnp->rcu_gp_init_mask = mask; in rcu_gp_init()
1596 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) in rcu_gp_init()
1597 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_gp_init()
1948 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, in rcu_report_qs_rnp() argument
1959 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { in rcu_report_qs_rnp()
1971 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); in rcu_report_qs_rnp()
1973 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
[all …]
/kernel/
compat.c
148 cpumask_var_t mask; in COMPAT_SYSCALL_DEFINE3() local
155 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) in COMPAT_SYSCALL_DEFINE3()
158 ret = sched_getaffinity(pid, mask); in COMPAT_SYSCALL_DEFINE3()
162 if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8)) in COMPAT_SYSCALL_DEFINE3()
167 free_cpumask_var(mask); in COMPAT_SYSCALL_DEFINE3()
193 long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, in compat_get_bitmap() argument
209 *mask++ = ((unsigned long)l2 << BITS_PER_COMPAT_LONG) | l1; in compat_get_bitmap()
213 unsafe_get_user(*mask, umask++, Efault); in compat_get_bitmap()
222 long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, in compat_put_bitmap() argument
235 unsigned long m = *mask++; in compat_put_bitmap()
[all …]
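
compat_get_bitmap() rebuilds each native 64-bit bitmap word from two consecutive 32-bit words supplied by a compat (32-bit) task, low half first. The merge in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t l1 = 0xdeadbeef;       /* bits 0..31  */
        uint32_t l2 = 0x00c0ffee;       /* bits 32..63 */
        uint64_t word = ((uint64_t)l2 << 32) | l1;

        printf("%#llx\n", (unsigned long long)word); /* 0xc0ffeedeadbeef */
        return 0;
}
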
taskstats.c
296 static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) in add_del_listener() argument
303 if (!cpumask_subset(mask, cpu_possible_mask)) in add_del_listener()
313 for_each_cpu(cpu, mask) { in add_del_listener()
340 for_each_cpu(cpu, mask) { in add_del_listener()
355 static int parse(struct nlattr *na, struct cpumask *mask) in parse() argument
372 ret = cpulist_parse(data, mask); in parse()
459 cpumask_var_t mask; in cmd_attr_register_cpumask() local
462 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) in cmd_attr_register_cpumask()
464 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask); in cmd_attr_register_cpumask()
467 rc = add_del_listener(info->snd_portid, mask, REGISTER); in cmd_attr_register_cpumask()
[all …]
signal.c
211 int next_signal(struct sigpending *pending, sigset_t *mask) in next_signal() argument
217 m = mask->sig; in next_signal()
288 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask) in task_set_jobctl_pending() argument
290 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME | in task_set_jobctl_pending()
292 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); in task_set_jobctl_pending()
297 if (mask & JOBCTL_STOP_SIGMASK) in task_set_jobctl_pending()
300 task->jobctl |= mask; in task_set_jobctl_pending()
340 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask) in task_clear_jobctl_pending() argument
342 BUG_ON(mask & ~JOBCTL_PENDING_MASK); in task_clear_jobctl_pending()
344 if (mask & JOBCTL_STOP_PENDING) in task_clear_jobctl_pending()
[all …]
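
task_set_jobctl_pending() rejects stray bits up front with BUG_ON(mask & ~allowed), so misuse is caught immediately. The same guard standalone; the flag values here are invented and only the names echo signal.c:

#include <assert.h>
#include <stdio.h>

#define JOBCTL_STOP_PENDING  (1u << 0)
#define JOBCTL_TRAPPING      (1u << 1)
#define JOBCTL_PENDING_MASK  (JOBCTL_STOP_PENDING | JOBCTL_TRAPPING)

static unsigned int jobctl;

static void set_jobctl(unsigned int mask)
{
        assert(!(mask & ~JOBCTL_PENDING_MASK)); /* reject unknown bits */
        jobctl |= mask;
}

int main(void)
{
        set_jobctl(JOBCTL_STOP_PENDING);
        printf("jobctl=%#x\n", jobctl);
        return 0;
}
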
smp.c
855 int smp_call_function_any(const struct cpumask *mask, in smp_call_function_any() argument
864 if (cpumask_test_cpu(cpu, mask)) in smp_call_function_any()
869 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; in smp_call_function_any()
870 cpu = cpumask_next_and(cpu, nodemask, mask)) { in smp_call_function_any()
876 cpu = cpumask_any_and(mask, cpu_online_mask); in smp_call_function_any()
893 static void smp_call_function_many_cond(const struct cpumask *mask, in smp_call_function_many_cond() argument
926 if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask)) in smp_call_function_many_cond()
930 cpu = cpumask_first_and(mask, cpu_online_mask); in smp_call_function_many_cond()
932 cpu = cpumask_next_and(cpu, mask, cpu_online_mask); in smp_call_function_many_cond()
938 cpumask_and(cfd->cpumask, mask, cpu_online_mask); in smp_call_function_many_cond()
[all …]
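
smp_call_function_any() picks a target by successively ANDing candidate masks: the current CPU if eligible, else a CPU on the same NUMA node, else any online CPU in the mask. Flat-mask sketch with an invented topology:

#include <stdint.h>
#include <stdio.h>

static int first_bit(uint64_t m)
{
        for (int i = 0; i < 64; i++)
                if (m & (1ULL << i))
                        return i;
        return -1;
}

int main(void)
{
        uint64_t mask = 0xf0, nodemask = 0x0c, online = 0xff;
        int this_cpu = 1, cpu;

        if (mask & (1ULL << this_cpu))
                cpu = this_cpu;                 /* cheapest: ourselves */
        else if (first_bit(nodemask & mask) >= 0)
                cpu = first_bit(nodemask & mask); /* same NUMA node */
        else
                cpu = first_bit(mask & online); /* any online CPU */

        printf("chosen cpu: %d\n", cpu);        /* prints 4 */
        return 0;
}
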
auditfilter.c
199 static inline int audit_match_class_bits(int class, u32 *mask) in audit_match_class_bits() argument
205 if (mask[i] & classes[class][i]) in audit_match_class_bits()
219 entry->rule.mask) && in audit_match_signal()
221 entry->rule.mask)); in audit_match_signal()
227 entry->rule.mask)); in audit_match_signal()
230 entry->rule.mask)); in audit_match_signal()
282 entry->rule.mask[i] = rule->mask[i]; in audit_to_entry_common()
286 __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)]; in audit_to_entry_common()
296 entry->rule.mask[j] |= class[j]; in audit_to_entry_common()
697 for (i = 0; i < AUDIT_BITMASK_SIZE; i++) data->mask[i] = krule->mask[i]; in audit_krule_to_data()
[all …]
audit_fsnotify.c
98 audit_mark->mark.mask = AUDIT_FS_EVENTS; in audit_alloc_mark()
156 static int audit_mark_handle_event(struct fsnotify_mark *inode_mark, u32 mask, in audit_mark_handle_event() argument
167 if (mask & (FS_CREATE|FS_MOVED_TO|FS_DELETE|FS_MOVED_FROM)) { in audit_mark_handle_event()
171 } else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF)) { in audit_mark_handle_event()
/kernel/sched/
cpupri.c
99 if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids) in __cpupri_find()
103 cpumask_and(lowest_mask, &p->cpus_mask, vec->mask); in __cpupri_find()
233 cpumask_set_cpu(cpu, vec->mask); in cpupri_set()
267 cpumask_clear_cpu(cpu, vec->mask); in cpupri_set()
287 if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) in cpupri_init()
302 free_cpumask_var(cp->pri_to_cpu[i].mask); in cpupri_init()
316 free_cpumask_var(cp->pri_to_cpu[i].mask); in cpupri_cleanup()
topology.c
891 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask) in build_balance_mask() argument
898 cpumask_clear(mask); in build_balance_mask()
915 cpumask_set_cpu(i, mask); in build_balance_mask()
919 WARN_ON_ONCE(cpumask_empty(mask)); in build_balance_mask()
954 struct cpumask *mask = sched_domains_tmpmask2; in init_overlap_sched_group() local
959 build_balance_mask(sd, sg, mask); in init_overlap_sched_group()
960 cpu = cpumask_first(mask); in init_overlap_sched_group()
964 cpumask_copy(group_balance_mask(sg), mask); in init_overlap_sched_group()
966 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask)); in init_overlap_sched_group()
1549 sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
[all …]
/kernel/dma/
mapping.c
112 static bool dma_go_direct(struct device *dev, dma_addr_t mask, in dma_go_direct() argument
119 return min_not_zero(mask, dev->bus_dma_limit) >= in dma_go_direct()
717 static int dma_supported(struct device *dev, u64 mask) in dma_supported() argument
726 return dma_direct_supported(dev, mask); in dma_supported()
729 return ops->dma_supported(dev, mask); in dma_supported()
751 void arch_dma_set_mask(struct device *dev, u64 mask);
753 #define arch_dma_set_mask(dev, mask) do { } while (0) argument
756 int dma_set_mask(struct device *dev, u64 mask) in dma_set_mask() argument
762 mask = (dma_addr_t)mask; in dma_set_mask()
764 if (!dev->dma_mask || !dma_supported(dev, mask)) in dma_set_mask()
[all …]
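
mapping.c implements the consumer side of dma_set_mask(); a driver typically calls it (or the combined helper) once at probe time, following Documentation/core-api/dma-api-howto.rst. A conventional fragment, where pdev is a hypothetical device being probed:

/* In a driver's probe routine (pdev is hypothetical): */
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
        dev_err(&pdev->dev, "32-bit DMA not supported\n");
        return -ENODEV;
}
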
/kernel/debug/kdb/
kdb_bt.c
77 kdb_bt1(struct task_struct *p, const char *mask, bool btaprompt) in kdb_bt1() argument
84 if (!kdb_task_state(p, mask)) in kdb_bt1()
141 const char *mask = argc ? argv[1] : kdbgetenv("PS"); in kdb_bt() local
148 if (kdb_bt1(p, mask, btaprompt)) in kdb_bt()
157 if (kdb_bt1(p, mask, btaprompt)) in kdb_bt()
/kernel/kcsan/
report.c
391 u64 old, u64 new, u64 mask) in print_report() argument
476 if (mask) in print_report()
477 diff &= mask; in print_report()
482 if (mask) { in print_report()
484 hex_len, diff, hex_len, mask); in print_report()
668 int watchpoint_idx, u64 old, u64 new, u64 mask) in kcsan_report_known_origin() argument
692 print_report(value_change, &ai, other_info, old, new, mask); in kcsan_report_known_origin()
701 unsigned long ip, u64 old, u64 new, u64 mask) in kcsan_report_unknown_origin() argument
710 print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask); in kcsan_report_unknown_origin()
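
KCSAN's print_report() only reports a value change where bits covered by the bits-of-interest mask differ (the mask originates from the ASSERT_EXCLUSIVE_BITS() family). Standalone sketch of the diff-and-mask test:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t old_val = 0x00ff, new_val = 0x01ff;
        uint64_t mask = 0x00ff;         /* bits of interest */
        uint64_t diff = old_val ^ new_val;

        if (mask)
                diff &= mask;           /* ignore changes outside the mask */

        if (diff)
                printf("masked bits changed: %#llx\n",
                       (unsigned long long)diff);
        else
                printf("no change within the mask\n");
        return 0;
}

Here the only bit that changed (bit 8) lies outside the mask, so nothing is reported.
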
