/kernel/bpf/

tnum.c
    12  #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
    14  const struct tnum tnum_unknown = { .value = 0, .mask = -1 };
    39  return TNUM(a.value << shift, a.mask << shift);   in tnum_lshift()
    44  return TNUM(a.value >> shift, a.mask >> shift);   in tnum_rshift()
    56  (u32)(((s32)a.mask) >> min_shift));   in tnum_arshift()
    59  (s64)a.mask >> min_shift);   in tnum_arshift()
    66  sm = a.mask + b.mask;   in tnum_add()
    70  mu = chi | a.mask | b.mask;   in tnum_add()
    79  alpha = dv + a.mask;   in tnum_sub()
    80  beta = dv - b.mask;   in tnum_sub()
    [all …]

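The tnum ({value, mask}) pair tracks what the BPF verifier knows about a register: bits set in .mask are unknown, the remaining bits are fixed to the bits in .value (tnum_unknown is therefore {0, -1}). A minimal user-space sketch of the shift rule seen at line 39, under that assumption; the my_tnum_lshift() name and the main() driver are illustrative, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical user-space re-creation of the {value, mask} encoding:
     * a bit set in .mask is unknown; otherwise .value holds its value. */
    struct tnum { uint64_t value; uint64_t mask; };
    #define TNUM(v, m) ((struct tnum){ .value = (v), .mask = (m) })

    static struct tnum my_tnum_lshift(struct tnum a, uint8_t shift)
    {
            /* Shifting moves known and unknown bits alike, as at line 39. */
            return TNUM(a.value << shift, a.mask << shift);
    }

    int main(void)
    {
            struct tnum t = TNUM(0x8, 0x2);        /* 0b10?0: bit 1 unknown */
            struct tnum s = my_tnum_lshift(t, 1);  /* becomes 0b1_0?00 */
            printf("value=%#llx mask=%#llx\n",
                   (unsigned long long)s.value, (unsigned long long)s.mask);
            return 0;
    }
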
ringbuf.c
    36   u64 mask;   member
    145  rb->mask = data_sz - 1;   in bpf_ringbuf_alloc()
    334  if (len > rb->mask + 1)   in __bpf_ringbuf_reserve()
    352  if (new_prod_pos - cons_pos > rb->mask) {   in __bpf_ringbuf_reserve()
    357  hdr = (void *)rb->data + (prod_pos & rb->mask);   in __bpf_ringbuf_reserve()
    409  cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;   in bpf_ringbuf_commit()
    481  return rb->mask + 1;   in BPF_CALL_2()

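The ring buffer stores mask = data_sz - 1 (line 145) and reduces ever-growing producer/consumer positions with pos & mask (lines 357, 409), which only works when the data area size is a power of two. A small stand-alone sketch of that indexing trick; the buffer size and names are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    #define RB_SIZE 8u                    /* must be a power of two */
    #define RB_MASK (RB_SIZE - 1)         /* counterpart of rb->mask */

    int main(void)
    {
            uint8_t data[RB_SIZE];
            uint64_t prod_pos = 0;        /* monotonically increasing, never reset */

            for (int i = 0; i < 20; i++) {
                    /* The AND with the mask maps the position back into
                     * the data area, like line 357 above. */
                    data[prod_pos & RB_MASK] = (uint8_t)i;
                    prod_pos++;
            }
            printf("last slot written: %llu\n",
                   (unsigned long long)((prod_pos - 1) & RB_MASK));
            return 0;
    }
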
/kernel/irq/

generic-chip.c
    40  u32 mask = d->mask;   in irq_gc_mask_disable_reg()  local
    43  irq_reg_writel(gc, mask, ct->regs.disable);   in irq_gc_mask_disable_reg()
    44  *ct->mask_cache &= ~mask;   in irq_gc_mask_disable_reg()
    59  u32 mask = d->mask;   in irq_gc_mask_set_bit()  local
    62  *ct->mask_cache |= mask;   in irq_gc_mask_set_bit()
    63  irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);   in irq_gc_mask_set_bit()
    79  u32 mask = d->mask;   in irq_gc_mask_clr_bit()  local
    82  *ct->mask_cache &= ~mask;   in irq_gc_mask_clr_bit()
    83  irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);   in irq_gc_mask_clr_bit()
    99  u32 mask = d->mask;   in irq_gc_unmask_enable_reg()  local
    [all …]

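The generic-chip helpers keep a software copy of the hardware mask register (*ct->mask_cache), update that copy, and then write the whole word back (lines 62-63, 82-83), so the bit can be flipped without reading the device. A hedged sketch of that shadow-register pattern; the struct and helper names here are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative shadow of a write-only hardware mask register. */
    struct fake_chip {
            uint32_t mask_cache;   /* software copy, like *ct->mask_cache */
            uint32_t hw_reg;       /* stand-in for the MMIO mask register */
    };

    static void reg_write(struct fake_chip *c, uint32_t val)
    {
            c->hw_reg = val;       /* would be irq_reg_writel() in the kernel */
    }

    static void mask_set_bit(struct fake_chip *c, uint32_t mask)
    {
            c->mask_cache |= mask;         /* update the cache first ...       */
            reg_write(c, c->mask_cache);   /* ... then push the whole word out */
    }

    static void mask_clr_bit(struct fake_chip *c, uint32_t mask)
    {
            c->mask_cache &= ~mask;
            reg_write(c, c->mask_cache);
    }

    int main(void)
    {
            struct fake_chip c = { 0 };
            mask_set_bit(&c, 1u << 3);
            mask_clr_bit(&c, 1u << 3);
            printf("cache=%#x reg=%#x\n", c.mask_cache, c.hw_reg);
            return 0;
    }
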
internals.h
    227  static inline void irqd_clear(struct irq_data *d, unsigned int mask)   in irqd_clear()  argument
    229  __irqd_to_state(d) &= ~mask;   in irqd_clear()
    232  static inline void irqd_set(struct irq_data *d, unsigned int mask)   in irqd_set()  argument
    234  __irqd_to_state(d) |= mask;   in irqd_set()
    237  static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)   in irqd_has_set()  argument
    239  return __irqd_to_state(d) & mask;   in irqd_has_set()
    419  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)   in irq_copy_pending()  argument
    421  cpumask_copy(desc->pending_mask, mask);   in irq_copy_pending()
    424  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)   in irq_get_pending()  argument
    426  cpumask_copy(mask, desc->pending_mask);   in irq_get_pending()
    [all …]

autoprobe.c
    33   unsigned long mask = 0;   in probe_irq_on()  local
    96   mask |= 1 << i;   in probe_irq_on()
    101  return mask;   in probe_irq_on()
    119  unsigned int mask = 0;   in probe_irq_mask()  local
    127  mask |= 1 << i;   in probe_irq_mask()
    136  return mask & val;   in probe_irq_mask()

proc.c
    49  const struct cpumask *mask;   in show_irq_affinity()  local
    54  mask = desc->irq_common_data.affinity;   in show_irq_affinity()
    57  mask = desc->pending_mask;   in show_irq_affinity()
    63  mask = irq_data_get_effective_affinity_mask(&desc->irq_data);   in show_irq_affinity()
    73  seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));   in show_irq_affinity()
    77  seq_printf(m, "%*pb\n", cpumask_pr_args(mask));   in show_irq_affinity()
    87  cpumask_var_t mask;   in irq_affinity_hint_proc_show()  local
    89  if (!zalloc_cpumask_var(&mask, GFP_KERNEL))   in irq_affinity_hint_proc_show()
    94  cpumask_copy(mask, desc->affinity_hint);   in irq_affinity_hint_proc_show()
    97  seq_printf(m, "%*pb\n", cpumask_pr_args(mask));   in irq_affinity_hint_proc_show()
    [all …]

manage.c
    211  const struct cpumask *mask)   in irq_init_effective_affinity()  argument
    213  cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);   in irq_init_effective_affinity()
    218  const struct cpumask *mask) { }   in irq_init_effective_affinity()  argument
    221  int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,   in irq_do_set_affinity()  argument
    261  cpumask_and(&tmp_mask, mask, hk_mask);   in irq_do_set_affinity()
    263  prog_mask = mask;   in irq_do_set_affinity()
    267  prog_mask = mask;   in irq_do_set_affinity()
    279  ret = chip->irq_set_affinity(data, mask, force);   in irq_do_set_affinity()
    288  cpumask_copy(desc->irq_common_data.affinity, mask);   in irq_do_set_affinity()
    334  const struct cpumask *mask, bool force)   in irq_set_affinity_deactivated()  argument
    [all …]

settings.h
    42  __irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set, u32 mask)   in __irq_settings_clr_and_set()  argument
    44  desc->status_use_accessors &= ~(clr & mask);   in __irq_settings_clr_and_set()
    45  desc->status_use_accessors |= (set & mask);   in __irq_settings_clr_and_set()
    85  irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)   in irq_settings_set_trigger_mask()  argument
    88  desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;   in irq_settings_set_trigger_mask()

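__irq_settings_clr_and_set() (lines 44-45) is the usual clear-then-set read-modify-write, restricted to the bits covered by mask. A compact stand-alone illustration of the same two-step update; the values are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t clr_and_set(uint32_t word, uint32_t clr, uint32_t set, uint32_t mask)
    {
            word &= ~(clr & mask);   /* clear only the bits the mask allows */
            word |= (set & mask);    /* set only the bits the mask allows   */
            return word;
    }

    int main(void)
    {
            uint32_t status = 0x05;
            /* Only bits 0-3 may change (mask 0x0f); bit 4 of 'set' is ignored. */
            status = clr_and_set(status, 0x01, 0x12, 0x0f);
            printf("status=%#x\n", status);   /* prints 0x6 */
            return 0;
    }
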
affinity.c
    84   const struct cpumask *mask, nodemask_t *nodemsk)   in get_nodes_in_cpumask()  argument
    90   if (cpumask_intersects(mask, node_to_cpumask[n])) {   in get_nodes_in_cpumask()
    274  cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);   in __irq_build_affinity_masks()
    325  irq_spread_init_one(&masks[curvec].mask, nmsk,   in __irq_build_affinity_masks()
    456  cpumask_copy(&masks[curvec].mask, irq_default_affinity);   in irq_create_affinity_masks()
    482  cpumask_copy(&masks[curvec].mask, irq_default_affinity);   in irq_create_affinity_masks()

/kernel/rcu/

tree_exp.h
    77   unsigned long mask;   in sync_exp_reset_tree_hotplug()  local
    109  mask = rnp->grpmask;   in sync_exp_reset_tree_hotplug()
    116  rnp_up->expmaskinit |= mask;   in sync_exp_reset_tree_hotplug()
    120  mask = rnp_up->grpmask;   in sync_exp_reset_tree_hotplug()
    186  unsigned long mask;   in __rcu_report_exp_rnp()  local
    205  mask = rnp->grpmask;   in __rcu_report_exp_rnp()
    209  WARN_ON_ONCE(!(rnp->expmask & mask));   in __rcu_report_exp_rnp()
    210  WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);   in __rcu_report_exp_rnp()
    231  unsigned long mask, bool wake)   in rcu_report_exp_cpu_mult()  argument
    238  if (!(rnp->expmask & mask)) {   in rcu_report_exp_cpu_mult()
    [all …]

tree.c
    146   static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
    1732  unsigned long mask;   in rcu_gp_init()  local
    1852  mask = rnp->qsmask & ~rnp->qsmaskinitnext;   in rcu_gp_init()
    1853  rnp->rcu_gp_init_mask = mask;   in rcu_gp_init()
    1854  if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))   in rcu_gp_init()
    1855  rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);   in rcu_gp_init()
    2160  static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,   in rcu_report_qs_rnp()  argument
    2171  if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {   in rcu_report_qs_rnp()
    2183  WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);   in rcu_report_qs_rnp()
    2185  mask, rnp->qsmask, rnp->level,   in rcu_report_qs_rnp()
    [all …]

/kernel/time/

timecounter.c
    15  tc->mask = (1ULL << cc->shift) - 1;   in timecounter_init()
    40  cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;   in timecounter_read_delta()
    44  tc->mask, &tc->frac);   in timecounter_read_delta()
    70  u64 cycles, u64 mask, u64 frac)   in cc_cyc2ns_backwards()  argument
    82  u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;   in timecounter_cyc2time()
    90  if (delta > tc->cc->mask / 2) {   in timecounter_cyc2time()
    91  delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;   in timecounter_cyc2time()
    92  nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);   in timecounter_cyc2time()
    94  nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);   in timecounter_cyc2time()

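timecounter_cyc2time() (lines 82-94) picks the shorter way around the cycle counter: if the masked forward delta is more than half the counter range, the timestamp is taken to lie in the past and the distance is measured backwards instead. A rough sketch of that decision under the same masking assumptions; the 16-bit counter width and the cyc_delta() helper are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define CC_MASK 0xffffull            /* 16-bit cycle counter, chosen arbitrarily */

    /* Signed delta in cycles, choosing the shorter direction around the
     * counter, roughly mirroring the branch at line 90 of timecounter.c. */
    static int64_t cyc_delta(uint64_t cycle_tstamp, uint64_t cycle_last)
    {
            uint64_t delta = (cycle_tstamp - cycle_last) & CC_MASK;

            if (delta > CC_MASK / 2) {
                    /* More than half the range forward: treat it as backwards. */
                    delta = (cycle_last - cycle_tstamp) & CC_MASK;
                    return -(int64_t)delta;
            }
            return (int64_t)delta;
    }

    int main(void)
    {
            printf("%lld\n", (long long)cyc_delta(0x0010, 0xfff0));  /* +32 */
            printf("%lld\n", (long long)cyc_delta(0xfff0, 0x0010));  /* -32 */
            return 0;
    }
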
timekeeping_internal.h
    19  static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)   in clocksource_delta()  argument
    21  u64 ret = (now - last) & mask;   in clocksource_delta()
    27  return ret & ~(mask >> 1) ? 0 : ret;   in clocksource_delta()
    30  static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)   in clocksource_delta()  argument
    32  return (now - last) & mask;   in clocksource_delta()

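clocksource_delta() relies on unsigned wraparound: (now - last) & mask yields the elapsed count even when the counter rolled over, and the variant at line 27 treats any result with the top half of the mask set as the clock having gone backwards and returns 0. A small user-space demonstration of both behaviours; the 16-bit counter width is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the kernel helper, for a counter that is mask+1 wide. */
    static uint64_t clocksource_delta(uint64_t now, uint64_t last, uint64_t mask)
    {
            uint64_t ret = (now - last) & mask;

            /* A delta reaching into the top half of the range is taken as
             * a negative (bogus) delta and dropped, as at line 27. */
            return (ret & ~(mask >> 1)) ? 0 : ret;
    }

    int main(void)
    {
            uint64_t mask = 0xffff;                  /* 16-bit counter */

            /* Counter wrapped from 0xfff0 to 0x0010: still a small delta (32). */
            printf("%llu\n", (unsigned long long)clocksource_delta(0x0010, 0xfff0, mask));
            /* 'now' behind 'last': huge apparent delta, filtered to 0. */
            printf("%llu\n", (unsigned long long)clocksource_delta(0x0010, 0x0020, mask));
            return 0;
    }
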
clocksource.c
    210  wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);   in cs_watchdog_read()
    256  delta = (s64)((csnow_mid - csnow_begin) & cs->mask);   in clocksource_verify_percpu()
    259  delta = (csnow_end - csnow_mid) & cs->mask;   in clocksource_verify_percpu()
    262  delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);   in clocksource_verify_percpu()
    318  delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);   in clocksource_watchdog()
    322  delta = clocksource_delta(csnow, cs->cs_last, cs->mask);   in clocksource_watchdog()
    337  watchdog->name, wdnow, wdlast, watchdog->mask);   in clocksource_watchdog()
    339  cs->name, csnow, cslast, cs->mask);   in clocksource_watchdog()
    683  suspend_clocksource->mask);   in clocksource_stop_suspend_timing()
    767  u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)   in clocks_calc_max_nsecs()  argument
    [all …]

tick-broadcast.c
    226  static void err_broadcast(const struct cpumask *mask)   in err_broadcast()  argument
    345  static bool tick_do_broadcast(struct cpumask *mask)   in tick_do_broadcast()  argument
    354  if (cpumask_test_cpu(cpu, mask)) {   in tick_do_broadcast()
    357  cpumask_clear_cpu(cpu, mask);   in tick_do_broadcast()
    373  if (!cpumask_empty(mask)) {   in tick_do_broadcast()
    380  td = &per_cpu(tick_cpu_device, cpumask_first(mask));   in tick_do_broadcast()
    381  td->evtdev->broadcast(mask);   in tick_do_broadcast()
    987  static void tick_broadcast_init_next_event(struct cpumask *mask,   in tick_broadcast_init_next_event()  argument
    993  for_each_cpu(cpu, mask) {   in tick_broadcast_init_next_event()

/kernel/

compat.c
    148  cpumask_var_t mask;   in COMPAT_SYSCALL_DEFINE3()  local
    155  if (!zalloc_cpumask_var(&mask, GFP_KERNEL))   in COMPAT_SYSCALL_DEFINE3()
    158  ret = sched_getaffinity(pid, mask);   in COMPAT_SYSCALL_DEFINE3()
    162  if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))   in COMPAT_SYSCALL_DEFINE3()
    167  free_cpumask_var(mask);   in COMPAT_SYSCALL_DEFINE3()
    193  long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,   in compat_get_bitmap()  argument
    209  *mask++ = ((unsigned long)l2 << BITS_PER_COMPAT_LONG) | l1;   in compat_get_bitmap()
    213  unsafe_get_user(*mask, umask++, Efault);   in compat_get_bitmap()
    222  long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,   in compat_put_bitmap()  argument
    235  unsigned long m = *mask++;   in compat_put_bitmap()
    [all …]

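On a 64-bit kernel, compat_get_bitmap() rebuilds each native unsigned long from two consecutive 32-bit compat words, as at line 209. A stand-alone sketch of that packing step; the constant value and variable names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_COMPAT_LONG 32

    int main(void)
    {
            /* Two 32-bit words as a 32-bit user space would supply them:
             * low word first, high word second. */
            uint32_t l1 = 0x000000ff;    /* bits 0..31  */
            uint32_t l2 = 0x00000001;    /* bits 32..63 */

            /* Same combination as line 209 of compat.c. */
            uint64_t mask = ((uint64_t)l2 << BITS_PER_COMPAT_LONG) | l1;

            printf("mask=%#llx\n", (unsigned long long)mask);   /* 0x1000000ff */
            return 0;
    }
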
smp.c
    583  int smp_call_function_any(const struct cpumask *mask,   in smp_call_function_any()  argument
    592  if (cpumask_test_cpu(cpu, mask))   in smp_call_function_any()
    597  for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;   in smp_call_function_any()
    598  cpu = cpumask_next_and(cpu, nodemask, mask)) {   in smp_call_function_any()
    604  cpu = cpumask_any_and(mask, cpu_online_mask);   in smp_call_function_any()
    612  static void smp_call_function_many_cond(const struct cpumask *mask,   in smp_call_function_many_cond()  argument
    637  cpu = cpumask_first_and(mask, cpu_online_mask);   in smp_call_function_many_cond()
    639  cpu = cpumask_next_and(cpu, mask, cpu_online_mask);   in smp_call_function_many_cond()
    646  next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);   in smp_call_function_many_cond()
    648  next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);   in smp_call_function_many_cond()
    [all …]

taskstats.c
    274  static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)   in add_del_listener()  argument
    281  if (!cpumask_subset(mask, cpu_possible_mask))   in add_del_listener()
    291  for_each_cpu(cpu, mask) {   in add_del_listener()
    318  for_each_cpu(cpu, mask) {   in add_del_listener()
    333  static int parse(struct nlattr *na, struct cpumask *mask)   in parse()  argument
    350  ret = cpulist_parse(data, mask);   in parse()
    437  cpumask_var_t mask;   in cmd_attr_register_cpumask()  local
    440  if (!alloc_cpumask_var(&mask, GFP_KERNEL))   in cmd_attr_register_cpumask()
    442  rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);   in cmd_attr_register_cpumask()
    445  rc = add_del_listener(info->snd_portid, mask, REGISTER);   in cmd_attr_register_cpumask()
    [all …]

signal.c
    211  int next_signal(struct sigpending *pending, sigset_t *mask)   in next_signal()  argument
    217  m = mask->sig;   in next_signal()
    288  bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)   in task_set_jobctl_pending()  argument
    290  BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |   in task_set_jobctl_pending()
    292  BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));   in task_set_jobctl_pending()
    297  if (mask & JOBCTL_STOP_SIGMASK)   in task_set_jobctl_pending()
    300  task->jobctl |= mask;   in task_set_jobctl_pending()
    340  void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)   in task_clear_jobctl_pending()  argument
    342  BUG_ON(mask & ~JOBCTL_PENDING_MASK);   in task_clear_jobctl_pending()
    344  if (mask & JOBCTL_STOP_PENDING)   in task_clear_jobctl_pending()
    [all …]

up.c
    55  void on_each_cpu_mask(const struct cpumask *mask,   in on_each_cpu_mask()  argument
    60  if (cpumask_test_cpu(0, mask)) {   in on_each_cpu_mask()
    73  void *info, bool wait, const struct cpumask *mask)   in on_each_cpu_cond_mask()  argument

auditfilter.c
    196  static inline int audit_match_class_bits(int class, u32 *mask)   in audit_match_class_bits()  argument
    202  if (mask[i] & classes[class][i])   in audit_match_class_bits()
    216  entry->rule.mask) &&   in audit_match_signal()
    218  entry->rule.mask));   in audit_match_signal()
    224  entry->rule.mask));   in audit_match_signal()
    227  entry->rule.mask));   in audit_match_signal()
    278  entry->rule.mask[i] = rule->mask[i];   in audit_to_entry_common()
    282  __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];   in audit_to_entry_common()
    292  entry->rule.mask[j] |= class[j];   in audit_to_entry_common()
    689  for (i = 0; i < AUDIT_BITMASK_SIZE; i++) data->mask[i] = krule->mask[i];   in audit_krule_to_data()
    [all …]

/kernel/dma/

mapping.c
    109  static bool dma_go_direct(struct device *dev, dma_addr_t mask,   in dma_go_direct()  argument
    116  return min_not_zero(mask, dev->bus_dma_limit) >=   in dma_go_direct()
    557  int dma_supported(struct device *dev, u64 mask)   in dma_supported()  argument
    566  return dma_direct_supported(dev, mask);   in dma_supported()
    569  return ops->dma_supported(dev, mask);   in dma_supported()
    574  void arch_dma_set_mask(struct device *dev, u64 mask);
    576  #define arch_dma_set_mask(dev, mask) do { } while (0)   argument
    579  int dma_set_mask(struct device *dev, u64 mask)   in dma_set_mask()  argument
    585  mask = (dma_addr_t)mask;   in dma_set_mask()
    587  if (!dev->dma_mask || !dma_supported(dev, mask))   in dma_set_mask()
    [all …]

/kernel/sched/

cpupri.c
    97   if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)   in __cpupri_find()
    101  cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);   in __cpupri_find()
    251  cpumask_set_cpu(cpu, vec->mask);   in cpupri_set()
    285  cpumask_clear_cpu(cpu, vec->mask);   in cpupri_set()
    305  if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))   in cpupri_init()
    320  free_cpumask_var(cp->pri_to_cpu[i].mask);   in cpupri_init()
    334  free_cpumask_var(cp->pri_to_cpu[i].mask);   in cpupri_cleanup()

topology.c
    845   build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)   in build_balance_mask()  argument
    852   cpumask_clear(mask);   in build_balance_mask()
    869   cpumask_set_cpu(i, mask);   in build_balance_mask()
    873   WARN_ON_ONCE(cpumask_empty(mask));   in build_balance_mask()
    906   struct cpumask *mask = sched_domains_tmpmask2;   in init_overlap_sched_group()  local
    911   build_balance_mask(sd, sg, mask);   in init_overlap_sched_group()
    912   cpu = cpumask_first_and(sched_group_span(sg), mask);   in init_overlap_sched_group()
    916   cpumask_copy(group_balance_mask(sg), mask);   in init_overlap_sched_group()
    918   WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));   in init_overlap_sched_group()
    1330  sd_weight = cpumask_weight(tl->mask(cpu));   in sd_init()
    [all …]

/kernel/debug/kdb/

kdb_bt.c
    77   kdb_bt1(struct task_struct *p, unsigned long mask, bool btaprompt)   in kdb_bt1()  argument
    84   if (!kdb_task_state(p, mask))   in kdb_bt1()
    141  unsigned long mask = kdb_task_state_string(argc ? argv[1] :   in kdb_bt()  local
    148  if (kdb_bt1(p, mask, btaprompt))   in kdb_bt()
    157  if (kdb_bt1(p, mask, btaprompt))   in kdb_bt()
