/kernel/bpf/ |
D | tnum.c |
    62    struct tnum tnum_add(struct tnum a, struct tnum b)  in tnum_add() argument
    66    sm = a.mask + b.mask;  in tnum_add()
    67    sv = a.value + b.value;  in tnum_add()
    70    mu = chi | a.mask | b.mask;  in tnum_add()
    74    struct tnum tnum_sub(struct tnum a, struct tnum b)  in tnum_sub() argument
    78    dv = a.value - b.value;  in tnum_sub()
    80    beta = dv - b.mask;  in tnum_sub()
    82    mu = chi | a.mask | b.mask;  in tnum_sub()
    86    struct tnum tnum_and(struct tnum a, struct tnum b)  in tnum_and() argument
    91    beta = b.value | b.mask;  in tnum_and()
    [all …]
|
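The tnum_add() hits above are the BPF verifier's "tristate number" addition, where a set bit in .mask marks that bit of .value as unknown. A minimal userspace sketch of the same arithmetic; the sigma/chi intermediates sit on lines elided from the listing and are reconstructed here from upstream kernel/bpf/tnum.c:

    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value; uint64_t mask; }; /* mask bit set => bit unknown */

    static struct tnum tnum_add(struct tnum a, struct tnum b)
    {
            uint64_t sm = a.mask + b.mask;        /* line 66: sum of unknown parts */
            uint64_t sv = a.value + b.value;      /* line 67: sum of known parts */
            uint64_t sigma = sm + sv;             /* worst case, all unknowns set */
            uint64_t chi = sigma ^ sv;            /* bits whose carries may differ */
            uint64_t mu = chi | a.mask | b.mask;  /* line 70: unknown result bits */

            return (struct tnum){ sv & ~mu, mu };
    }

    int main(void)
    {
            struct tnum a = { 0x8, 0x2 };  /* binary 10x0: the set {8, 10} */
            struct tnum b = { 0x1, 0x0 };  /* exactly 1 */
            struct tnum r = tnum_add(a, b);

            /* prints value=0x9 mask=0x2, i.e. the set {9, 11} */
            printf("value=%#llx mask=%#llx\n",
                   (unsigned long long)r.value, (unsigned long long)r.mask);
            return 0;
    }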
D | hashtab.c |
    150   struct bucket *b, u32 hash,  in htab_lock_bucket() argument
    166   raw_spin_lock(&b->raw_lock);  in htab_lock_bucket()
    173   struct bucket *b, u32 hash,  in htab_unlock_bucket() argument
    177   raw_spin_unlock(&b->raw_lock);  in htab_unlock_bucket()
    786   struct bucket *b;  in htab_lru_map_delete_node() local
    790   b = __select_bucket(htab, tgt_l->hash);  in htab_lru_map_delete_node()
    791   head = &b->head;  in htab_lru_map_delete_node()
    793   ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);  in htab_lru_map_delete_node()
    804   htab_unlock_bucket(htab, b, tgt_l->hash, flags);  in htab_lru_map_delete_node()
    1076  struct bucket *b;  in htab_map_update_elem() local
    [all …]
|
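The pattern in these hits is per-bucket locking: htab_lock_bucket() serializes only the chain selected by the hash, so updates to different buckets proceed in parallel. A rough userspace analogue of that shape, with invented names and a pthread mutex standing in for b->raw_lock (the [0 ... N-1] initializer is a GNU extension):

    #include <pthread.h>

    struct entry {
            struct entry *next;
            unsigned long key;
            long value;
    };

    struct bucket {
            pthread_mutex_t lock;   /* kernel: b->raw_lock */
            struct entry *head;     /* kernel: hlist head b->head */
    };

    #define NBUCKETS 64
    static struct bucket table[NBUCKETS] = {
            [0 ... NBUCKETS - 1] = { PTHREAD_MUTEX_INITIALIZER, NULL }
    };

    static struct bucket *select_bucket(unsigned long hash)
    {
            return &table[hash & (NBUCKETS - 1)];  /* kernel: __select_bucket() */
    }

    static void update_elem(unsigned long hash, struct entry *new)
    {
            struct bucket *b = select_bucket(hash);

            pthread_mutex_lock(&b->lock);    /* kernel: htab_lock_bucket() */
            new->next = b->head;             /* insert at head of the chain */
            b->head = new;
            pthread_mutex_unlock(&b->lock);  /* kernel: htab_unlock_bucket() */
    }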
D | bpf_local_storage.c |
    209   struct bpf_local_storage_map_bucket *b;  in bpf_selem_unlink_map() local
    217   b = select_bucket(smap, selem);  in bpf_selem_unlink_map()
    218   raw_spin_lock_irqsave(&b->lock, flags);  in bpf_selem_unlink_map()
    221   raw_spin_unlock_irqrestore(&b->lock, flags);  in bpf_selem_unlink_map()
    227   struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);  in bpf_selem_link_map() local
    230   raw_spin_lock_irqsave(&b->lock, flags);  in bpf_selem_link_map()
    232   hlist_add_head_rcu(&selem->map_node, &b->list);  in bpf_selem_link_map()
    233   raw_spin_unlock_irqrestore(&b->lock, flags);  in bpf_selem_link_map()
    540   struct bpf_local_storage_map_bucket *b;  in bpf_local_storage_map_free() local
    559   b = &smap->buckets[i];  in bpf_local_storage_map_free()
    [all …]
|
/kernel/time/ |
D | timeconst.bc |
    5     define gcd(a,b) {
    7     while (b) {
    8     t = b;
    9     b = a % b;
    16    define fmul(b,n,d) {
    17    return (2^b*n+d-1)/d;
    22    define fadj(b,n,d) {
    25    v = 2^b*(d-1)/d;
    30    which brings the mul value into the range 2^b-1 <= x < 2^b. Such
    33    define fmuls(b,n,d) {
    [all …]
|
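timeconst.bc precomputes multiply-and-shift constants so the kernel can convert between time units without a runtime division: gcd() reduces n/d first, then fmul() computes ceil(2^b * n / d). The same arithmetic in C (names kept from the bc script; valid only while 2^b * n fits in 64 bits):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t gcd(uint64_t a, uint64_t b)
    {
            while (b) {             /* Euclid, as in the bc script */
                    uint64_t t = b;
                    b = a % b;
                    a = t;
            }
            return a;
    }

    static uint64_t fmul(unsigned int b, uint64_t n, uint64_t d)
    {
            return (((uint64_t)1 << b) * n + d - 1) / d;  /* ceil(2^b*n/d) */
    }

    int main(void)
    {
            /* e.g. HZ=100: milliseconds per jiffy, n/d = 1000/100 */
            uint64_t g = gcd(1000, 100);

            printf("gcd=%llu mul32=%#llx\n", (unsigned long long)g,
                   (unsigned long long)fmul(32, 1000 / g, 100 / g));
            return 0;
    }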
D | timer.c |
    2007  int b;  in timers_prepare_cpu() local
    2009  for (b = 0; b < NR_BASES; b++) {  in timers_prepare_cpu()
    2010  base = per_cpu_ptr(&timer_bases[b], cpu);  in timers_prepare_cpu()
    2024  int b, i;  in timers_dead_cpu() local
    2028  for (b = 0; b < NR_BASES; b++) {  in timers_dead_cpu()
    2029  old_base = per_cpu_ptr(&timer_bases[b], cpu);  in timers_dead_cpu()
    2030  new_base = get_cpu_ptr(&timer_bases[b]);  in timers_dead_cpu()
|
/kernel/trace/ |
D | trace_events_filter_test.h |
    12    TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
    14    TP_ARGS(a, b, c, d, e, f, g, h),
    18    __field(int, b)
    29    __entry->b = b;
    39    __entry->a, __entry->b, __entry->c, __entry->d,
|
D | tracing_map.c |
    131   char *b = val_b;  in tracing_map_cmp_string() local
    133   return strcmp(a, b);  in tracing_map_cmp_string()
    144   u64 b = atomic64_read((atomic64_t *)val_b);  in tracing_map_cmp_atomic64() local
    146   return (a > b) ? 1 : ((a < b) ? -1 : 0);  in tracing_map_cmp_atomic64()
    153   type b = (type)(*(u64 *)val_b); \
    155   return (a > b) ? 1 : ((a < b) ? -1 : 0); \
    847   const struct tracing_map_sort_entry *a, *b;  in cmp_entries_dup() local
    851   b = *(const struct tracing_map_sort_entry **)B;  in cmp_entries_dup()
    853   if (memcmp(a->key, b->key, a->elt->map->key_size))  in cmp_entries_dup()
    862   const struct tracing_map_sort_entry *a, *b;  in cmp_entries_sum() local
    [all …]
|
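Every comparator in tracing_map.c reduces to the same three-way idiom, returning 1/-1/0 explicitly rather than subtracting (which can overflow or truncate for 64-bit values). A standalone version wired into qsort(), illustrative rather than the kernel API:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_u64(const void *val_a, const void *val_b)
    {
            uint64_t a = *(const uint64_t *)val_a;
            uint64_t b = *(const uint64_t *)val_b;

            /* safe where "return a - b;" would not be */
            return (a > b) ? 1 : ((a < b) ? -1 : 0);
    }

    int main(void)
    {
            uint64_t v[] = { 3, UINT64_MAX, 0, 7 };

            qsort(v, 4, sizeof(v[0]), cmp_u64);
            for (int i = 0; i < 4; i++)
                    printf("%llu\n", (unsigned long long)v[i]);
            return 0;
    }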
D | trace_branch.c |
    338   const struct ftrace_branch_data *b = p2;  in annotated_branch_stat_cmp() local
    343   percent_b = get_incorrect_percent(b);  in annotated_branch_stat_cmp()
    350   if (a->incorrect < b->incorrect)  in annotated_branch_stat_cmp()
    352   if (a->incorrect > b->incorrect)  in annotated_branch_stat_cmp()
    360   if (a->correct > b->correct)  in annotated_branch_stat_cmp()
    362   if (a->correct < b->correct)  in annotated_branch_stat_cmp()
|
D | trace_probe.c |
    17    #define C(a, b) b  argument
    1180  int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b)  in trace_probe_compare_arg_type() argument
    1185  if (a->nr_args < b->nr_args)  in trace_probe_compare_arg_type()
    1187  if (a->nr_args > b->nr_args)  in trace_probe_compare_arg_type()
    1188  return b->nr_args + 1;  in trace_probe_compare_arg_type()
    1191  if ((b->nr_args <= i) ||  in trace_probe_compare_arg_type()
    1192  ((a->args[i].type != b->args[i].type) ||  in trace_probe_compare_arg_type()
    1193  (a->args[i].count != b->args[i].count) ||  in trace_probe_compare_arg_type()
    1194  strcmp(a->args[i].name, b->args[i].name)))  in trace_probe_compare_arg_type()
|
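trace_probe_compare_arg_type() encodes which argument mismatched in its return value: 0 means the lists agree, otherwise 1 plus the index of the first difference, so callers can report the exact offender. A simplified model of that contract (struct fields cut down to the three compared above; the kernel checks the lengths before the loop):

    #include <string.h>

    struct probe_arg {
            int type;
            int count;
            const char *name;
    };

    /* Returns 0 on match, else 1 + index of the first mismatch. */
    static int compare_arg_type(const struct probe_arg *a, int a_n,
                                const struct probe_arg *b, int b_n)
    {
            int min_n = a_n < b_n ? a_n : b_n;

            for (int i = 0; i < min_n; i++) {
                    if (a[i].type != b[i].type ||
                        a[i].count != b[i].count ||
                        strcmp(a[i].name, b[i].name))
                            return i + 1;
            }
            if (a_n != b_n)           /* one list is a strict prefix: the  */
                    return min_n + 1; /* first missing argument mismatches */
            return 0;
    }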
D | trace_hwlat.c |
    159   #define time_sub(a, b) ((a) - (b))  argument
    160   #define init_time(a, b) (a = b)  argument
|
/kernel/rcu/ |
D | tree_plugin.h |
    263   if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)  in rcu_preempt_ctxt_queue()
    266   WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);  in rcu_preempt_ctxt_queue()
    289   if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) {  in rcu_qs()
    293   __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);  in rcu_qs()
    295   WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);  in rcu_qs()
    322   !t->rcu_read_unlock_special.b.blocked) {  in rcu_note_context_switch()
    327   t->rcu_read_unlock_special.b.blocked = true;  in rcu_note_context_switch()
    357   if (rdp->cpu_no_qs.b.exp)  in rcu_note_context_switch()
    406   WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);  in __rcu_read_lock()
    484   if (!special.s && !rdp->cpu_no_qs.b.exp) {  in rcu_preempt_deferred_qs_irqrestore()
    [all …]
|
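The `.b.` selectors here (and in tasks.h and tree_exp.h below) come from unions such as the task's rcu_read_unlock_special: byte-sized flags in `.b` are overlaid with a single word `.s`, so "is any deferred work pending?" is one load, as in the `!special.s` test on line 484. A reduced model, with the field set taken from the upstream union rcu_special:

    #include <stdint.h>
    #include <stdio.h>

    union rcu_special {
            struct {
                    uint8_t blocked;
                    uint8_t need_qs;
                    uint8_t exp_hint;
                    uint8_t need_mb;
            } b;          /* the flags, individually addressable */
            uint32_t s;   /* the whole set in one load */
    };

    int main(void)
    {
            union rcu_special special = { .s = 0 };

            special.b.need_qs = 1;
            if (special.s)  /* one test covers all four flags */
                    printf("deferred quiescent-state work pending\n");
            return 0;
    }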
D | tasks.h |
    1246  return smp_load_acquire(&t->trc_reader_special.b.need_qs);  in rcu_ld_need_qs()
    1252  smp_store_release(&t->trc_reader_special.b.need_qs, v);  in rcu_st_need_qs()
    1267  if (trs_old.b.need_qs != old)  in rcu_trc_cmpxchg_need_qs()
    1268  return trs_old.b.need_qs;  in rcu_trc_cmpxchg_need_qs()
    1269  trs_new.b.need_qs = new;  in rcu_trc_cmpxchg_need_qs()
    1271  return ret.b.need_qs;  in rcu_trc_cmpxchg_need_qs()
    1289  if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)  in rcu_read_unlock_trace_special()
    1292  if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {  in rcu_read_unlock_trace_special()
    1296  WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);  in rcu_read_unlock_trace_special()
    1298  if (trs.b.blocked) {  in rcu_read_unlock_trace_special()
    [all …]
|
D | tree_exp.h |
    259   WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);  in rcu_report_exp_rdp()
    643   "D."[!!data_race(rdp->cpu_no_qs.b.exp)]);  in synchronize_rcu_expedited_wait()
    753   WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);  in rcu_exp_handler()
    775   WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);  in rcu_exp_handler()
    776   t->rcu_read_unlock_special.b.exp_hint = true;  in rcu_exp_handler()
    822   __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);  in rcu_exp_need_qs()
    837   __this_cpu_read(rcu_data.cpu_no_qs.b.exp))  in rcu_exp_handler()
    861   READ_ONCE(rdp->cpu_no_qs.b.exp)) {  in sync_sched_exp_online_cleanup()
|
/kernel/kcsan/ |
D | selftest.c |
    142   #define KCSAN_CHECK_READ_BARRIER(b) __KCSAN_CHECK_BARRIER(0, b, #b)  in test_barrier() argument
    143   #define KCSAN_CHECK_WRITE_BARRIER(b) __KCSAN_CHECK_BARRIER(KCSAN_ACCESS_WRITE, b, #b)  in test_barrier() argument
    144   #define KCSAN_CHECK_RW_BARRIER(b) __KCSAN_CHECK_BARRIER(KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOU…  in test_barrier() argument
|
D | debugfs.c |
    98    const unsigned long b = *(const unsigned long *)lhs;  in cmp_filterlist_addrs() local
    100   return a < b ? -1 : a == b ? 0 : 1;  in cmp_filterlist_addrs()
|
D | kcsan_test.c |
    547   #define KCSAN_EXPECT_READ_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(0, b, o, #b)  in test_barrier_nothreads() argument
    548   #define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b)  in test_barrier_nothreads() argument
    549   #define KCSAN_EXPECT_RW_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCES…  in test_barrier_nothreads() argument
|
/kernel/ |
D | auditfilter.c |
    704   static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)  in audit_compare_rule() argument
    708   if (a->flags != b->flags ||  in audit_compare_rule()
    709   a->pflags != b->pflags ||  in audit_compare_rule()
    710   a->listnr != b->listnr ||  in audit_compare_rule()
    711   a->action != b->action ||  in audit_compare_rule()
    712   a->field_count != b->field_count)  in audit_compare_rule()
    716   if (a->fields[i].type != b->fields[i].type ||  in audit_compare_rule()
    717   a->fields[i].op != b->fields[i].op)  in audit_compare_rule()
    731   if (strcmp(a->fields[i].lsm_str, b->fields[i].lsm_str))  in audit_compare_rule()
    736   audit_watch_path(b->watch)))  in audit_compare_rule()
    [all …]
|
D | static_call_inline.c |
    68    const struct static_call_site *b = _b;  in static_call_site_cmp() local
    70    const struct static_call_key *key_b = static_call_key(b);  in static_call_site_cmp()
    85    struct static_call_site *b = _b;  in static_call_site_swap() local
    88    a->addr = b->addr - delta;  in static_call_site_swap()
    89    a->key = b->key - delta;  in static_call_site_swap()
    91    b->addr = tmp.addr + delta;  in static_call_site_swap()
    92    b->key = tmp.key + delta;  in static_call_site_swap()
|
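static_call_site_swap() is subtler than a byte-wise swap because each site stores relative offsets: after two entries trade places, every offset must be re-biased by the distance the entry moved, which is the `delta` on lines 88-92. A userspace model (addr only, key omitted) showing that the resolved targets survive the swap:

    #include <stdint.h>
    #include <stdio.h>

    struct site {
            int32_t addr;   /* offset from &site->addr to the target */
    };

    static uintptr_t site_target(const struct site *s)
    {
            return (uintptr_t)&s->addr + s->addr;
    }

    /* Swap two entries, re-biasing each offset by the distance moved. */
    static void site_swap(struct site *a, struct site *b)
    {
            int32_t delta = (int32_t)((intptr_t)a - (intptr_t)b);
            struct site tmp = *a;

            a->addr = b->addr - delta;   /* cf. line 88 */
            b->addr = tmp.addr + delta;  /* cf. line 91 */
    }

    static int x, y;
    static struct site s[2];

    int main(void)
    {
            s[0].addr = (int32_t)((intptr_t)&x - (intptr_t)&s[0].addr);
            s[1].addr = (int32_t)((intptr_t)&y - (intptr_t)&s[1].addr);

            site_swap(&s[0], &s[1]);
            /* prints "1 1": s[0] now resolves to y and s[1] to x */
            printf("%d %d\n", site_target(&s[0]) == (uintptr_t)&y,
                              site_target(&s[1]) == (uintptr_t)&x);
            return 0;
    }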
D | cred.c |
    632   int cred_fscmp(const struct cred *a, const struct cred *b)  in cred_fscmp() argument
    637   if (a == b)  in cred_fscmp()
    639   if (uid_lt(a->fsuid, b->fsuid))  in cred_fscmp()
    641   if (uid_gt(a->fsuid, b->fsuid))  in cred_fscmp()
    644   if (gid_lt(a->fsgid, b->fsgid))  in cred_fscmp()
    646   if (gid_gt(a->fsgid, b->fsgid))  in cred_fscmp()
    650   gb = b->group_info;  in cred_fscmp()
|
D | groups.c |
    79    kgid_t b = *(kgid_t *)_b;  in gid_cmp() local
    81    return gid_gt(a, b) - gid_lt(a, b);  in gid_cmp()
|
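Line 81 is the branch-free three-way compare: (a > b) - (a < b) yields 1, -1 or 0 directly, with no nested conditionals. The same trick on plain integers:

    #include <stdio.h>

    static int cmp_int(int a, int b)
    {
            return (a > b) - (a < b);  /* 1, -1, or 0 */
    }

    int main(void)
    {
            /* prints "1 -1 0" */
            printf("%d %d %d\n", cmp_int(2, 1), cmp_int(1, 2), cmp_int(2, 2));
            return 0;
    }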
D | jump_label.c |
    35    static int jump_label_cmp(const void *a, const void *b)  in jump_label_cmp() argument
    38    const struct jump_entry *jeb = b;  in jump_label_cmp()
    63    static void jump_label_swap(void *a, void *b, int size)  in jump_label_swap() argument
    65    long delta = (unsigned long)a - (unsigned long)b;  in jump_label_swap()
    67    struct jump_entry *jeb = b;  in jump_label_swap()
|
/kernel/locking/ |
D | ww_mutex.h |
    228   __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)  in __ww_ctx_less() argument
    238   int b_prio = b->task->prio;  in __ww_ctx_less()
    251   if (dl_time_before(b->task->dl.deadline,  in __ww_ctx_less()
    256   b->task->dl.deadline))  in __ww_ctx_less()
    265   return (signed long)(a->stamp - b->stamp) > 0;  in __ww_ctx_less()
|
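The cast-to-signed subtraction on line 265 is the wraparound-safe ordering test (the same idea as the kernel's time_after()): it keeps giving the right answer even after the stamp counter overflows. Demonstrated at the wrap point:

    #include <stdio.h>

    /* true if stamp a was taken after stamp b, modulo wraparound */
    static int stamp_after(unsigned long a, unsigned long b)
    {
            return (long)(a - b) > 0;
    }

    int main(void)
    {
            unsigned long old = (unsigned long)-2;  /* just before wrap */
            unsigned long new = 1;                  /* just after wrap */

            printf("%d\n", stamp_after(new, old));  /* prints 1 */
            return 0;
    }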
/kernel/module/ |
D | tree_lookup.c |
    37    mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)  in mod_tree_less() argument
    39    return __mod_tree_val(a) < __mod_tree_val(b);  in mod_tree_less()
|
D | main.c |
    563   static int already_uses(struct module *a, struct module *b)  in already_uses() argument
    567   list_for_each_entry(use, &b->source_list, source_list) {  in already_uses()
    569   pr_debug("%s uses %s!\n", a->name, b->name);  in already_uses()
    573   pr_debug("%s does not use %s!\n", a->name, b->name);  in already_uses()
    584   static int add_module_usage(struct module *a, struct module *b)  in add_module_usage() argument
    594   use->target = b;  in add_module_usage()
    595   list_add(&use->source_list, &b->source_list);  in add_module_usage()
    601   static int ref_module(struct module *a, struct module *b)  in ref_module() argument
    605   if (b == NULL || already_uses(a, b))  in ref_module()
    609   err = strong_try_module_get(b);  in ref_module()
    [all …]
|
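ref_module() records each dependency edge once: already_uses() scans b's source_list for a, and add_module_usage() links a new "use" node when it is absent. A reduced sketch of that bookkeeping, with the kernel's doubly linked list and reference counting simplified to a singly linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct module;

    struct module_use {
            struct module_use *next;
            struct module *source;          /* the importer (a) */
    };

    struct module {
            const char *name;
            struct module_use *source_list; /* modules that use this one (b) */
    };

    static int already_uses(struct module *a, struct module *b)
    {
            for (struct module_use *use = b->source_list; use; use = use->next)
                    if (use->source == a)
                            return 1;
            return 0;
    }

    static int ref_module(struct module *a, struct module *b)
    {
            struct module_use *use;

            if (!b || already_uses(a, b))
                    return 0;               /* edge already recorded */
            use = malloc(sizeof(*use));
            if (!use)
                    return -1;
            use->source = a;
            use->next = b->source_list;     /* kernel: list_add() on line 595 */
            b->source_list = use;
            return 0;
    }

    int main(void)
    {
            struct module usb = { "usbcore", NULL }, hid = { "usbhid", NULL };

            ref_module(&hid, &usb);
            printf("%s uses %s: %d\n", hid.name, usb.name,
                   already_uses(&hid, &usb));
            return 0;
    }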
/kernel/dma/ |
D | debug.c |
    270   static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)  in exact_match() argument
    272   return ((a->dev_addr == b->dev_addr) &&  in exact_match()
    273   (a->dev == b->dev)) ? true : false;  in exact_match()
    277   struct dma_debug_entry *b)  in containing_match() argument
    279   if (a->dev != b->dev)  in containing_match()
    282   if ((b->dev_addr <= a->dev_addr) &&  in containing_match()
    283   ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))  in containing_match()
|
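exact_match() demands an identical (dev, dev_addr) pair, while containing_match() accepts any entry whose range fully covers the queried one; the latter is what lets dma-debug validate a partial sync against the original, larger mapping. The interval test in isolation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range {
            uint64_t addr;
            uint64_t size;
    };

    /* true if inner lies wholly within outer, as on lines 282-283 */
    static bool contains(const struct range *outer, const struct range *inner)
    {
            return outer->addr <= inner->addr &&
                   outer->addr + outer->size >= inner->addr + inner->size;
    }

    int main(void)
    {
            struct range map  = { 0x1000, 0x100 };  /* the original mapping */
            struct range sync = { 0x1010, 0x20 };   /* a partial sync */

            printf("%d\n", contains(&map, &sync));  /* prints 1 */
            return 0;
    }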