/kernel/printk/ |
D | printk_ringbuffer.c |
  1350  bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,  in prb_reserve_in_last() argument
  1359  local_irq_save(e->irqflags);  in prb_reserve_in_last()
  1364  local_irq_restore(e->irqflags);  in prb_reserve_in_last()
  1376  e->rb = rb;  in prb_reserve_in_last()
  1377  e->id = id;  in prb_reserve_in_last()
  1432  e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);  in prb_reserve_in_last()
  1436  prb_commit(e);  in prb_reserve_in_last()
  1485  bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,  in prb_reserve() argument
  1503  local_irq_save(e->irqflags);  in prb_reserve()
  1508  local_irq_restore(e->irqflags);  in prb_reserve()
  [all …]
|
D | printk_ringbuffer.h |
  305  bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
  307  bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
  309  void prb_commit(struct prb_reserved_entry *e);
  310  void prb_final_commit(struct prb_reserved_entry *e);
  316  unsigned int prb_record_text_space(struct prb_reserved_entry *e);
|
D | printk.c |
  549  static void append_char(char **pp, char *e, char c)  in append_char() argument
  551  if (*pp < e)  in append_char()
  580  char *p = buf, *e = buf + size;  in msg_add_ext_text() local
  588  p += scnprintf(p, e - p, "\\x%02x", c);  in msg_add_ext_text()
  590  append_char(&p, e, c);  in msg_add_ext_text()
  592  append_char(&p, e, endc);  in msg_add_ext_text()
  1049  struct prb_reserved_entry e;  in add_to_rb() local
  1054  if (!prb_reserve(&e, rb, &dest_r))  in add_to_rb()
  1066  prb_final_commit(&e);  in add_to_rb()
  1068  return prb_record_text_space(&e);  in add_to_rb()
  [all …]
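The printk entries above together show the writer side of the prb API: add_to_rb() reserves a record with prb_reserve(), copies the message text in, finalizes it with prb_final_commit(), and returns prb_record_text_space(). A minimal single-threaded sketch of that reserve/fill/commit shape, using a made-up buffer type rather than the kernel's lockless ringbuffer, could look like this:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical, single-threaded stand-ins for the prb reserve/commit API. */
    struct ring  { char buf[256]; size_t head; };
    struct entry { struct ring *rb; size_t off; size_t len; };

    static int reserve(struct entry *e, struct ring *rb, size_t len)
    {
            if (rb->head + len > sizeof(rb->buf))
                    return 0;               /* no space: caller must handle failure */
            e->rb  = rb;
            e->off = rb->head;
            e->len = len;
            return 1;
    }

    static void commit(struct entry *e)
    {
            /* Only at commit time does the reserved region count as used. */
            e->rb->head = e->off + e->len;
    }

    int main(void)
    {
            struct ring rb = { .head = 0 };
            struct entry e;
            const char *msg = "hello";

            /* Same shape as add_to_rb(): reserve, copy the text in, commit. */
            if (!reserve(&e, &rb, strlen(msg)))
                    return 1;
            memcpy(rb.buf + e.off, msg, e.len);
            commit(&e);
            printf("used %zu bytes\n", e.len);
            return 0;
    }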
|
/kernel/ |
D | extable.c |
  56  const struct exception_table_entry *e;  in search_exception_tables() local
  58  e = search_kernel_exception_table(addr);  in search_exception_tables()
  59  if (!e)  in search_exception_tables()
  60  e = search_module_extables(addr);  in search_exception_tables()
  61  if (!e)  in search_exception_tables()
  62  e = search_bpf_extables(addr);  in search_exception_tables()
  63  return e;  in search_exception_tables()
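search_exception_tables() above is a first-match fallback chain: try the core kernel exception table, then the module tables, then the BPF program tables (the latter two appear further down in module/main.c and bpf/core.c). A self-contained sketch of the same pattern over three toy tables, with hypothetical names:

    #include <stdio.h>
    #include <stddef.h>

    #define LEN(a) (sizeof(a) / sizeof((a)[0]))

    struct extable_entry { unsigned long insn; unsigned long fixup; };

    /* Three toy tables standing in for the core, module and BPF tables. */
    static const struct extable_entry core_tbl[]   = { { 0x100, 0x900 } };
    static const struct extable_entry module_tbl[] = { { 0x200, 0x910 } };
    static const struct extable_entry bpf_tbl[]    = { { 0x300, 0x920 } };

    static const struct extable_entry *
    search_one(const struct extable_entry *tbl, size_t n, unsigned long addr)
    {
            for (size_t i = 0; i < n; i++)
                    if (tbl[i].insn == addr)
                            return &tbl[i];
            return NULL;
    }

    /* First-match fallback chain, same shape as search_exception_tables(). */
    static const struct extable_entry *search_all(unsigned long addr)
    {
            const struct extable_entry *e;

            e = search_one(core_tbl, LEN(core_tbl), addr);
            if (!e)
                    e = search_one(module_tbl, LEN(module_tbl), addr);
            if (!e)
                    e = search_one(bpf_tbl, LEN(bpf_tbl), addr);
            return e;
    }

    int main(void)
    {
            const struct extable_entry *e = search_all(0x200);

            printf("fixup: %#lx\n", e ? e->fixup : 0UL);
            return 0;
    }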
|
D | auditfilter.c |
  83  static inline void audit_free_rule(struct audit_entry *e)  in audit_free_rule() argument
  86  struct audit_krule *erule = &e->rule;  in audit_free_rule()
  96  kfree(e);  in audit_free_rule()
  101  struct audit_entry *e = container_of(head, struct audit_entry, rcu);  in audit_free_rule_rcu() local
  102  audit_free_rule(e);  in audit_free_rule_rcu()
  902  struct audit_entry *e, *found = NULL;  in audit_find_rule() local
  913  list_for_each_entry(e, list, list)  in audit_find_rule()
  914  if (!audit_compare_rule(&entry->rule, &e->rule)) {  in audit_find_rule()
  915  found = e;  in audit_find_rule()
  924  list_for_each_entry(e, list, list)  in audit_find_rule()
  [all …]
|
D | audit_watch.c |
  326  struct audit_entry *e;  in audit_remove_parent_watches() local
  331  e = container_of(r, struct audit_entry, rule);  in audit_remove_parent_watches()
  333  if (e->rule.exe)  in audit_remove_parent_watches()
  334  audit_remove_mark(e->rule.exe);  in audit_remove_parent_watches()
  337  list_del_rcu(&e->list);  in audit_remove_parent_watches()
  338  call_rcu(&e->rcu, audit_free_rule_rcu);  in audit_remove_parent_watches()
|
D | auditsc.c |
  775  struct audit_entry *e;  in audit_filter_task() local
  779  list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {  in audit_filter_task()
  780  if (audit_filter_rules(tsk, &e->rule, NULL, NULL,  in audit_filter_task()
  783  *key = kstrdup(e->rule.filterkey, GFP_ATOMIC);  in audit_filter_task()
  816  struct audit_entry *e;  in audit_filter_uring() local
  823  list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_URING_EXIT],  in audit_filter_uring()
  825  if (audit_in_mask(&e->rule, ctx->uring_op) &&  in audit_filter_uring()
  826  audit_filter_rules(tsk, &e->rule, ctx, NULL, &state,  in audit_filter_uring()
  844  struct audit_entry *e;  in audit_filter_syscall() local
  851  list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_EXIT], list) {  in audit_filter_syscall()
  [all …]
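The three auditsc.c functions repeat one pattern: walk an RCU-protected list of audit_entry rules and act on the first rule that matches the task, syscall, or io_uring operation. Setting RCU aside, the first-match walk itself can be sketched in plain C with illustrative names:

    #include <stdio.h>

    struct rule  { int mask; const char *key; };
    struct entry { struct rule rule; struct entry *next; };

    /* Return the key of the first rule whose mask covers operation op. */
    static const char *filter(const struct entry *head, int op)
    {
            for (const struct entry *e = head; e; e = e->next)
                    if (e->rule.mask & (1 << op))
                            return e->rule.key;
            return NULL;
    }

    int main(void)
    {
            struct entry r2 = { { 1 << 3, "exit-rule" }, NULL };
            struct entry r1 = { { 1 << 1, "task-rule" }, &r2 };
            const char *key = filter(&r1, 3);

            printf("matched: %s\n", key ? key : "(none)");
            return 0;
    }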
|
D | user_namespace.c |
  245  static int cmp_map_id(const void *k, const void *e)  in cmp_map_id() argument
  249  const struct uid_gid_extent *el = e;  in cmp_map_id()
  1052  struct uid_gid_extent *e;  in map_write() local
  1056  e = &new_map.extent[idx];  in map_write()
  1058  e = &new_map.forward[idx];  in map_write()
  1061  e->lower_first,  in map_write()
  1062  e->count);  in map_write()
  1070  e->lower_first = lower_first;  in map_write()
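cmp_map_id() is a comparison callback for a binary search over the sorted uid_gid_extent array: the first argument is the id being looked up, the second an array element, and the result says whether the id falls below, above, or inside the element's range. The same callback shape with the C library's bsearch() and a simplified extent type (assumed here, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified extent: ids [first, first + count) map to lower_first + offset. */
    struct extent { unsigned first; unsigned lower_first; unsigned count; };

    /* bsearch() comparator: k is the id being looked up, e an array element. */
    static int cmp_extent(const void *k, const void *e)
    {
            unsigned id = *(const unsigned *)k;
            const struct extent *el = e;

            if (id < el->first)
                    return -1;
            if (id >= el->first + el->count)
                    return 1;
            return 0;               /* id falls inside this extent */
    }

    int main(void)
    {
            /* Must be sorted by .first and non-overlapping for bsearch() to work. */
            struct extent map[] = { { 0, 100000, 1000 }, { 2000, 200000, 500 } };
            unsigned id = 2100;
            struct extent *el = bsearch(&id, map, 2, sizeof(map[0]), cmp_extent);

            if (el)
                    printf("%u -> %u\n", id, el->lower_first + (id - el->first));
            return 0;
    }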
|
/kernel/trace/ |
D | trace_events_filter_test.h |
  12  TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
  14  TP_ARGS(a, b, c, d, e, f, g, h),
  21  __field(int, e)
  32  __entry->e = e;
  40  __entry->e, __entry->f, __entry->g, __entry->h)
|
D | trace_output.c |
  695  struct trace_event *e = NULL, *iter;  in trace_search_list() local
  709  e = iter;  in trace_search_list()
  719  if (e)  in trace_search_list()
  720  *list = &e->list;  in trace_search_list()
|
D | ring_buffer.c |
  3347  int e;  in dump_buffer_page() local
  3352  for (e = 0; e < tail; e += rb_event_length(event)) {  in dump_buffer_page()
  3354  event = (struct ring_buffer_event *)(bpage->data + e);  in dump_buffer_page()
  3401  int e;  in check_buffer() local
  3429  for (e = 0; e < tail; e += rb_event_length(event)) {  in check_buffer()
  3431  event = (struct ring_buffer_event *)(bpage->data + e);  in check_buffer()
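dump_buffer_page() and check_buffer() both walk a buffer page by advancing an offset e by the length of each event and recomputing the event pointer from the data base plus that offset. The same walk over length-prefixed, variable-length records in a flat byte buffer, as a stand-alone sketch:

    #include <stdio.h>
    #include <string.h>

    /* Toy record: one length byte followed by that many payload bytes. */
    static unsigned rec_length(const unsigned char *rec)
    {
            return 1u + rec[0];
    }

    int main(void)
    {
            unsigned char page[16];
            unsigned tail = 0;

            /* Append two records: "hi" (len 2) and "xyz" (len 3). */
            page[tail] = 2; memcpy(page + tail + 1, "hi", 2);  tail += 3;
            page[tail] = 3; memcpy(page + tail + 1, "xyz", 3); tail += 4;

            /* Walk by record length, like the e += rb_event_length(event) loop. */
            for (unsigned e = 0; e < tail; e += rec_length(page + e))
                    printf("record at %u, payload %.*s\n",
                           e, page[e], (const char *)page + e + 1);
            return 0;
    }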
|
D | Kconfig |
  509  ftrace interface, e.g.:
  631  Tracing also is possible using the ftrace interface, e.g.:
  1046  However, it should be safe to use on e.g. unused portion of VRAM.
|
D | trace_events_filter.c |
  2305  .e = ve, .f = vf, .g = vg, .h = vh }, \
|
D | trace.c |
  4205  unsigned long t, e;  in get_total_entries() local
  4212  get_total_entries_cpu(buf, &t, &e, cpu);  in get_total_entries()
  4214  *entries += e;  in get_total_entries()
|
/kernel/locking/ |
D | lockdep.c |
  984  static bool in_list(struct list_head *e, struct list_head *h)  in in_list() argument
  989  if (e == f)  in in_list()
  1000  static bool in_any_class_list(struct list_head *e)  in in_any_class_list() argument
  1007  if (in_list(e, &class->locks_after) ||  in in_any_class_list()
  1008  in_list(e, &class->locks_before))  in in_any_class_list()
  1016  struct lock_list *e;  in class_lock_list_valid() local
  1018  list_for_each_entry(e, h, entry) {  in class_lock_list_valid()
  1019  if (e->links_to != c) {  in class_lock_list_valid()
  1022  (unsigned long)(e - list_entries),  in class_lock_list_valid()
  1023  e->links_to && e->links_to->name ?  in class_lock_list_valid()
  [all …]
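in_list() is a plain membership scan of a circular doubly-linked list, and in_any_class_list() just applies it to every lock class's before/after lists. A user-space sketch of the membership check on a minimal list type (not the kernel's list_head helpers):

    #include <stdio.h>
    #include <stdbool.h>

    /* Minimal circular doubly-linked list node, list_head style. */
    struct node { struct node *next, *prev; };

    static void list_init(struct node *h) { h->next = h->prev = h; }

    static void list_add(struct node *n, struct node *h)
    {
            n->next = h->next;
            n->prev = h;
            h->next->prev = n;
            h->next = n;
    }

    /* Return true if e is linked somewhere on the list headed by h. */
    static bool in_list(const struct node *e, const struct node *h)
    {
            for (const struct node *f = h->next; f != h; f = f->next)
                    if (e == f)
                            return true;
            return false;
    }

    int main(void)
    {
            struct node head, a, b = { NULL, NULL };

            list_init(&head);
            list_add(&a, &head);
            printf("a in list: %d, b in list: %d\n",
                   in_list(&a, &head), in_list(&b, &head));
            return 0;
    }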
|
/kernel/gcov/ |
D | Kconfig |
  11  This option enables gcov-based code profiling (e.g. for code coverage
  21  For a single file (e.g. main.o):
|
/kernel/debug/kdb/ |
D | kdb_main.c |
  256  char *e = *ep++;  in kdbgetenv() local
  258  if (!e)  in kdbgetenv()
  261  if ((strncmp(match, e, matchlen) == 0)  in kdbgetenv()
  262  && ((e[matchlen] == '\0')  in kdbgetenv()
  263  || (e[matchlen] == '='))) {  in kdbgetenv()
  264  char *cp = strchr(e, '=');  in kdbgetenv()
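kdbgetenv() walks an array of "NAME" / "NAME=value" strings, accepts an entry when the name matches exactly or is followed by '=', and returns the text after the '='. The same lookup over a NULL-terminated environment-style array, sketched stand-alone:

    #include <stdio.h>
    #include <string.h>

    /* Return the value of `match` in a "NAME" / "NAME=value" array, or NULL. */
    static const char *getenv_like(char **ep, const char *match)
    {
            size_t matchlen = strlen(match);

            for (char *e = *ep; e; e = *++ep) {
                    if (strncmp(match, e, matchlen) == 0 &&
                        (e[matchlen] == '\0' || e[matchlen] == '=')) {
                            char *cp = strchr(e, '=');

                            return cp ? cp + 1 : "";
                    }
            }
            return NULL;
    }

    int main(void)
    {
            char *env[] = { "PROMPT=kdb> ", "BTARGS=9", NULL };
            const char *v = getenv_like(env, "BTARGS");

            printf("BTARGS=%s\n", v ? v : "(unset)");
            return 0;
    }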
|
/kernel/module/ |
D | Kconfig |
  58  list of linked (or loaded) modules e.g. on detection of a bad
  96  by developers to identify the SCM version of a given module, e.g.
|
D | main.c |
  3054  const struct exception_table_entry *e = NULL;  in search_module_extables() local
  3065  e = search_extable(mod->extable,  in search_module_extables()
  3075  return e;  in search_module_extables()
|
/kernel/power/ |
D | Kconfig |
  9  suspend-to-RAM state (e.g. the ACPI S3 state).
  286  notification of APM "events" (e.g. battery status change).
|
/kernel/bpf/ |
D | core.c |
  736  const struct exception_table_entry *e = NULL;  in search_bpf_extables() local
  746  e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);  in search_bpf_extables()
  749  return e;  in search_bpf_extables()
|
D | verifier.c |
  11041  static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,  in push_insn() argument
  11047  if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))  in push_insn()
  11050  if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))  in push_insn()
  11059  if (e == BRANCH)  in push_insn()
  11065  insn_state[t] = DISCOVERED | e;  in push_insn()
  11080  insn_state[t] = DISCOVERED | e;  in push_insn()
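push_insn() records which outgoing edge kinds (fall-through vs. branch) of instruction t have already been explored by OR-ing them into insn_state[t] together with DISCOVERED, so the control-flow DFS visits each edge once. A small stand-alone sketch of that bookkeeping; the flag values and the bit-test below are illustrative, while the verifier's actual checks use the ordered comparisons shown above:

    #include <stdio.h>

    enum { DISCOVERED = 0x10, FALLTHROUGH = 0x01, BRANCH = 0x02 };

    static int insn_state[8];

    /* Mark edge kind e out of instruction t as explored; return 0 if it was new. */
    static int explore_edge(int t, int e)
    {
            if (insn_state[t] & e)
                    return 1;               /* this edge kind already handled */
            insn_state[t] |= DISCOVERED | e;
            return 0;
    }

    int main(void)
    {
            int a = explore_edge(3, FALLTHROUGH);   /* 0: first time          */
            int b = explore_edge(3, FALLTHROUGH);   /* 1: already explored    */
            int c = explore_edge(3, BRANCH);        /* 0: different edge kind */

            printf("%d %d %d\n", a, b, c);
            return 0;
    }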
|
/kernel/events/ |
D | core.c |
  1650  struct perf_event *e = __node_2_pe(a);  in __group_less() local
  1651  return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index,  in __group_less()
|
/kernel/sched/ |
D | core.c |
  10645  #define _POW10(exp) ((unsigned int)1e##exp)
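_POW10() builds a power of ten at compile time by token-pasting the exponent onto the literal 1e and casting the resulting double constant to unsigned int. A quick stand-alone check of how the macro expands:

    #include <stdio.h>

    /* 1e##exp forms a double literal such as 1e6; the cast truncates it to int. */
    #define _POW10(exp) ((unsigned int)1e##exp)

    int main(void)
    {
            printf("%u %u\n", _POW10(3), _POW10(6));        /* 1000 1000000 */
            return 0;
    }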
|