/kernel/bpf/
net_namespace.c
     14  enum bpf_attach_type type;  (member)
     29  static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type)  in netns_bpf_attach_type_unneed()  (argument)
     31  switch (type) {  in netns_bpf_attach_type_unneed()
     42  static void netns_bpf_attach_type_need(enum netns_bpf_attach_type type)  in netns_bpf_attach_type_need()  (argument)
     44  switch (type) {  in netns_bpf_attach_type_need()
     57  enum netns_bpf_attach_type type)  in netns_bpf_run_array_detach()  (argument)
     61  run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,  in netns_bpf_run_array_detach()
     66  static int link_index(struct net *net, enum netns_bpf_attach_type type,  in link_index()  (argument)
     72  list_for_each_entry(pos, &net->bpf.links[type], node) {  in link_index()
     80  static int link_count(struct net *net, enum netns_bpf_attach_type type)  in link_count()  (argument)
     [all …]

inode.c
     32  static void *bpf_any_get(void *raw, enum bpf_type type)  in bpf_any_get()  (argument)
     34  switch (type) {  in bpf_any_get()
     52  static void bpf_any_put(void *raw, enum bpf_type type)  in bpf_any_put()  (argument)
     54  switch (type) {  in bpf_any_put()
     70  static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)  in bpf_fd_probe_obj()  (argument)
     76  *type = BPF_TYPE_MAP;  in bpf_fd_probe_obj()
     82  *type = BPF_TYPE_PROG;  in bpf_fd_probe_obj()
     88  *type = BPF_TYPE_LINK;  in bpf_fd_probe_obj()
    130  static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)  in bpf_inode_type()  (argument)
    132  *type = BPF_TYPE_UNSPEC;  in bpf_inode_type()
     [all …]

verifier.c
    430  static bool type_is_pkt_pointer(enum bpf_reg_type type)  in type_is_pkt_pointer()  (argument)
    432  type = base_type(type);  in type_is_pkt_pointer()
    433  return type == PTR_TO_PACKET ||  in type_is_pkt_pointer()
    434  type == PTR_TO_PACKET_META;  in type_is_pkt_pointer()
    437  static bool type_is_sk_pointer(enum bpf_reg_type type)  in type_is_sk_pointer()  (argument)
    439  return type == PTR_TO_SOCKET ||  in type_is_sk_pointer()
    440  type == PTR_TO_SOCK_COMMON ||  in type_is_sk_pointer()
    441  type == PTR_TO_TCP_SOCK ||  in type_is_sk_pointer()
    442  type == PTR_TO_XDP_SOCK;  in type_is_sk_pointer()
    445  static bool reg_type_not_null(enum bpf_reg_type type)  in reg_type_not_null()  (argument)
     [all …]

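A detail worth noting in these verifier.c hits: `type_is_pkt_pointer()` first calls `base_type(type)` because `bpf_reg_type` values can carry modifier flags in their upper bits, and only the low "base type" bits are compared. A minimal userspace sketch of that pattern; the mask and the `PTR_MAYBE_NULL` bit position here are invented for illustration, not the kernel's actual values:

```c
#include <stdbool.h>
#include <stdio.h>

enum reg_type {
	PTR_TO_PACKET      = 1,
	PTR_TO_PACKET_META = 2,
	/* hypothetical modifier flag stored above the base-type bits */
	PTR_MAYBE_NULL     = 1 << 8,
};

#define BASE_TYPE_MASK 0xff	/* illustrative; the kernel derives its own mask */

static enum reg_type base_type(enum reg_type type)
{
	return type & BASE_TYPE_MASK;
}

static bool type_is_pkt_pointer(enum reg_type type)
{
	type = base_type(type);	/* strip modifier flags before comparing */
	return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META;
}

int main(void)
{
	/* a flagged value still matches once the flags are masked off */
	printf("%d\n", type_is_pkt_pointer(PTR_TO_PACKET | PTR_MAYBE_NULL)); /* 1 */
	return 0;
}
```
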
bpf_lru_list.c
     53  enum bpf_lru_list_type type)  in bpf_lru_list_count_inc()  (argument)
     55  if (type < NR_BPF_LRU_LIST_COUNT)  in bpf_lru_list_count_inc()
     56  l->counts[type]++;  in bpf_lru_list_count_inc()
     60  enum bpf_lru_list_type type)  in bpf_lru_list_count_dec()  (argument)
     62  if (type < NR_BPF_LRU_LIST_COUNT)  in bpf_lru_list_count_dec()
     63  l->counts[type]--;  in bpf_lru_list_count_dec()
     71  if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))  in __bpf_lru_node_move_to_free()
     80  bpf_lru_list_count_dec(l, node->type);  in __bpf_lru_node_move_to_free()
     82  node->type = tgt_free_type;  in __bpf_lru_node_move_to_free()
     91  if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||  in __bpf_lru_node_move_in()
     [all …]

btf.c
    394  const struct btf_type *type;  (member)
    589  id = t->type;  in btf_type_skip_modifiers()
    590  t = btf_type_by_id(btf, t->type);  in btf_type_skip_modifiers()
    608  return btf_type_skip_modifiers(btf, t->type, res_id);  in btf_type_resolve_ptr()
    855  id = m->type;  in btf_member_is_reg_int()
    892  t = btf_type_by_id(btf, t->type);  in btf_type_skip_qualifiers()
    940  id = m->type;  in btf_show_name()
    984  id = t->type;  in btf_show_name()
    994  id = array->type;  in btf_show_name()
   1000  id = t->type;  in btf_show_name()
    [all …]

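The btf.c hits outline a chain walk: modifier entries (const, volatile, typedef) carry a `type` field that names another type id, and `btf_type_skip_modifiers()` loops through `t->type` until it lands on a non-modifier. A hedged sketch of that loop; the table layout and `kind` values below are invented stand-ins, as real BTF records are considerably richer:

```c
#include <stdbool.h>
#include <stdio.h>

enum kind { KIND_INT, KIND_PTR, KIND_CONST, KIND_TYPEDEF };

struct btf_type_ex {		/* hypothetical stand-in for struct btf_type */
	enum kind kind;
	unsigned int type;	/* referenced type id, for modifiers and pointers */
};

static bool is_modifier(enum kind k)
{
	return k == KIND_CONST || k == KIND_TYPEDEF;
}

static unsigned int skip_modifiers(const struct btf_type_ex *tab, unsigned int id)
{
	while (is_modifier(tab[id].kind))
		id = tab[id].type;	/* follow the chain, as the hits suggest */
	return id;
}

int main(void)
{
	/* id 3 = typedef -> id 2 = const -> id 1 = int */
	const struct btf_type_ex tab[] = {
		[1] = { KIND_INT, 0 },
		[2] = { KIND_CONST, 1 },
		[3] = { KIND_TYPEDEF, 2 },
	};
	printf("resolved id: %u\n", skip_modifiers(tab, 3));	/* prints 1 */
	return 0;
}
```
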
/kernel/sched/
isolation.c
     33  bool housekeeping_enabled(enum hk_type type)  in housekeeping_enabled()  (argument)
     35  return !!(housekeeping.flags & BIT(type));  in housekeeping_enabled()
     39  int housekeeping_any_cpu(enum hk_type type)  in housekeeping_any_cpu()  (argument)
     44  if (housekeeping.flags & BIT(type)) {  in housekeeping_any_cpu()
     45  cpu = sched_numa_find_closest(housekeeping.cpumasks[type], smp_processor_id());  in housekeeping_any_cpu()
     49  return cpumask_any_and(housekeeping.cpumasks[type], cpu_online_mask);  in housekeeping_any_cpu()
     56  const struct cpumask *housekeeping_cpumask(enum hk_type type)  in housekeeping_cpumask()  (argument)
     59  if (housekeeping.flags & BIT(type))  in housekeeping_cpumask()
     60  return housekeeping.cpumasks[type];  in housekeeping_cpumask()
     65  void housekeeping_affine(struct task_struct *t, enum hk_type type)  in housekeeping_affine()  (argument)
     [all …]

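The isolation.c hits show a common per-type lookup shape: a flags word records which types are configured (one bit per enum value via `BIT(type)`), and a parallel array holds the per-type data. A standalone sketch of the same shape, with plain `uint64_t` masks standing in for `struct cpumask` and illustrative enum names:

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

enum hk_type { HK_TYPE_TIMER, HK_TYPE_RCU, HK_TYPE_MISC, HK_TYPE_MAX };

static struct {
	uint64_t flags;			/* BIT(type) set when that type is configured */
	uint64_t cpumasks[HK_TYPE_MAX];	/* per-type CPU mask (illustrative) */
} housekeeping = {
	.flags    = BIT(HK_TYPE_TIMER),
	.cpumasks = { [HK_TYPE_TIMER] = 0x3 },	/* CPUs 0-1 */
};

static uint64_t housekeeping_mask(enum hk_type type, uint64_t fallback)
{
	if (housekeeping.flags & BIT(type))	/* same gate as housekeeping_cpumask() */
		return housekeeping.cpumasks[type];
	return fallback;	/* the kernel falls back to a default mask */
}

int main(void)
{
	printf("timer mask: %#llx\n",
	       (unsigned long long)housekeeping_mask(HK_TYPE_TIMER, ~0ULL));
	printf("rcu mask:   %#llx\n",
	       (unsigned long long)housekeeping_mask(HK_TYPE_RCU, ~0ULL));
	return 0;
}
```
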
core_sched.c
    129  int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,  in sched_core_share_pid()  (argument)
    144  if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 ||  in sched_core_share_pid()
    172  if (type != PIDTYPE_PID || uaddr & 7) {  in sched_core_share_pid()
    197  if (type != PIDTYPE_PID) {  in sched_core_share_pid()
    210  if (type == PIDTYPE_PID) {  in sched_core_share_pid()
    216  grp = task_pid_type(task, type);  in sched_core_share_pid()
    218  do_each_pid_thread(grp, type, p) {  in sched_core_share_pid()
    223  } while_each_pid_thread(grp, type, p);  in sched_core_share_pid()
    225  do_each_pid_thread(grp, type, p) {  in sched_core_share_pid()
    227  } while_each_pid_thread(grp, type, p);  in sched_core_share_pid()

/kernel/events/
hw_breakpoint.c
     62  static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)  in get_bp_info()  (argument)
     64  return per_cpu_ptr(bp_cpuinfo + type, cpu);  in get_bp_info()
    172  static inline int hw_breakpoint_slots_cached(int type) { return hw_breakpoint_slots(type); }  in hw_breakpoint_slots_cached()  (argument)
    180  static inline int hw_breakpoint_slots_cached(int type)  in hw_breakpoint_slots_cached()  (argument)
    182  return __nr_bp_slots[type];  in hw_breakpoint_slots_cached()
    186  bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type)  in bp_slots_histogram_alloc()  (argument)
    188  hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);  in bp_slots_histogram_alloc()
    249  bp_slots_histogram_max(struct bp_slots_histogram *hist, enum bp_type_idx type)  in bp_slots_histogram_max()  (argument)
    251  for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {  in bp_slots_histogram_max()
    266  enum bp_type_idx type)  in bp_slots_histogram_max_merge()  (argument)
     [all …]

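The hw_breakpoint.c hits sketch a slot-histogram idiom: the histogram array is sized from the per-type slot count (the `kcalloc()` hit), and the maximum in use is found by scanning from the top index downward (the `for` hit in `bp_slots_histogram_max()`). A hedged userspace reduction, roughly assuming `count[i]` tracks how many users hold i+1 slots:

```c
#include <stdio.h>
#include <stdlib.h>

struct slots_histogram {
	int *count;
	int nslots;
};

static int histogram_alloc(struct slots_histogram *hist, int nslots)
{
	/* sized from the per-type slot count, as bp_slots_histogram_alloc() does */
	hist->count = calloc(nslots, sizeof(*hist->count));
	if (!hist->count)
		return -1;
	hist->nslots = nslots;
	return 0;
}

/* highest nonzero bucket, scanning from the top as bp_slots_histogram_max() does */
static int histogram_max(const struct slots_histogram *hist)
{
	for (int i = hist->nslots - 1; i >= 0; i--)
		if (hist->count[i] > 0)
			return i + 1;
	return 0;
}

int main(void)
{
	struct slots_histogram hist;

	if (histogram_alloc(&hist, 4))
		return 1;
	hist.count[0] = 3;	/* three users holding 1 slot */
	hist.count[2] = 1;	/* one user holding 3 slots */
	printf("max slots in use: %d\n", histogram_max(&hist));	/* 3 */
	free(hist.count);
	return 0;
}
```
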
/kernel/trace/
trace_export.c
     19  enum trace_reg type, void *data)  in ftrace_event_register()  (argument)
     37  #define __field_struct(type, item)  (argument)
     40  #define __field(type, item) type item;  (argument)
     43  #define __field_fn(type, item) type item;  (argument)
     46  #define __field_desc(type, container, item) type item;  (argument)
     49  #define __field_packed(type, container, item) type item;  (argument)
     52  #define __array(type, item, size) type item[size];  (argument)
     55  #define __array_desc(type, container, item, size) type item[size];  (argument)
     58  #define __dynamic_array(type, item) type item[];  (argument)
     87  .type = #_type, .name = #_item, \
     [all …]

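The `__field()`/`__array()` macros hit here belong to the kernel's multiple-expansion trick: the same field list is expanded once to declare struct members (`type item;`), then the macros are redefined and the list is expanded again to emit metadata strings (the `.type = #_type, .name = #_item` hit). A self-contained demo of the idiom, using an invented `FIELDS` list rather than the real TRACE_EVENT machinery:

```c
#include <stdio.h>

#define FIELDS(F)		\
	F(int, pid)		\
	F(long, duration)

/* first expansion: struct members, like "#define __field(type, item) type item;" */
#define __field(type, item) type item;
struct my_entry { FIELDS(__field) };
#undef __field

/* second expansion: descriptors, like ".type = #_type, .name = #_item" */
struct field_desc { const char *type, *name; };
#define __field(type, item) { #type, #item },
static const struct field_desc my_fields[] = { FIELDS(__field) };
#undef __field

int main(void)
{
	for (unsigned i = 0; i < sizeof(my_fields) / sizeof(my_fields[0]); i++)
		printf("%s %s\n", my_fields[i].type, my_fields[i].name);
	return 0;
}
```
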
trace_events_synth.c
    139  char *name, *type;  in synth_event_define_fields()  (local)
    146  type = event->fields[i]->type;  in synth_event_define_fields()
    148  ret = trace_define_field(call, type, name, offset, size,  in synth_event_define_fields()
    169  static bool synth_field_signed(char *type)  in synth_field_signed()  (argument)
    171  if (str_has_prefix(type, "u"))  in synth_field_signed()
    173  if (strcmp(type, "gfp_t") == 0)  in synth_field_signed()
    179  static int synth_field_is_string(char *type)  in synth_field_is_string()  (argument)
    181  if (strstr(type, "char[") != NULL)  in synth_field_is_string()
    187  static int synth_field_is_stack(char *type)  in synth_field_is_stack()  (argument)
    189  if (strstr(type, "long[") != NULL)  in synth_field_is_stack()
     [all …]

trace_events_user.c
    349  static int user_field_array_size(const char *type)  in user_field_array_size()  (argument)
    351  const char *start = strchr(type, '[');  in user_field_array_size()
    378  static int user_field_size(const char *type)  in user_field_size()  (argument)
    381  if (strcmp(type, "s64") == 0)  in user_field_size()
    383  if (strcmp(type, "u64") == 0)  in user_field_size()
    385  if (strcmp(type, "s32") == 0)  in user_field_size()
    387  if (strcmp(type, "u32") == 0)  in user_field_size()
    389  if (strcmp(type, "int") == 0)  in user_field_size()
    391  if (strcmp(type, "unsigned int") == 0)  in user_field_size()
    393  if (strcmp(type, "s16") == 0)  in user_field_size()
     [all …]

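`user_field_size()`, per the hits, maps a field type that arrives as a string from userspace to a byte size, one `strcmp()` at a time. A table-driven equivalent of the same mapping (the entries beyond the hits shown above are assumptions, and the real function is a plain if-chain):

```c
#include <stdio.h>
#include <string.h>

static int user_field_size(const char *type)
{
	static const struct { const char *name; int size; } tab[] = {
		{ "s64", 8 }, { "u64", 8 },
		{ "s32", 4 }, { "u32", 4 },
		{ "int", 4 }, { "unsigned int", 4 },
		{ "s16", 2 }, { "u16", 2 },
	};

	for (unsigned i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
		if (strcmp(type, tab[i].name) == 0)
			return tab[i].size;
	return -1;	/* unknown type name */
}

int main(void)
{
	printf("u32   -> %d bytes\n", user_field_size("u32"));
	printf("bogus -> %d\n", user_field_size("bogus"));
	return 0;
}
```
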
trace_probe.c
     33  #define DEFINE_BASIC_PRINT_TYPE_FUNC(tname, type, fmt) \  (argument)
     36  trace_seq_printf(s, fmt, *(type *)data); \
    103  static const struct fetch_type *find_fetch_type(const char *type)  in find_fetch_type()  (argument)
    107  if (!type)  in find_fetch_type()
    108  type = DEFAULT_FETCH_TYPE_STR;  in find_fetch_type()
    111  if (*type == 'b') {  in find_fetch_type()
    114  type = strchr(type, '/');  in find_fetch_type()
    115  if (!type)  in find_fetch_type()
    118  type++;  in find_fetch_type()
    119  if (kstrtoul(type, 0, &bs))  in find_fetch_type()
     [all …]

trace_dynevent.c
     72  int dyn_event_release(const char *raw_command, struct dyn_event_operations *type)  in dyn_event_release()  (argument)
    111  if (type && type != pos->ops)  in dyn_event_release()
    196  int dyn_events_release_all(struct dyn_event_operations *type)  in dyn_events_release_all()  (argument)
    203  if (type && ev->ops != type)  in dyn_events_release_all()
    211  if (type && ev->ops != type)  in dyn_events_release_all()
    405  enum dynevent_type type,  in dynevent_cmd_init()  (argument)
    411  cmd->type = type;  in dynevent_cmd_init()

trace_events_filter.c
    638  #define DEFINE_COMPARISON_PRED(type) \  (argument)
    639  static int filter_pred_##type(struct filter_pred *pred, void *event) \
    643  type *addr = (type *)(event + pred->offset); \
    644  type val = (type)pred->val; \
    648  type *addr = (type *)(event + pred->offset); \
    649  type val = (type)pred->val; \
    653  type *addr = (type *)(event + pred->offset); \
    654  type val = (type)pred->val; \
    658  type *addr = (type *)(event + pred->offset); \
    659  type val = (type)pred->val; \
     [all …]

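`DEFINE_COMPARISON_PRED(type)` stamps out one comparison predicate per scalar C type: the event buffer is indexed by a byte offset, cast to the field's type, and compared against `pred->val`. The repeated `addr`/`val` hits above suggest one case per operator inside each generated function. A compilable reduction under that assumption (the `OP_*` names and the two-field event are invented):

```c
#include <stddef.h>
#include <stdio.h>

enum { OP_LT, OP_LE, OP_GT, OP_GE };

struct filter_pred {
	int offset;	/* byte offset of the field in the event record */
	long long val;	/* user-supplied comparison value */
	int op;
};

/* one predicate per scalar type, in the spirit of DEFINE_COMPARISON_PRED(type) */
#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event)	\
{									\
	type *addr = (type *)((char *)event + pred->offset);		\
	type val = (type)pred->val;					\
									\
	switch (pred->op) {						\
	case OP_LT: return *addr < val;					\
	case OP_LE: return *addr <= val;				\
	case OP_GT: return *addr > val;					\
	default:    return *addr >= val;				\
	}								\
}

DEFINE_COMPARISON_PRED(int)
DEFINE_COMPARISON_PRED(short)

struct event { short flags; int duration; };

int main(void)
{
	struct event ev = { .flags = 1, .duration = 250 };
	struct filter_pred p1 = { offsetof(struct event, duration), 1000, OP_LT };
	struct filter_pred p2 = { offsetof(struct event, flags), 0, OP_GT };

	printf("duration < 1000: %d\n", filter_pred_int(&p1, &ev));	/* 1 */
	printf("flags > 0:       %d\n", filter_pred_short(&p2, &ev));	/* 1 */
	return 0;
}
```
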
trace_output.c
    304  if (entry->type != event->event.type) {  in trace_raw_output_prep()
    676  struct trace_event *ftrace_find_event(int type)  in ftrace_find_event()  (argument)
    681  key = type & (EVENT_HASHSIZE - 1);  in ftrace_find_event()
    684  if (event->type == type)  in ftrace_find_event()
    708  if (iter->type != next) {  in trace_search_list()
    766  if (!event->type) {  in register_trace_event()
    771  event->type = trace_search_list(&list);  in register_trace_event()
    772  if (!event->type)  in register_trace_event()
    777  event->type = next_event_type++;  in register_trace_event()
    781  if (WARN_ON(ftrace_find_event(event->type)))  in register_trace_event()
     [all …]

trace.h
     63  #define __field(type, item) type item;  (argument)
     66  #define __field_fn(type, item) type item;  (argument)
     69  #define __field_struct(type, item) __field(type, item)  (argument)
     72  #define __field_desc(type, container, item)  (argument)
     75  #define __field_packed(type, container, item)  (argument)
     78  #define __array(type, item, size) type item[size];  (argument)
     81  #define __array_desc(type, container, item, size)  (argument)
     84  #define __dynamic_array(type, item) type item[];  (argument)
     87  #define __rel_dynamic_array(type, item) type item[];  (argument)
    218  static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,  in pid_type_enabled()  (argument)
     [all …]

/kernel/cgroup/
misc.c
     65  static inline bool valid_type(enum misc_res_type type)  in valid_type()  (argument)
     67  return type >= 0 && type < MISC_CG_RES_TYPES;  in valid_type()
     77  unsigned long misc_cg_res_total_usage(enum misc_res_type type)  in misc_cg_res_total_usage()  (argument)
     79  if (valid_type(type))  in misc_cg_res_total_usage()
     80  return atomic_long_read(&root_cg.res[type].usage);  in misc_cg_res_total_usage()
     98  int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity)  in misc_cg_set_capacity()  (argument)
    100  if (!valid_type(type))  in misc_cg_set_capacity()
    103  WRITE_ONCE(misc_res_capacity[type], capacity);  in misc_cg_set_capacity()
    116  static void misc_cg_cancel_charge(enum misc_res_type type, struct misc_cg *cg,  in misc_cg_cancel_charge()  (argument)
    119  WARN_ONCE(atomic_long_add_negative(-amount, &cg->res[type].usage),  in misc_cg_cancel_charge()
     [all …]

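The misc.c hits outline per-type resource accounting: every enum value indexes a usage counter and a capacity, `valid_type()` bounds-checks the index, and a failed charge is unwound with an explicit cancel. A minimal sketch of that flow with plain longs standing in for `atomic_long_t`, so it is single-threaded illustration only:

```c
#include <stdbool.h>
#include <stdio.h>

enum misc_res_type { MISC_CG_RES_A, MISC_CG_RES_B, MISC_CG_RES_TYPES };

static long usage[MISC_CG_RES_TYPES];
static long capacity[MISC_CG_RES_TYPES] = { 4, 0 };

static bool valid_type(enum misc_res_type type)
{
	return type >= 0 && type < MISC_CG_RES_TYPES;
}

static int try_charge(enum misc_res_type type, long amount)
{
	if (!valid_type(type))
		return -1;
	usage[type] += amount;
	if (usage[type] > capacity[type]) {
		usage[type] -= amount;	/* unwind, as misc_cg_cancel_charge() does */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("charge 3 on A: %d\n", try_charge(MISC_CG_RES_A, 3));	/* ok */
	printf("charge 3 on A: %d\n", try_charge(MISC_CG_RES_A, 3));	/* over capacity */
	printf("usage A: %ld\n", usage[MISC_CG_RES_A]);			/* still 3 */
	return 0;
}
```
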
/kernel/time/
alarmtimer.c
    200  struct alarm_base *base = &alarm_bases[alarm->type];  in alarmtimer_fired()
    227  struct alarm_base *base = &alarm_bases[alarm->type];  in alarm_expires_remaining()
    245  int i, ret, type;  in alarmtimer_suspend()  (local)
    253  type = freezer_alarmtype;  in alarmtimer_suspend()
    277  type = i;  in alarmtimer_suspend()
    288  trace_alarmtimer_suspend(expires, type);  in alarmtimer_suspend()
    326  __alarm_init(struct alarm *alarm, enum alarmtimer_type type,  in __alarm_init()  (argument)
    332  alarm->type = type;  in __alarm_init()
    342  void alarm_init(struct alarm *alarm, enum alarmtimer_type type,  in alarm_init()  (argument)
    345  hrtimer_init(&alarm->timer, alarm_bases[type].base_clockid,  in alarm_init()
     [all …]

/kernel/
pid.c
    163  enum pid_type type;  in alloc_pid()  (local)
    262  for (type = 0; type < PIDTYPE_MAX; ++type)  in alloc_pid()
    263  INIT_HLIST_HEAD(&pid->tasks[type]);  in alloc_pid()
    321  static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)  in task_pid_ptr()  (argument)
    323  return (type == PIDTYPE_PID) ?  in task_pid_ptr()
    325  &task->signal->pids[type];  in task_pid_ptr()
    331  void attach_pid(struct task_struct *task, enum pid_type type)  in attach_pid()  (argument)
    333  struct pid *pid = *task_pid_ptr(task, type);  in attach_pid()
    334  hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);  in attach_pid()
    337  static void __change_pid(struct task_struct *task, enum pid_type type,  in __change_pid()  (argument)
     [all …]

ucount.c
    230  enum ucount_type type)  in inc_ucount()  (argument)
    238  max = READ_ONCE(tns->ucount_max[type]);  in inc_ucount()
    239  if (!atomic_long_inc_below(&iter->ucount[type], max))  in inc_ucount()
    246  atomic_long_dec(&iter->ucount[type]);  in inc_ucount()
    252  void dec_ucount(struct ucounts *ucounts, enum ucount_type type)  in dec_ucount()  (argument)
    256  long dec = atomic_long_dec_if_positive(&iter->ucount[type]);  in dec_ucount()
    262  long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v)  in inc_rlimit_ucounts()  (argument)
    269  long new = atomic_long_add_return(v, &iter->rlimit[type]);  in inc_rlimit_ucounts()
    274  max = get_userns_rlimit_max(iter->ns, type);  in inc_rlimit_ucounts()
    279  bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v)  in dec_rlimit_ucounts()  (argument)
     [all …]

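The `inc_ucount()`/`dec` pair above shows a walk-up-and-unwind idiom: a per-type counter is incremented on every namespace level from the current one to the root, and if any level would exceed its max, the increments already taken are rolled back. A hedged sketch with plain longs in place of `atomic_long_t`, so it is single-threaded illustration only:

```c
#include <stdbool.h>
#include <stdio.h>

enum ucount_type { UCOUNT_USER_NAMESPACES, UCOUNT_TYPES };

struct ucounts {
	struct ucounts *parent;		/* NULL at the root */
	long count[UCOUNT_TYPES];
	long max[UCOUNT_TYPES];
};

static bool inc_ucount(struct ucounts *uc, enum ucount_type type)
{
	struct ucounts *iter, *bad = NULL;

	for (iter = uc; iter; iter = iter->parent) {
		if (iter->count[type] + 1 > iter->max[type]) {
			bad = iter;
			break;
		}
		iter->count[type]++;
	}
	if (!bad)
		return true;
	/* unwind the levels we already charged */
	for (iter = uc; iter != bad; iter = iter->parent)
		iter->count[type]--;
	return false;
}

int main(void)
{
	struct ucounts root  = { .max = { 1 } };
	struct ucounts child = { .parent = &root, .max = { 5 } };

	printf("%d\n", inc_ucount(&child, UCOUNT_USER_NAMESPACES));	/* 1: fits */
	printf("%d\n", inc_ucount(&child, UCOUNT_USER_NAMESPACES));	/* 0: root max hit */
	printf("child count: %ld\n", child.count[UCOUNT_USER_NAMESPACES]); /* still 1 */
	return 0;
}
```
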
auditsc.c
     90  int type;  (member)
    484  switch (f->type) {  in audit_filter_rules()
    681  result = security_audit_rule_match(sid, f->type,  in audit_filter_rules()
    698  f->type,  in audit_filter_rules()
    705  f->type,  in audit_filter_rules()
    714  if (!ctx || ctx->type != AUDIT_IPC)  in audit_filter_rules()
    717  f->type, f->op,  in audit_filter_rules()
    727  result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val);  in audit_filter_rules()
    916  if (context->type == AUDIT_KERN_MODULE) {  in audit_free_module()
   1019  ctx->type = 0; /* reset last for audit_free_*() */  in audit_reset_context()
     [all …]

jump_label.c
    337  enum jump_label_type type)  in arch_jump_label_transform_static()  (argument)
    345  WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);  in static_key_entries()
    346  return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);  in static_key_entries()
    351  return key->type & JUMP_TYPE_TRUE;  in static_key_type()
    356  return key->type & JUMP_TYPE_LINKED;  in static_key_linked()
    361  key->type &= ~JUMP_TYPE_LINKED;  in static_key_clear_linked()
    366  key->type |= JUMP_TYPE_LINKED;  in static_key_set_linked()
    381  unsigned long type;  in static_key_set_entries()  (local)
    384  type = key->type & JUMP_TYPE_MASK;  in static_key_set_entries()
    386  key->type |= type;  in static_key_set_entries()
     [all …]

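The static_key hits above are a tagged-pointer scheme: `key->type` is simultaneously a pointer to the jump_entry array (high bits) and a small flag word (`TRUE`/`LINKED` in the low alignment bits), recovered by masking with `JUMP_TYPE_MASK`. A standalone sketch of the same packing, with illustrative flag values:

```c
#include <stdint.h>
#include <stdio.h>

#define JUMP_TYPE_TRUE   1UL
#define JUMP_TYPE_LINKED 2UL
#define JUMP_TYPE_MASK   3UL	/* low alignment bits are free for flags */

struct jump_entry { int dummy; };	/* stand-in; real entries are larger */

struct static_key {
	uintptr_t type;		/* pointer | flags, as in the kernel's key->type */
};

static struct jump_entry *key_entries(struct static_key *key)
{
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static int key_is_true(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

int main(void)
{
	static struct jump_entry entries[4];	/* aligned, so low bits are zero */
	struct static_key key;

	key.type = (uintptr_t)entries | JUMP_TYPE_TRUE;
	printf("entries recovered: %d, true: %d\n",
	       key_entries(&key) == entries, key_is_true(&key));
	return 0;
}
```
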
kcmp.c
     39  static long kptr_obfuscate(long v, int type)  in kptr_obfuscate()  (argument)
     41  return (v ^ cookies[type][0]) * cookies[type][1];  in kptr_obfuscate()
     50  static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)  in kcmp_ptr()  (argument)
     54  t1 = kptr_obfuscate((long)v1, type);  in kcmp_ptr()
     55  t2 = kptr_obfuscate((long)v2, type);  in kcmp_ptr()
    135  SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,  in SYSCALL_DEFINE5()  (argument)
    169  switch (type) {  in SYSCALL_DEFINE5()

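`kptr_obfuscate()` XORs a per-type cookie into the pointer value and multiplies by a second cookie before the two results are compared, so kcmp(2) can report an ordering without leaking real kernel addresses: equal inputs still compare equal, but the order itself is scrambled. A sketch with fixed cookies standing in for the kernel's boot-time random ones (which it forces odd so the multiplication is invertible):

```c
#include <stdio.h>

enum kcmp_type { KCMP_FILE, KCMP_VM, KCMP_TYPES };

/* illustrative constants; the kernel fills these with random data */
static const unsigned long cookies[KCMP_TYPES][2] = {
	{ 0x5a5a5a5aUL, 0x9e3779b9UL | 1 },
	{ 0x3c3c3c3cUL, 0x85ebca6bUL | 1 },
};

static long kptr_obfuscate(long v, int type)
{
	/* unsigned arithmetic keeps the deliberate wraparound well-defined */
	return (long)(((unsigned long)v ^ cookies[type][0]) * cookies[type][1]);
}

/* 0 = equal, 1 = "less", 2 = "greater" -- ordered in obfuscated space */
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
	long t1 = kptr_obfuscate((long)v1, type);
	long t2 = kptr_obfuscate((long)v2, type);

	return (t1 < t2) | ((t1 > t2) << 1);
}

int main(void)
{
	int a, b;

	printf("same: %d\n", kcmp_ptr(&a, &a, KCMP_FILE));	/* 0 */
	printf("diff: %d\n", kcmp_ptr(&a, &b, KCMP_FILE));	/* 1 or 2 */
	return 0;
}
```
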
/kernel/kcsan/
core.c
    210  check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
    224  scoped_access->type, scoped_access->ip);  in kcsan_check_scoped_accesses()
    231  is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)  in is_atomic()  (argument)
    233  if (type & KCSAN_ACCESS_ATOMIC)  in is_atomic()
    241  if (type & KCSAN_ACCESS_ASSERT)  in is_atomic()
    245  (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&  in is_atomic()
    246  !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))  in is_atomic()
    268  should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)  in should_watch()  (argument)
    277  if (is_atomic(ctx, ptr, size, type))  in should_watch()
    321  static void delay_access(int type)  in delay_access()  (argument)
     [all …]

/kernel/power/
qos.c
     64  switch (c->type) {  in pm_qos_get_value()
    221  .type = PM_QOS_MIN,
    448  c->type = PM_QOS_MAX;  in freq_constraints_init()
    457  c->type = PM_QOS_MIN;  in freq_constraints_init()
    468  enum freq_qos_req_type type)  in freq_qos_read_value()  (argument)
    472  switch (type) {  in freq_qos_read_value()
    504  switch(req->type) {  in freq_qos_apply()
    536  enum freq_qos_req_type type, s32 value)  in freq_qos_add_request()  (argument)
    548  req->type = type;  in freq_qos_add_request()
    552  req->type = 0;  in freq_qos_add_request()
     [all …]

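The qos.c hits switch on a constraint's type (`PM_QOS_MIN` vs `PM_QOS_MAX`) to decide how competing requests aggregate: minimum-wins for bounds like latency, maximum-wins for floors like a minimum frequency. A reduced sketch of that dispatch over a plain array of request values; the `struct constraint` layout is invented for illustration:

```c
#include <stdio.h>

enum pm_qos_type { PM_QOS_MIN, PM_QOS_MAX };

struct constraint {
	enum pm_qos_type type;	/* how requests combine */
	const int *reqs;
	int nreqs;
	int default_value;	/* returned when no requests exist */
};

static int pm_qos_get_value(const struct constraint *c)
{
	int v;

	if (c->nreqs == 0)
		return c->default_value;

	v = c->reqs[0];
	for (int i = 1; i < c->nreqs; i++) {
		switch (c->type) {	/* the same dispatch the hits show */
		case PM_QOS_MIN:
			if (c->reqs[i] < v)
				v = c->reqs[i];
			break;
		case PM_QOS_MAX:
			if (c->reqs[i] > v)
				v = c->reqs[i];
			break;
		}
	}
	return v;
}

int main(void)
{
	const int reqs[] = { 400, 100, 250 };
	struct constraint min_c = { PM_QOS_MIN, reqs, 3, -1 };
	struct constraint max_c = { PM_QOS_MAX, reqs, 3, -1 };

	printf("min-wins: %d, max-wins: %d\n",
	       pm_qos_get_value(&min_c), pm_qos_get_value(&max_c)); /* 100, 400 */
	return 0;
}
```
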