Matches for the identifier "attr" under kernel/; each entry lists the source line number, the matching line, and the enclosing function, with longer match lists truncated ([all …]).

/kernel/
fail_function.c
    58  struct fei_attr *attr;   [in fei_attr_new(), local]
    60  attr = kzalloc(sizeof(*attr), GFP_KERNEL);   [in fei_attr_new()]
    61  if (attr) {   [in fei_attr_new()]
    62  attr->kp.symbol_name = kstrdup(sym, GFP_KERNEL);   [in fei_attr_new()]
    63  if (!attr->kp.symbol_name) {   [in fei_attr_new()]
    64  kfree(attr);   [in fei_attr_new()]
    67  attr->kp.pre_handler = fei_kprobe_handler;   [in fei_attr_new()]
    68  attr->kp.post_handler = fei_post_handler;   [in fei_attr_new()]
    69  attr->retval = adjust_error_retval(addr, 0);   [in fei_attr_new()]
    70  INIT_LIST_HEAD(&attr->list);   [in fei_attr_new()]
    [all …]
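The fail_function.c matches above fill in a struct kprobe held inside a larger attribute object: a symbol name plus pre/post handlers. A minimal, self-contained sketch of that kprobe registration pattern, with hypothetical handler and module names and an assumed-probeable target symbol; it is not the fail_function code itself:

#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/printk.h>

static int demo_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("hit %s\n", p->symbol_name);
        return 0;               /* 0 lets the probed function run normally */
}

static struct kprobe demo_kp = {
        .symbol_name = "kernel_clone",  /* hypothetical target, assumed probeable */
        .pre_handler = demo_pre_handler,
};

static int __init demo_kprobe_init(void)
{
        return register_kprobe(&demo_kp);
}

static void __exit demo_kprobe_exit(void)
{
        unregister_kprobe(&demo_kp);
}

module_init(demo_kprobe_init);
module_exit(demo_kprobe_exit);
MODULE_LICENSE("GPL");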
ksysfs.c
    31  struct kobj_attribute *attr, char *buf)   [in uevent_seqnum_show(), argument]
    40  struct kobj_attribute *attr, char *buf)   [in uevent_helper_show(), argument]
    45  struct kobj_attribute *attr,   [in uevent_helper_store(), argument]
    61  struct kobj_attribute *attr, char *buf)   [in profiling_show(), argument]
    66  struct kobj_attribute *attr,   [in profiling_store(), argument]
    92  struct kobj_attribute *attr, char *buf)   [in kexec_loaded_show(), argument]
    99  struct kobj_attribute *attr, char *buf)   [in kexec_crash_loaded_show(), argument]
    106  struct kobj_attribute *attr, char *buf)   [in kexec_crash_size_show(), argument]
    116  struct kobj_attribute *attr,   [in kexec_crash_size_store(), argument]
    135  struct kobj_attribute *attr, char *buf)   [in vmcoreinfo_show(), argument]
    [all …]
reboot.c
    1088  static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)   [in mode_show(), argument]
    1114  static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,   [in mode_store(), argument]
    1140  static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)   [in force_show(), argument]
    1144  static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,   [in force_store(), argument]
    1162  static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)   [in type_show(), argument]
    1191  static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,   [in type_store(), argument]
    1220  static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)   [in cpu_show(), argument]
    1224  static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,   [in cpu_store(), argument]
    1250  &reboot_mode_attr.attr,
    1252  &reboot_force_attr.attr,
    [all …]
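ksysfs.c, reboot.c, and the power/ and irq/ files further down all repeat the same kobj_attribute handler shape, and the reboot.c matches at 1250/1252 show the attribute pointers being collected into an array for sysfs. A condensed sketch of that pattern under hypothetical names: one boolean attribute exported under /sys/kernel/demo/, assuming the usual sysfs_emit()/kstrtobool() helpers:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static bool demo_enabled;

static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%d\n", demo_enabled);
}

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        if (kstrtobool(buf, &demo_enabled))
                return -EINVAL;
        return count;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

static struct attribute *demo_attrs[] = {
        &enabled_attr.attr,     /* same shape as &reboot_mode_attr.attr above */
        NULL
};

static const struct attribute_group demo_attr_group = {
        .attrs = demo_attrs,
};

static struct kobject *demo_kobj;

static int __init demo_sysfs_init(void)
{
        demo_kobj = kobject_create_and_add("demo", kernel_kobj);
        if (!demo_kobj)
                return -ENOMEM;
        return sysfs_create_group(demo_kobj, &demo_attr_group);
}
module_init(demo_sysfs_init);
MODULE_LICENSE("GPL");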
padata.c
    845  container_of(_attr, struct padata_sysfs_entry, attr)
    854  struct attribute attr;   [member]
    861  struct attribute *attr, char *buf)   [in show_cpumask(), argument]
    867  if (!strcmp(attr->name, "serial_cpumask"))   [in show_cpumask()]
    879  struct attribute *attr,   [in store_cpumask(), argument]
    894  mask_type = !strcmp(attr->name, "serial_cpumask") ?   [in store_cpumask()]
    921  &serial_cpumask_attr.attr,
    922  &parallel_cpumask_attr.attr,
    928  struct attribute *attr, char *buf)   [in padata_sysfs_show(), argument]
    935  pentry = attr2pentry(attr);   [in padata_sysfs_show()]
    [all …]
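padata.c does not use kobj_attribute; it embeds a bare struct attribute in its own padata_sysfs_entry and recovers the entry with container_of() (match at line 845), dispatching through a private sysfs_ops (padata_sysfs_show at 928). A stripped-down sketch of that pattern with hypothetical type and attribute names; the real code also carries store callbacks:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

struct demo_obj {
        struct kobject kobj;
        int value;
};

struct demo_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct demo_obj *obj, char *buf);
};

#define to_demo_obj(k)  container_of(k, struct demo_obj, kobj)
#define attr2entry(a)   container_of(a, struct demo_sysfs_entry, attr)

/* One ->show for every attribute: recover the object and the entry,
 * then dispatch to the per-entry callback. */
static ssize_t demo_sysfs_show(struct kobject *kobj, struct attribute *attr,
                               char *buf)
{
        struct demo_obj *obj = to_demo_obj(kobj);
        struct demo_sysfs_entry *entry = attr2entry(attr);

        if (!entry->show)
                return -EIO;
        return entry->show(obj, buf);
}

static const struct sysfs_ops demo_sysfs_ops = {
        .show = demo_sysfs_show,
};

static ssize_t value_show(struct demo_obj *obj, char *buf)
{
        return sysfs_emit(buf, "%d\n", obj->value);
}

static struct demo_sysfs_entry value_entry = {
        .attr = { .name = "value", .mode = 0444 },
        .show = value_show,
};

static struct attribute *demo_default_attrs[] = {
        &value_entry.attr,
        NULL
};
ATTRIBUTE_GROUPS(demo_default);

static struct kobj_type demo_ktype = {
        .sysfs_ops      = &demo_sysfs_ops,
        .default_groups = demo_default_groups,
};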
/kernel/power/
main.c
    103  static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,   [in pm_async_show(), argument]
    109  static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,   [in pm_async_store(), argument]
    127  static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,   [in mem_sleep_show(), argument]
    172  static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,   [in mem_sleep_store(), argument]
    209  struct kobj_attribute *attr, char *buf)   [in sync_on_suspend_show(), argument]
    215  struct kobj_attribute *attr,   [in sync_on_suspend_store(), argument]
    245  static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,   [in pm_test_show(), argument]
    266  static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,   [in pm_test_store(), argument]
    319  struct kobj_attribute *attr, char *buf) \
    337  struct kobj_attribute *attr, char *buf)   [in last_failed_dev_show(), argument]
    [all …]
hibernate.c
    1080  static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,   [in disk_show(), argument]
    1115  static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,   [in disk_store(), argument]
    1167  static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,   [in resume_show(), argument]
    1174  static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,   [in resume_store(), argument]
    1207  struct kobj_attribute *attr, char *buf)   [in resume_offset_show(), argument]
    1213  struct kobj_attribute *attr, const char *buf,   [in resume_offset_store(), argument]
    1229  static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,   [in image_size_show(), argument]
    1235  static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,   [in image_size_store(), argument]
    1251  struct kobj_attribute *attr, char *buf)   [in reserved_size_show(), argument]
    1257  struct kobj_attribute *attr,   [in reserved_size_store(), argument]
    [all …]
/kernel/bpf/
syscall.c
    112  static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)   [in find_and_alloc_map(), argument]
    115  u32 type = attr->map_type;   [in find_and_alloc_map()]
    127  err = ops->map_alloc_check(attr);   [in find_and_alloc_map()]
    131  if (attr->map_ifindex)   [in find_and_alloc_map()]
    133  map = ops->map_alloc(attr);   [in find_and_alloc_map()]
    366  void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)   [in bpf_map_init_from_attr(), argument]
    368  map->map_type = attr->map_type;   [in bpf_map_init_from_attr()]
    369  map->key_size = attr->key_size;   [in bpf_map_init_from_attr()]
    370  map->value_size = attr->value_size;   [in bpf_map_init_from_attr()]
    371  map->max_entries = attr->max_entries;   [in bpf_map_init_from_attr()]
    [all …]
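find_and_alloc_map() above dispatches through the map type's bpf_map_ops: ->map_alloc_check() validates the user-supplied union bpf_attr, ->map_alloc() sizes and allocates the map, and bpf_map_init_from_attr() copies the common fields into struct bpf_map. A condensed sketch of what one such ops pair can look like, with hypothetical names and a deliberately simple size calculation; the real implementations below (bloom_filter.c, hashtab.c, arraymap.c, and the rest) add many more checks:

#include <linux/bpf.h>
#include <linux/err.h>

struct demo_map {
        struct bpf_map map;     /* generic header must come first */
        u8 data[];              /* max_entries * value_size bytes of storage */
};

/* ->map_alloc_check(): reject attribute combinations this type can't handle. */
static int demo_map_alloc_check(union bpf_attr *attr)
{
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 || attr->map_flags)
                return -EINVAL;
        return 0;
}

/* ->map_alloc(): size the map from the attributes and copy the common fields. */
static struct bpf_map *demo_map_alloc(union bpf_attr *attr)
{
        int numa_node = bpf_map_attr_numa_node(attr);
        struct demo_map *dmap;
        u64 size;

        size = sizeof(*dmap) + (u64)attr->max_entries * attr->value_size;
        dmap = bpf_map_area_alloc(size, numa_node);
        if (!dmap)
                return ERR_PTR(-ENOMEM);

        /* copies map_type, key_size, value_size, max_entries, flags, ... */
        bpf_map_init_from_attr(&dmap->map, attr);
        return &dmap->map;
}
/* In the kernel these land in a struct bpf_map_ops that the map-type
 * registration points at; that wiring is omitted here. */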
bloom_filter.c
    91  static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)   [in bloom_map_alloc(), argument]
    94  int numa_node = bpf_map_attr_numa_node(attr);   [in bloom_map_alloc()]
    100  if (attr->key_size != 0 || attr->value_size == 0 ||   [in bloom_map_alloc()]
    101  attr->max_entries == 0 ||   [in bloom_map_alloc()]
    102  attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||   [in bloom_map_alloc()]
    103  !bpf_map_flags_access_ok(attr->map_flags) ||   [in bloom_map_alloc()]
    107  (attr->map_extra & ~0xF))   [in bloom_map_alloc()]
    110  nr_hash_funcs = attr->map_extra;   [in bloom_map_alloc()]
    126  if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||   [in bloom_map_alloc()]
    150  bpf_map_init_from_attr(&bloom->map, attr);   [in bloom_map_alloc()]
    [all …]
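bloom_map_alloc() above is driven entirely by union bpf_attr fields: key_size must be 0, value_size and max_entries non-zero, and only the low four bits of map_extra (the hash-function count) may be set. A user-space sketch of a BPF_MAP_CREATE call that satisfies those checks, using the raw bpf(2) syscall rather than libbpf; it assumes a kernel with BPF_MAP_TYPE_BLOOM_FILTER (5.16 or later), and the value size is an arbitrary example:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Create a bloom filter map for max_entries 8-byte values; nr_hashes picks
 * the number of hash functions (only the low 4 bits of map_extra are valid). */
static int create_bloom_map(unsigned int max_entries, unsigned int nr_hashes)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_BLOOM_FILTER;
        attr.key_size    = 0;            /* bloom filters take no key */
        attr.value_size  = 8;            /* size of each value to be hashed */
        attr.max_entries = max_entries;
        attr.map_extra   = nr_hashes;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}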
net_namespace.c
    246  static int __netns_bpf_prog_query(const union bpf_attr *attr,   [in __netns_bpf_prog_query(), argument]
    251  __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);   [in __netns_bpf_prog_query()]
    264  if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)   [in __netns_bpf_prog_query()]
    268  attr->query.prog_cnt);   [in __netns_bpf_prog_query()]
    271  int netns_bpf_prog_query(const union bpf_attr *attr,   [in netns_bpf_prog_query(), argument]
    278  if (attr->query.query_flags)   [in netns_bpf_prog_query()]
    281  type = to_netns_bpf_attach_type(attr->query.attach_type);   [in netns_bpf_prog_query()]
    285  net = get_net_ns_by_fd(attr->query.target_fd);   [in netns_bpf_prog_query()]
    290  ret = __netns_bpf_prog_query(attr, uattr, net, type);   [in netns_bpf_prog_query()]
    297  int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)   [in netns_bpf_prog_attach(), argument]
    [all …]
queue_stack_maps.c
    47  static int queue_stack_map_alloc_check(union bpf_attr *attr)   [in queue_stack_map_alloc_check(), argument]
    53  if (attr->max_entries == 0 || attr->key_size != 0 ||   [in queue_stack_map_alloc_check()]
    54  attr->value_size == 0 ||   [in queue_stack_map_alloc_check()]
    55  attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||   [in queue_stack_map_alloc_check()]
    56  !bpf_map_flags_access_ok(attr->map_flags))   [in queue_stack_map_alloc_check()]
    59  if (attr->value_size > KMALLOC_MAX_SIZE)   [in queue_stack_map_alloc_check()]
    68  static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)   [in queue_stack_map_alloc(), argument]
    70  int numa_node = bpf_map_attr_numa_node(attr);   [in queue_stack_map_alloc()]
    74  size = (u64) attr->max_entries + 1;   [in queue_stack_map_alloc()]
    75  queue_size = sizeof(*qs) + size * attr->value_size;   [in queue_stack_map_alloc()]
    [all …]
hashtab.c
    401  static int htab_map_alloc_check(union bpf_attr *attr)   [in htab_map_alloc_check(), argument]
    403  bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||   [in htab_map_alloc_check()]
    404  attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);   [in htab_map_alloc_check()]
    405  bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||   [in htab_map_alloc_check()]
    406  attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);   [in htab_map_alloc_check()]
    412  bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);   [in htab_map_alloc_check()]
    413  bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);   [in htab_map_alloc_check()]
    414  bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);   [in htab_map_alloc_check()]
    415  int numa_node = bpf_map_attr_numa_node(attr);   [in htab_map_alloc_check()]
    430  if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||   [in htab_map_alloc_check()]
    [all …]
arraymap.c
    52  int array_map_alloc_check(union bpf_attr *attr)   [in array_map_alloc_check(), argument]
    54  bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;   [in array_map_alloc_check()]
    55  int numa_node = bpf_map_attr_numa_node(attr);   [in array_map_alloc_check()]
    58  if (attr->max_entries == 0 || attr->key_size != 4 ||   [in array_map_alloc_check()]
    59  attr->value_size == 0 ||   [in array_map_alloc_check()]
    60  attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||   [in array_map_alloc_check()]
    61  !bpf_map_flags_access_ok(attr->map_flags) ||   [in array_map_alloc_check()]
    65  if (attr->map_type != BPF_MAP_TYPE_ARRAY &&   [in array_map_alloc_check()]
    66  attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))   [in array_map_alloc_check()]
    69  if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&   [in array_map_alloc_check()]
    [all …]
reuseport_array.c
    41  static int reuseport_array_alloc_check(union bpf_attr *attr)   [in reuseport_array_alloc_check(), argument]
    43  if (attr->value_size != sizeof(u32) &&   [in reuseport_array_alloc_check()]
    44  attr->value_size != sizeof(u64))   [in reuseport_array_alloc_check()]
    47  return array_map_alloc_check(attr);   [in reuseport_array_alloc_check()]
    149  static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)   [in reuseport_array_alloc(), argument]
    151  int numa_node = bpf_map_attr_numa_node(attr);   [in reuseport_array_alloc()]
    158  array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);   [in reuseport_array_alloc()]
    163  bpf_map_init_from_attr(&array->map, attr);   [in reuseport_array_alloc()]
bpf_local_storage.c
    598  int bpf_local_storage_map_alloc_check(union bpf_attr *attr)   [in bpf_local_storage_map_alloc_check(), argument]
    600  if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||   [in bpf_local_storage_map_alloc_check()]
    601  !(attr->map_flags & BPF_F_NO_PREALLOC) ||   [in bpf_local_storage_map_alloc_check()]
    602  attr->max_entries ||   [in bpf_local_storage_map_alloc_check()]
    603  attr->key_size != sizeof(int) || !attr->value_size ||   [in bpf_local_storage_map_alloc_check()]
    605  !attr->btf_key_type_id || !attr->btf_value_type_id)   [in bpf_local_storage_map_alloc_check()]
    611  if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)   [in bpf_local_storage_map_alloc_check()]
    617  struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)   [in bpf_local_storage_map_alloc(), argument]
    626  bpf_map_init_from_attr(&smap->map, attr);   [in bpf_local_storage_map_alloc()]
    646  sizeof(struct bpf_local_storage_elem) + attr->value_size;   [in bpf_local_storage_map_alloc()]
local_storage.c
    285  static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)   [in cgroup_storage_map_alloc(), argument]
    288  int numa_node = bpf_map_attr_numa_node(attr);   [in cgroup_storage_map_alloc()]
    294  if (attr->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)   [in cgroup_storage_map_alloc()]
    298  if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&   [in cgroup_storage_map_alloc()]
    299  attr->key_size != sizeof(__u64))   [in cgroup_storage_map_alloc()]
    302  if (attr->value_size == 0)   [in cgroup_storage_map_alloc()]
    305  if (attr->value_size > max_value_size)   [in cgroup_storage_map_alloc()]
    308  if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||   [in cgroup_storage_map_alloc()]
    309  !bpf_map_flags_access_ok(attr->map_flags))   [in cgroup_storage_map_alloc()]
    312  if (attr->max_entries)   [in cgroup_storage_map_alloc()]
    [all …]
lpm_trie.c
    546  static struct bpf_map *trie_alloc(union bpf_attr *attr)   [in trie_alloc(), argument]
    554  if (attr->max_entries == 0 ||   [in trie_alloc()]
    555  !(attr->map_flags & BPF_F_NO_PREALLOC) ||   [in trie_alloc()]
    556  attr->map_flags & ~LPM_CREATE_FLAG_MASK ||   [in trie_alloc()]
    557  !bpf_map_flags_access_ok(attr->map_flags) ||   [in trie_alloc()]
    558  attr->key_size < LPM_KEY_SIZE_MIN ||   [in trie_alloc()]
    559  attr->key_size > LPM_KEY_SIZE_MAX ||   [in trie_alloc()]
    560  attr->value_size < LPM_VAL_SIZE_MIN ||   [in trie_alloc()]
    561  attr->value_size > LPM_VAL_SIZE_MAX)   [in trie_alloc()]
    569  bpf_map_init_from_attr(&trie->map, attr);   [in trie_alloc()]
    [all …]
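trie_alloc() above insists on BPF_F_NO_PREALLOC and bounds the key size to a struct bpf_lpm_trie_key header plus prefix bytes. A matching user-space creation sketch for an IPv4 trie, again via the raw bpf(2) syscall; the value size and entry count are arbitrary examples:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_ipv4_lpm_map(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_LPM_TRIE;
        /* key = prefix-length header + 4 bytes of IPv4 address */
        attr.key_size    = sizeof(struct bpf_lpm_trie_key) + 4;
        attr.value_size  = 8;                    /* example payload size */
        attr.max_entries = 128;
        attr.map_flags   = BPF_F_NO_PREALLOC;    /* required by trie_alloc() */

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}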
stackmap.c
    70  static struct bpf_map *stack_map_alloc(union bpf_attr *attr)   [in stack_map_alloc(), argument]
    72  u32 value_size = attr->value_size;   [in stack_map_alloc()]
    80  if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)   [in stack_map_alloc()]
    84  if (attr->max_entries == 0 || attr->key_size != 4 ||   [in stack_map_alloc()]
    89  if (attr->map_flags & BPF_F_STACK_BUILD_ID) {   [in stack_map_alloc()]
    98  n_buckets = roundup_pow_of_two(attr->max_entries);   [in stack_map_alloc()]
    103  smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));   [in stack_map_alloc()]
    107  bpf_map_init_from_attr(&smap->map, attr);   [in stack_map_alloc()]
    341  if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))   [in BPF_CALL_3()]
    518  if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))   [in BPF_CALL_4()]
offload.c
    80  int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)   [in bpf_prog_offload_init(), argument]
    86  if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&   [in bpf_prog_offload_init()]
    87  attr->prog_type != BPF_PROG_TYPE_XDP)   [in bpf_prog_offload_init()]
    90  if (attr->prog_flags)   [in bpf_prog_offload_init()]
    100  attr->prog_ifindex);   [in bpf_prog_offload_init()]
    359  struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)   [in bpf_map_offload_map_alloc(), argument]
    368  if (attr->map_type != BPF_MAP_TYPE_ARRAY &&   [in bpf_map_offload_map_alloc()]
    369  attr->map_type != BPF_MAP_TYPE_HASH)   [in bpf_map_offload_map_alloc()]
    376  bpf_map_init_from_attr(&offmap->map, attr);   [in bpf_map_offload_map_alloc()]
    380  offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);   [in bpf_map_offload_map_alloc()]
/kernel/module/
sysfs.c
    68  kfree(sect_attrs->attrs[section].battr.attr.name);   [in free_sect_attrs()]
    104  sattr->battr.attr.name =   [in add_sect_attrs()]
    106  if (!sattr->battr.attr.name)   [in add_sect_attrs()]
    111  sattr->battr.attr.mode = 0400;   [in add_sect_attrs()]
    204  nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;   [in add_notes_attrs()]
    205  nattr->attr.mode = 0444;   [in add_notes_attrs()]
    277  struct module_attribute *attr;   [in module_remove_modinfo_attrs(), local]
    280  for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {   [in module_remove_modinfo_attrs()]
    284  if (!attr->attr.name)   [in module_remove_modinfo_attrs()]
    286  sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);   [in module_remove_modinfo_attrs()]
    [all …]
/kernel/events/
hw_breakpoint.c
    339  if (find_slot_idx(iter->attr.bp_type) != type)   [in task_bp_pinned()]
    626  int ret = __reserve_bp_slot(bp, bp->attr.bp_type);   [in reserve_bp_slot()]
    649  __release_bp_slot(bp, bp->attr.bp_type);   [in release_bp_slot()]
    698  ret = __reserve_bp_slot(bp, bp->attr.bp_type);   [in dbg_reserve_bp_slot()]
    711  __release_bp_slot(bp, bp->attr.bp_type);   [in dbg_release_bp_slot()]
    718  const struct perf_event_attr *attr,   [in hw_breakpoint_parse(), argument]
    723  err = hw_breakpoint_arch_parse(bp, attr, hw);   [in hw_breakpoint_parse()]
    728  if (attr->exclude_kernel)   [in hw_breakpoint_parse()]
    750  err = hw_breakpoint_parse(bp, &bp->attr, &hw);   [in register_perf_hw_breakpoint()]
    769  register_user_hw_breakpoint(struct perf_event_attr *attr,   [in register_user_hw_breakpoint(), argument]
    [all …]
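hw_breakpoint.c consumes a perf_event_attr describing the watch target (bp_addr, bp_len, bp_type) and accounts for breakpoint slots. A minimal consumer-side sketch: a module that places a write watchpoint on one of its own variables via register_wide_hw_breakpoint(); the names are hypothetical and error handling is reduced to the registration call:

#include <linux/err.h>
#include <linux/hw_breakpoint.h>
#include <linux/module.h>
#include <linux/perf_event.h>

static unsigned long watched_value;             /* the variable being watched */
static struct perf_event * __percpu *demo_wp;

static void demo_wp_handler(struct perf_event *bp,
                            struct perf_sample_data *data,
                            struct pt_regs *regs)
{
        pr_info("watched_value was written\n");
}

static int __init demo_wp_init(void)
{
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);              /* perf boilerplate for breakpoints */
        attr.bp_addr = (unsigned long)&watched_value;
        attr.bp_len  = HW_BREAKPOINT_LEN_8;
        attr.bp_type = HW_BREAKPOINT_W;

        demo_wp = register_wide_hw_breakpoint(&attr, demo_wp_handler, NULL);
        if (IS_ERR((void __force *)demo_wp))
                return PTR_ERR((void __force *)demo_wp);
        return 0;
}

static void __exit demo_wp_exit(void)
{
        unregister_wide_hw_breakpoint(demo_wp);
}

module_init(demo_wp_init);
module_exit(demo_wp_exit);
MODULE_LICENSE("GPL");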
core.c
    918  struct perf_event_attr *attr,   [in perf_cgroup_connect(), argument]
    1026  struct perf_event_attr *attr,   [in perf_cgroup_connect(), argument]
    1544  event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;   [in get_event_type()]
    1567  if (event->attr.pinned)   [in get_event_groups()]
    1797  if (event->attr.inherit_stat)   [in list_add_event()]
    1811  event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :   [in perf_event__state_init()]
    1893  __perf_event_read_size(event->attr.read_format,   [in perf_event__header_size()]
    1895  __perf_event_header_size(event, event->attr.sample_type);   [in perf_event__header_size()]
    1901  u64 sample_type = event->attr.sample_type;   [in perf_event__id_header_size()]
    1940  if (__perf_event_read_size(event->attr.read_format,   [in perf_event_validate_size()]
    [all …]
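Most of the core.c matches read fields that user space fills into struct perf_event_attr before perf_event_open(2): disabled, pinned, sample_type, read_format. A trimmed version of the standard counting example, opening one hardware instruction counter on the calling thread; glibc has no wrapper, so the raw syscall is used:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
                            int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        long long count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.type           = PERF_TYPE_HARDWARE;
        attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled       = 1;        /* start stopped; cf. perf_event__state_init() */
        attr.exclude_kernel = 1;
        attr.exclude_hv     = 1;

        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... measured workload goes here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("instructions: %lld\n", count);
        close(fd);
        return 0;
}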
/kernel/trace/
trace_event_perf.c
    52  ret = perf_allow_tracepoint(&p_event->attr);   [in perf_trace_event_perm()]
    64  if (!p_event->attr.exclude_callchain_user)   [in perf_trace_event_perm()]
    71  if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)   [in perf_trace_event_perm()]
    76  if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))   [in perf_trace_event_perm()]
    89  ret = perf_allow_tracepoint(&p_event->attr);   [in perf_trace_event_perm()]
    218  u64 event_id = p_event->attr.config;   [in perf_trace_init()]
    253  if (p_event->attr.kprobe_func) {   [in perf_kprobe_init()]
    258  func, u64_to_user_ptr(p_event->attr.kprobe_func),   [in perf_kprobe_init()]
    272  func, (void *)(unsigned long)(p_event->attr.kprobe_addr),   [in perf_kprobe_init()]
    273  p_event->attr.probe_offset, is_retprobe);   [in perf_kprobe_init()]
    [all …]
/kernel/irq/
irqdesc.c
    147  struct kobj_attribute *attr, char *buf)   [in per_cpu_count_show(), argument]
    167  struct kobj_attribute *attr, char *buf)   [in chip_name_show(), argument]
    184  struct kobj_attribute *attr, char *buf)   [in hwirq_show(), argument]
    199  struct kobj_attribute *attr, char *buf)   [in type_show(), argument]
    215  struct kobj_attribute *attr, char *buf)   [in wakeup_show(), argument]
    231  struct kobj_attribute *attr, char *buf)   [in name_show(), argument]
    246  struct kobj_attribute *attr, char *buf)   [in actions_show(), argument]
    269  &per_cpu_count_attr.attr,
    270  &chip_name_attr.attr,
    271  &hwirq_attr.attr,
    [all …]
/kernel/sched/
deadline.c
    2841  const struct sched_attr *attr)   [in sched_dl_overflow(), argument]
    2843  u64 period = attr->sched_period ?: attr->sched_deadline;   [in sched_dl_overflow()]
    2844  u64 runtime = attr->sched_runtime;   [in sched_dl_overflow()]
    2850  if (attr->sched_flags & SCHED_FLAG_SUGOV)   [in sched_dl_overflow()]
    2906  void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)   [in __setparam_dl(), argument]
    2910  dl_se->dl_runtime = attr->sched_runtime;   [in __setparam_dl()]
    2911  dl_se->dl_deadline = attr->sched_deadline;   [in __setparam_dl()]
    2912  dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;   [in __setparam_dl()]
    2913  dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;   [in __setparam_dl()]
    2918  void __getparam_dl(struct task_struct *p, struct sched_attr *attr)   [in __getparam_dl(), argument]
    [all …]
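__setparam_dl() above copies sched_runtime, sched_deadline and sched_period out of the struct sched_attr that user space passes to sched_setattr(2), with a zero period falling back to the deadline. A user-space sketch requesting SCHED_DEADLINE for the calling thread (needs the privilege to set deadline parameters, typically root); glibc has no wrapper, so the struct is declared locally to mirror the uapi layout, and the 2 ms / 10 ms numbers are arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

struct sched_attr {                     /* mirrors include/uapi/linux/sched/types.h */
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;         /* ns of CPU time per period */
        uint64_t sched_deadline;        /* ns, relative deadline */
        uint64_t sched_period;          /* ns; 0 means "use the deadline" */
};

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  =  2 * 1000 * 1000;         /*  2 ms budget ... */
        attr.sched_deadline = 10 * 1000 * 1000;         /* ... due within 10 ms ... */
        attr.sched_period   = 10 * 1000 * 1000;         /* ... every 10 ms */

        if (syscall(SYS_sched_setattr, 0, &attr, 0)) {  /* pid 0 = this thread */
                perror("sched_setattr");
                return 1;
        }
        /* the calling thread now runs under SCHED_DEADLINE */
        return 0;
}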
core.c
    1884  const struct sched_attr *attr, bool user)   [in uclamp_validate(), argument]
    1891  trace_android_vh_uclamp_validate(p, attr, user, &ret, &done);   [in uclamp_validate()]
    1895  if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {   [in uclamp_validate()]
    1896  util_min = attr->sched_util_min;   [in uclamp_validate()]
    1902  if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {   [in uclamp_validate()]
    1903  util_max = attr->sched_util_max;   [in uclamp_validate()]
    1932  static bool uclamp_reset(const struct sched_attr *attr,   [in uclamp_reset(), argument]
    1937  if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&   [in uclamp_reset()]
    1943  attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&   [in uclamp_reset()]
    1944  attr->sched_util_min == -1) {   [in uclamp_reset()]
    [all …]
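uclamp_validate() and uclamp_reset() above read sched_util_min/sched_util_max and the SCHED_FLAG_UTIL_CLAMP_* bits from the same struct sched_attr. A sketch that sets only the clamps on the current task while keeping its policy and parameters; the flag constants and extended struct layout are declared locally so the example builds against older headers, and the value 256 (out of 1024) is just an example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_FLAG_KEEP_POLICY
#define SCHED_FLAG_KEEP_POLICY          0x08
#define SCHED_FLAG_KEEP_PARAMS          0x10
#define SCHED_FLAG_UTIL_CLAMP_MIN       0x20
#define SCHED_FLAG_UTIL_CLAMP_MAX       0x40
#endif

struct sched_attr {                     /* uapi layout including the uclamp fields */
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
        uint32_t sched_util_min;        /* 0..1024, or -1 to reset to the default */
        uint32_t sched_util_max;
};

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.sched_flags    = SCHED_FLAG_KEEP_POLICY | SCHED_FLAG_KEEP_PARAMS |
                              SCHED_FLAG_UTIL_CLAMP_MIN | SCHED_FLAG_UTIL_CLAMP_MAX;
        attr.sched_util_min = 256;      /* ask for at least ~25% of max capacity */
        attr.sched_util_max = 1024;

        if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
                perror("sched_setattr");
                return 1;
        }
        return 0;
}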