Searched for refs:attr (results 1 – 25 of 58), sorted by relevance

/kernel/
fail_function.c
58 struct fei_attr *attr; in fei_attr_new() local
60 attr = kzalloc(sizeof(*attr), GFP_KERNEL); in fei_attr_new()
61 if (attr) { in fei_attr_new()
62 attr->kp.symbol_name = kstrdup(sym, GFP_KERNEL); in fei_attr_new()
63 if (!attr->kp.symbol_name) { in fei_attr_new()
64 kfree(attr); in fei_attr_new()
67 attr->kp.pre_handler = fei_kprobe_handler; in fei_attr_new()
68 attr->kp.post_handler = fei_post_handler; in fei_attr_new()
69 attr->retval = adjust_error_retval(addr, 0); in fei_attr_new()
70 INIT_LIST_HEAD(&attr->list); in fei_attr_new()
[all …]
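
The fail_function.c hits show the standard kernel allocate-and-initialize pattern: kzalloc() the object, kstrdup() an embedded string, and unwind the half-built allocation when the second step fails. A minimal sketch of the same shape (struct my_attr and my_attr_new() are hypothetical names, not from the file):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/list.h>

    struct my_attr {
            char *name;
            struct list_head list;
    };

    static struct my_attr *my_attr_new(const char *sym)
    {
            struct my_attr *attr;

            attr = kzalloc(sizeof(*attr), GFP_KERNEL);  /* zeroed, may sleep */
            if (!attr)
                    return NULL;

            attr->name = kstrdup(sym, GFP_KERNEL);
            if (!attr->name) {
                    kfree(attr);            /* free the partial object */
                    return NULL;
            }

            INIT_LIST_HEAD(&attr->list);
            return attr;
    }
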
ksysfs.c
32 struct kobj_attribute *attr, char *buf) in uevent_seqnum_show() argument
41 struct kobj_attribute *attr, char *buf) in uevent_helper_show() argument
46 struct kobj_attribute *attr, in uevent_helper_store() argument
62 struct kobj_attribute *attr, char *buf) in profiling_show() argument
67 struct kobj_attribute *attr, in profiling_store() argument
93 struct kobj_attribute *attr, char *buf) in kexec_loaded_show() argument
100 struct kobj_attribute *attr, char *buf) in kexec_crash_loaded_show() argument
107 struct kobj_attribute *attr, char *buf) in kexec_crash_size_show() argument
117 struct kobj_attribute *attr, in kexec_crash_size_store() argument
136 struct kobj_attribute *attr, char *buf) in vmcoreinfo_show() argument
[all …]
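
Every ksysfs.c hit is a show or store callback taking a struct kobj_attribute. A minimal sketch of such a pair for a hypothetical read-write integer my_value (not an attribute from this file):

    #include <linux/kobject.h>
    #include <linux/sysfs.h>
    #include <linux/kernel.h>

    static int my_value;

    static ssize_t my_value_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", my_value);
    }

    static ssize_t my_value_store(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  const char *buf, size_t count)
    {
            if (kstrtoint(buf, 10, &my_value))
                    return -EINVAL;
            return count;                   /* consumed everything */
    }

    /* __ATTR_RW() wires up my_value_show/my_value_store by name */
    static struct kobj_attribute my_value_attr = __ATTR_RW(my_value);
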
padata.c
858 container_of(_attr, struct padata_sysfs_entry, attr)
867 struct attribute attr; member
874 struct attribute *attr, char *buf) in show_cpumask() argument
880 if (!strcmp(attr->name, "serial_cpumask")) in show_cpumask()
892 struct attribute *attr, in store_cpumask() argument
907 mask_type = !strcmp(attr->name, "serial_cpumask") ? in store_cpumask()
934 &serial_cpumask_attr.attr,
935 &parallel_cpumask_attr.attr,
941 struct attribute *attr, char *buf) in padata_sysfs_show() argument
948 pentry = attr2pentry(attr); in padata_sysfs_show()
[all …]
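
padata.c embeds a bare struct attribute inside its own padata_sysfs_entry, recovers the entry with container_of() in the generic sysfs_ops callbacks, and dispatches on attr->name. A sketch of the idiom with hypothetical names:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>

    struct my_sysfs_entry {
            struct attribute attr;
            ssize_t (*show)(struct kobject *kobj, char *buf);
    };

    #define attr2entry(_attr) \
            container_of(_attr, struct my_sysfs_entry, attr)

    /* generic sysfs_ops .show: map the attribute back to its entry */
    static ssize_t my_sysfs_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
    {
            struct my_sysfs_entry *entry = attr2entry(attr);

            return entry->show ? entry->show(kobj, buf) : -EIO;
    }
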
/kernel/bpf/
syscall.c
104 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) in find_and_alloc_map() argument
107 u32 type = attr->map_type; in find_and_alloc_map()
119 err = ops->map_alloc_check(attr); in find_and_alloc_map()
123 if (attr->map_ifindex) in find_and_alloc_map()
125 map = ops->map_alloc(attr); in find_and_alloc_map()
352 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) in bpf_map_init_from_attr() argument
354 map->map_type = attr->map_type; in bpf_map_init_from_attr()
355 map->key_size = attr->key_size; in bpf_map_init_from_attr()
356 map->value_size = attr->value_size; in bpf_map_init_from_attr()
357 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
[all …]
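
find_and_alloc_map() and bpf_map_init_from_attr() consume the union bpf_attr that userspace hands to the bpf(2) syscall. A minimal userspace sketch of filling that attr for BPF_MAP_CREATE (unused fields must stay zero or the kernel rejects the call):

    #include <linux/bpf.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    int create_hash_map(void)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.map_type    = BPF_MAP_TYPE_HASH;
            attr.key_size    = sizeof(int);        /* checked in map_alloc_check */
            attr.value_size  = sizeof(long long);
            attr.max_entries = 256;

            return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }
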
net_namespace.c
245 static int __netns_bpf_prog_query(const union bpf_attr *attr, in __netns_bpf_prog_query() argument
250 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); in __netns_bpf_prog_query()
263 if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) in __netns_bpf_prog_query()
267 attr->query.prog_cnt); in __netns_bpf_prog_query()
270 int netns_bpf_prog_query(const union bpf_attr *attr, in netns_bpf_prog_query() argument
277 if (attr->query.query_flags) in netns_bpf_prog_query()
280 type = to_netns_bpf_attach_type(attr->query.attach_type); in netns_bpf_prog_query()
284 net = get_net_ns_by_fd(attr->query.target_fd); in netns_bpf_prog_query()
289 ret = __netns_bpf_prog_query(attr, uattr, net, type); in netns_bpf_prog_query()
296 int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) in netns_bpf_prog_attach() argument
[all …]
hashtab.c
342 static int htab_map_alloc_check(union bpf_attr *attr) in htab_map_alloc_check() argument
344 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc_check()
345 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
346 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc_check()
347 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
353 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc_check()
354 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc_check()
355 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); in htab_map_alloc_check()
356 int numa_node = bpf_map_attr_numa_node(attr); in htab_map_alloc_check()
373 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || in htab_map_alloc_check()
[all …]
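
htab_map_alloc_check() is the canonical shape of these alloc checks: decode the subtype from attr->map_type, then reject any map_flags bit outside the type's create-flag mask. A condensed sketch (MY_CREATE_FLAG_MASK and my_map_alloc_check() are hypothetical):

    #include <linux/bpf.h>
    #include <linux/errno.h>

    #define MY_CREATE_FLAG_MASK \
            (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE | BPF_F_ZERO_SEED)

    static int my_map_alloc_check(union bpf_attr *attr)
    {
            /* any bit outside the mask means userspace asked for
             * something this map type cannot honour */
            if (attr->map_flags & ~MY_CREATE_FLAG_MASK)
                    return -EINVAL;

            if (!attr->key_size || !attr->value_size || !attr->max_entries)
                    return -EINVAL;

            return 0;
    }
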
queue_stack_maps.c
46 static int queue_stack_map_alloc_check(union bpf_attr *attr) in queue_stack_map_alloc_check() argument
52 if (attr->max_entries == 0 || attr->key_size != 0 || in queue_stack_map_alloc_check()
53 attr->value_size == 0 || in queue_stack_map_alloc_check()
54 attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK || in queue_stack_map_alloc_check()
55 !bpf_map_flags_access_ok(attr->map_flags)) in queue_stack_map_alloc_check()
58 if (attr->value_size > KMALLOC_MAX_SIZE) in queue_stack_map_alloc_check()
67 static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr) in queue_stack_map_alloc() argument
69 int ret, numa_node = bpf_map_attr_numa_node(attr); in queue_stack_map_alloc()
74 size = (u64) attr->max_entries + 1; in queue_stack_map_alloc()
75 cost = queue_size = sizeof(*qs) + size * attr->value_size; in queue_stack_map_alloc()
[all …]
arraymap.c
51 int array_map_alloc_check(union bpf_attr *attr) in array_map_alloc_check() argument
53 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; in array_map_alloc_check()
54 int numa_node = bpf_map_attr_numa_node(attr); in array_map_alloc_check()
57 if (attr->max_entries == 0 || attr->key_size != 4 || in array_map_alloc_check()
58 attr->value_size == 0 || in array_map_alloc_check()
59 attr->map_flags & ~ARRAY_CREATE_FLAG_MASK || in array_map_alloc_check()
60 !bpf_map_flags_access_ok(attr->map_flags) || in array_map_alloc_check()
64 if (attr->map_type != BPF_MAP_TYPE_ARRAY && in array_map_alloc_check()
65 attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP)) in array_map_alloc_check()
68 if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY && in array_map_alloc_check()
[all …]
bpf_local_storage.c
532 int bpf_local_storage_map_alloc_check(union bpf_attr *attr) in bpf_local_storage_map_alloc_check() argument
534 if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK || in bpf_local_storage_map_alloc_check()
535 !(attr->map_flags & BPF_F_NO_PREALLOC) || in bpf_local_storage_map_alloc_check()
536 attr->max_entries || in bpf_local_storage_map_alloc_check()
537 attr->key_size != sizeof(int) || !attr->value_size || in bpf_local_storage_map_alloc_check()
539 !attr->btf_key_type_id || !attr->btf_value_type_id) in bpf_local_storage_map_alloc_check()
545 if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE) in bpf_local_storage_map_alloc_check()
551 struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr) in bpf_local_storage_map_alloc() argument
562 bpf_map_init_from_attr(&smap->map, attr); in bpf_local_storage_map_alloc()
590 sizeof(struct bpf_local_storage_elem) + attr->value_size; in bpf_local_storage_map_alloc()
lpm_trie.c
543 static struct bpf_map *trie_alloc(union bpf_attr *attr) in trie_alloc() argument
553 if (attr->max_entries == 0 || in trie_alloc()
554 !(attr->map_flags & BPF_F_NO_PREALLOC) || in trie_alloc()
555 attr->map_flags & ~LPM_CREATE_FLAG_MASK || in trie_alloc()
556 !bpf_map_flags_access_ok(attr->map_flags) || in trie_alloc()
557 attr->key_size < LPM_KEY_SIZE_MIN || in trie_alloc()
558 attr->key_size > LPM_KEY_SIZE_MAX || in trie_alloc()
559 attr->value_size < LPM_VAL_SIZE_MIN || in trie_alloc()
560 attr->value_size > LPM_VAL_SIZE_MAX) in trie_alloc()
568 bpf_map_init_from_attr(&trie->map, attr); in trie_alloc()
[all …]
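
trie_alloc() encodes the LPM trie's unusual contract: BPF_F_NO_PREALLOC is mandatory and the key must begin with a 32-bit prefix length. A userspace sketch creating an IPv4 trie (the 4 + 4 key layout mirrors the uapi struct bpf_lpm_trie_key):

    #include <linux/bpf.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    int create_ipv4_trie(void)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.map_type    = BPF_MAP_TYPE_LPM_TRIE;
            attr.key_size    = 4 + 4;       /* __u32 prefixlen + IPv4 address */
            attr.value_size  = sizeof(long long);
            attr.max_entries = 128;
            attr.map_flags   = BPF_F_NO_PREALLOC;  /* required, see trie_alloc() */

            return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }
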
reuseport_array.c
43 static int reuseport_array_alloc_check(union bpf_attr *attr) in reuseport_array_alloc_check() argument
45 if (attr->value_size != sizeof(u32) && in reuseport_array_alloc_check()
46 attr->value_size != sizeof(u64)) in reuseport_array_alloc_check()
49 return array_map_alloc_check(attr); in reuseport_array_alloc_check()
151 static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) in reuseport_array_alloc() argument
153 int err, numa_node = bpf_map_attr_numa_node(attr); in reuseport_array_alloc()
162 array_size += (u64)attr->max_entries * sizeof(struct sock *); in reuseport_array_alloc()
176 bpf_map_init_from_attr(&array->map, attr); in reuseport_array_alloc()
ringbuf.c
152 static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr) in ringbuf_map_alloc() argument
158 if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK) in ringbuf_map_alloc()
161 if (attr->key_size || attr->value_size || in ringbuf_map_alloc()
162 !is_power_of_2(attr->max_entries) || in ringbuf_map_alloc()
163 !PAGE_ALIGNED(attr->max_entries)) in ringbuf_map_alloc()
168 if (attr->max_entries > RINGBUF_MAX_DATA_SZ) in ringbuf_map_alloc()
176 bpf_map_init_from_attr(&rb_map->map, attr); in ringbuf_map_alloc()
180 attr->max_entries; in ringbuf_map_alloc()
185 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); in ringbuf_map_alloc()
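
For the ring buffer, attr->max_entries is the buffer size in bytes and, per ringbuf_map_alloc(), must be a power of two and page-aligned, while key_size and value_size must be zero. A userspace sketch:

    #include <linux/bpf.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    int create_ringbuf(void)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.map_type    = BPF_MAP_TYPE_RINGBUF;
            attr.max_entries = 64 * 1024;  /* bytes: power of two, page-aligned */
            /* key_size and value_size stay 0 for ringbuf maps */

            return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }
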
cgroup.c
764 int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, in __cgroup_bpf_query() argument
767 __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); in __cgroup_bpf_query()
768 enum bpf_attach_type type = attr->query.attach_type; in __cgroup_bpf_query()
778 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) in __cgroup_bpf_query()
787 if (attr->query.prog_cnt == 0 || !prog_ids || !cnt) in __cgroup_bpf_query()
790 if (attr->query.prog_cnt < cnt) { in __cgroup_bpf_query()
791 cnt = attr->query.prog_cnt; in __cgroup_bpf_query()
795 if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) { in __cgroup_bpf_query()
814 int cgroup_bpf_prog_attach(const union bpf_attr *attr, in cgroup_bpf_prog_attach() argument
821 cgrp = cgroup_get_from_fd(attr->target_fd); in cgroup_bpf_prog_attach()
[all …]
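
cgroup_bpf_prog_attach() pulls the target cgroup out of attr->target_fd. On the userspace side the same union bpf_attr carries the attach parameters; a sketch, assuming cgroup_fd and prog_fd are already-open descriptors:

    #include <linux/bpf.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    int attach_to_cgroup(int cgroup_fd, int prog_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.target_fd     = cgroup_fd;            /* cgroup directory fd */
            attr.attach_bpf_fd = prog_fd;              /* loaded program fd */
            attr.attach_type   = BPF_CGROUP_INET_INGRESS;

            return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
    }
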
local_storage.c
287 static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) in cgroup_storage_map_alloc() argument
289 int numa_node = bpf_map_attr_numa_node(attr); in cgroup_storage_map_alloc()
294 if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) && in cgroup_storage_map_alloc()
295 attr->key_size != sizeof(__u64)) in cgroup_storage_map_alloc()
298 if (attr->value_size == 0) in cgroup_storage_map_alloc()
301 if (attr->value_size > PAGE_SIZE) in cgroup_storage_map_alloc()
304 if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK || in cgroup_storage_map_alloc()
305 !bpf_map_flags_access_ok(attr->map_flags)) in cgroup_storage_map_alloc()
308 if (attr->max_entries) in cgroup_storage_map_alloc()
326 bpf_map_init_from_attr(&map->map, attr); in cgroup_storage_map_alloc()
offload.c
80 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) in bpf_prog_offload_init() argument
86 if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && in bpf_prog_offload_init()
87 attr->prog_type != BPF_PROG_TYPE_XDP) in bpf_prog_offload_init()
90 if (attr->prog_flags) in bpf_prog_offload_init()
100 attr->prog_ifindex); in bpf_prog_offload_init()
362 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) in bpf_map_offload_map_alloc() argument
371 if (attr->map_type != BPF_MAP_TYPE_ARRAY && in bpf_map_offload_map_alloc()
372 attr->map_type != BPF_MAP_TYPE_HASH) in bpf_map_offload_map_alloc()
379 bpf_map_init_from_attr(&offmap->map, attr); in bpf_map_offload_map_alloc()
383 offmap->netdev = __dev_get_by_index(net, attr->map_ifindex); in bpf_map_offload_map_alloc()
bpf_struct_ops.c
548 static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr) in bpf_struct_ops_map_alloc_check() argument
550 if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 || in bpf_struct_ops_map_alloc_check()
551 attr->map_flags || !attr->btf_vmlinux_value_type_id) in bpf_struct_ops_map_alloc_check()
556 static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) in bpf_struct_ops_map_alloc() argument
569 st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id); in bpf_struct_ops_map_alloc()
574 if (attr->value_size != vt->size) in bpf_struct_ops_map_alloc()
614 bpf_map_init_from_attr(map, attr); in bpf_struct_ops_map_alloc()
stackmap.c
90 static struct bpf_map *stack_map_alloc(union bpf_attr *attr) in stack_map_alloc() argument
92 u32 value_size = attr->value_size; in stack_map_alloc()
101 if (attr->map_flags & ~STACK_CREATE_FLAG_MASK) in stack_map_alloc()
105 if (attr->max_entries == 0 || attr->key_size != 4 || in stack_map_alloc()
110 if (attr->map_flags & BPF_F_STACK_BUILD_ID) { in stack_map_alloc()
119 n_buckets = roundup_pow_of_two(attr->max_entries); in stack_map_alloc()
124 err = bpf_map_charge_init(&mem, cost + attr->max_entries * in stack_map_alloc()
129 smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr)); in stack_map_alloc()
135 bpf_map_init_from_attr(&smap->map, attr); in stack_map_alloc()
525 if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) in BPF_CALL_3()
[all …]
/kernel/power/
main.c
100 static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr, in pm_async_show() argument
106 static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, in pm_async_store() argument
124 static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr, in mem_sleep_show() argument
166 static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr, in mem_sleep_store() argument
203 struct kobj_attribute *attr, char *buf) in sync_on_suspend_show() argument
209 struct kobj_attribute *attr, in sync_on_suspend_store() argument
239 static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr, in pm_test_show() argument
260 static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, in pm_test_store() argument
312 struct kobj_attribute *attr, char *buf) \
330 struct kobj_attribute *attr, char *buf) in last_failed_dev_show() argument
[all …]
hibernate.c
1059 static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, in disk_show() argument
1094 static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, in disk_store() argument
1145 static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr, in resume_show() argument
1152 static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, in resume_store() argument
1183 struct kobj_attribute *attr, char *buf) in resume_offset_show() argument
1189 struct kobj_attribute *attr, const char *buf, in resume_offset_store() argument
1205 static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr, in image_size_show() argument
1211 static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr, in image_size_store() argument
1227 struct kobj_attribute *attr, char *buf) in reserved_size_show() argument
1233 struct kobj_attribute *attr, in reserved_size_store() argument
[all …]
/kernel/events/
hw_breakpoint.c
113 find_slot_idx(iter->attr.bp_type) == type && in task_bp_pinned()
322 ret = __reserve_bp_slot(bp, bp->attr.bp_type); in reserve_bp_slot()
346 __release_bp_slot(bp, bp->attr.bp_type); in release_bp_slot()
393 return __reserve_bp_slot(bp, bp->attr.bp_type); in dbg_reserve_bp_slot()
401 __release_bp_slot(bp, bp->attr.bp_type); in dbg_release_bp_slot()
407 const struct perf_event_attr *attr, in hw_breakpoint_parse() argument
412 err = hw_breakpoint_arch_parse(bp, attr, hw); in hw_breakpoint_parse()
417 if (attr->exclude_kernel) in hw_breakpoint_parse()
439 err = hw_breakpoint_parse(bp, &bp->attr, &hw); in register_perf_hw_breakpoint()
457 register_user_hw_breakpoint(struct perf_event_attr *attr, in register_user_hw_breakpoint() argument
[all …]
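
register_user_hw_breakpoint() takes a perf_event_attr describing the watchpoint, which hw_breakpoint_parse() then validates, as the hits above show. A kernel-side sketch in the style of samples/hw_breakpoint (the watched address and handler are hypothetical):

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>
    #include <linux/printk.h>

    static void my_bp_handler(struct perf_event *bp,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
    {
            pr_info("watched variable was written\n");
    }

    static struct perf_event *install_watchpoint(void *addr,
                                                 struct task_struct *tsk)
    {
            struct perf_event_attr attr;

            hw_breakpoint_init(&attr);          /* breakpoint defaults */
            attr.bp_addr = (unsigned long)addr;
            attr.bp_len  = HW_BREAKPOINT_LEN_4;
            attr.bp_type = HW_BREAKPOINT_W;     /* fire on writes */

            return register_user_hw_breakpoint(&attr, my_bp_handler, NULL, tsk);
    }
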
core.c
944 struct perf_event_attr *attr, in perf_cgroup_connect() argument
1083 struct perf_event_attr *attr, in perf_cgroup_connect() argument
1564 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1587 if (event->attr.pinned) in get_event_groups()
1830 if (event->attr.inherit_stat) in list_add_event()
1844 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1917 __perf_event_read_size(event->attr.read_format, in perf_event__header_size()
1919 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1925 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1964 if (__perf_event_read_size(event->attr.read_format, in perf_event_validate_size()
[all …]
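
Nearly every core.c hit dereferences event->attr, the perf_event_attr that userspace passed to perf_event_open(2). A minimal userspace sketch opening a cycle counter for the calling thread:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    int open_cycle_counter(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size     = sizeof(attr);
            attr.type     = PERF_TYPE_HARDWARE;
            attr.config   = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled = 1;      /* start stopped; enable via ioctl later */

            /* pid = 0 (this thread), cpu = -1 (any), no group fd, no flags */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }
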
/kernel/trace/
trace_event_perf.c
52 ret = perf_allow_tracepoint(&p_event->attr); in perf_trace_event_perm()
64 if (!p_event->attr.exclude_callchain_user) in perf_trace_event_perm()
71 if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER) in perf_trace_event_perm()
76 if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW)) in perf_trace_event_perm()
89 ret = perf_allow_tracepoint(&p_event->attr); in perf_trace_event_perm()
220 u64 event_id = p_event->attr.config; in perf_trace_init()
254 if (p_event->attr.kprobe_func) { in perf_kprobe_init()
259 func, u64_to_user_ptr(p_event->attr.kprobe_func), in perf_kprobe_init()
273 func, (void *)(unsigned long)(p_event->attr.kprobe_addr), in perf_kprobe_init()
274 p_event->attr.probe_offset, is_retprobe); in perf_kprobe_init()
[all …]
/kernel/sched/
core.c
1472 const struct sched_attr *attr) in uclamp_validate() argument
1477 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) { in uclamp_validate()
1478 util_min = attr->sched_util_min; in uclamp_validate()
1484 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) { in uclamp_validate()
1485 util_max = attr->sched_util_max; in uclamp_validate()
1506 static bool uclamp_reset(const struct sched_attr *attr, in uclamp_reset() argument
1511 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) && in uclamp_reset()
1517 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN && in uclamp_reset()
1518 attr->sched_util_min == -1) { in uclamp_reset()
1523 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX && in uclamp_reset()
[all …]
deadline.c
2653 const struct sched_attr *attr) in sched_dl_overflow() argument
2655 u64 period = attr->sched_period ?: attr->sched_deadline; in sched_dl_overflow()
2656 u64 runtime = attr->sched_runtime; in sched_dl_overflow()
2662 if (attr->sched_flags & SCHED_FLAG_SUGOV) in sched_dl_overflow()
2718 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) in __setparam_dl() argument
2722 dl_se->dl_runtime = attr->sched_runtime; in __setparam_dl()
2723 dl_se->dl_deadline = attr->sched_deadline; in __setparam_dl()
2724 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; in __setparam_dl()
2725 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS; in __setparam_dl()
2730 void __getparam_dl(struct task_struct *p, struct sched_attr *attr) in __getparam_dl() argument
[all …]
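
__setparam_dl() copies sched_runtime, sched_deadline and sched_period out of the sched_attr supplied to sched_setattr(2); a zero period defaults to the deadline, as the hits above show. A userspace sketch requesting 10 ms of runtime every 100 ms, assuming the kernel uapi headers provide struct sched_attr (glibc historically has no wrapper, so the raw syscall is used; needs CAP_SYS_NICE):

    #include <linux/sched.h>        /* SCHED_DEADLINE */
    #include <linux/sched/types.h>  /* struct sched_attr */
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    int become_deadline_task(void)
    {
            struct sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size           = sizeof(attr);
            attr.sched_policy   = SCHED_DEADLINE;
            attr.sched_runtime  = 10ULL  * 1000 * 1000;  /* 10 ms, in ns */
            attr.sched_deadline = 100ULL * 1000 * 1000;  /* 100 ms */
            /* attr.sched_period left 0: period defaults to the deadline */

            return syscall(SYS_sched_setattr, 0, &attr, 0);
    }
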
/kernel/irq/
irqdesc.c
147 struct kobj_attribute *attr, char *buf) in per_cpu_count_show() argument
167 struct kobj_attribute *attr, char *buf) in chip_name_show() argument
184 struct kobj_attribute *attr, char *buf) in hwirq_show() argument
199 struct kobj_attribute *attr, char *buf) in type_show() argument
215 struct kobj_attribute *attr, char *buf) in wakeup_show() argument
231 struct kobj_attribute *attr, char *buf) in name_show() argument
246 struct kobj_attribute *attr, char *buf) in actions_show() argument
269 &per_cpu_count_attr.attr,
270 &chip_name_attr.attr,
271 &hwirq_attr.attr,
[all …]
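
The trailing irqdesc.c hits collect each kobj_attribute's embedded struct attribute into a NULL-terminated array, the form the sysfs group machinery iterates over. The idiom, sketched with one hypothetical read-only attribute:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>
    #include <linux/kernel.h>

    static ssize_t name_show(struct kobject *kobj,
                             struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "example\n");
    }
    static struct kobj_attribute name_attr = __ATTR_RO(name);

    static struct attribute *my_attrs[] = {
            &name_attr.attr,        /* the embedded struct attribute */
            NULL,                   /* sysfs stops at the NULL sentinel */
    };

    static const struct attribute_group my_group = {
            .attrs = my_attrs,
    };

    /* attached with sysfs_create_group(kobj, &my_group) */
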
