/kernel/ |
D | fail_function.c |
    58  struct fei_attr *attr;  in fei_attr_new() local
    60  attr = kzalloc(sizeof(*attr), GFP_KERNEL);  in fei_attr_new()
    61  if (attr) {  in fei_attr_new()
    62  attr->kp.symbol_name = kstrdup(sym, GFP_KERNEL);  in fei_attr_new()
    63  if (!attr->kp.symbol_name) {  in fei_attr_new()
    64  kfree(attr);  in fei_attr_new()
    67  attr->kp.pre_handler = fei_kprobe_handler;  in fei_attr_new()
    68  attr->kp.post_handler = fei_post_handler;  in fei_attr_new()
    69  attr->retval = adjust_error_retval(addr, 0);  in fei_attr_new()
    70  INIT_LIST_HEAD(&attr->list);  in fei_attr_new()
    [all …]
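The fail_function.c matches above wrap a kprobe inside a struct fei_attr: the symbol name, pre/post handlers and an adjusted error return value are filled into attr->kp before the probe is armed. As a hedged illustration of just the kprobe-registration part of that pattern, a minimal standalone module might look like the sketch below (the module name, probed symbol and handler bodies are invented for the example, not taken from fail_function.c):

#include <linux/module.h>
#include <linux/kprobes.h>

/* Pre-handler: runs just before the probed instruction executes. */
static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
    pr_info("pre: hit %s\n", p->symbol_name);
    return 0;   /* 0 = let the probed instruction run normally */
}

/* Post-handler: runs after the probed instruction has executed. */
static void demo_post(struct kprobe *p, struct pt_regs *regs,
                      unsigned long flags)
{
    pr_info("post: %s done\n", p->symbol_name);
}

static struct kprobe demo_kp = {
    .symbol_name  = "kernel_clone",     /* illustrative probe target */
    .pre_handler  = demo_pre,
    .post_handler = demo_post,
};

static int __init demo_init(void)
{
    return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
    unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");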
|
D | ksysfs.c |
    32   struct kobj_attribute *attr, char *buf)  in uevent_seqnum_show() argument
    41   struct kobj_attribute *attr, char *buf)  in uevent_helper_show() argument
    46   struct kobj_attribute *attr,  in uevent_helper_store() argument
    62   struct kobj_attribute *attr, char *buf)  in profiling_show() argument
    67   struct kobj_attribute *attr,  in profiling_store() argument
    93   struct kobj_attribute *attr, char *buf)  in kexec_loaded_show() argument
    100  struct kobj_attribute *attr, char *buf)  in kexec_crash_loaded_show() argument
    107  struct kobj_attribute *attr, char *buf)  in kexec_crash_size_show() argument
    117  struct kobj_attribute *attr,  in kexec_crash_size_store() argument
    136  struct kobj_attribute *attr, char *buf)  in vmcoreinfo_show() argument
    [all …]
|
D | reboot.c |
    700  static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)  in mode_show() argument
    726  static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,  in mode_store() argument
    752  static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)  in force_show() argument
    756  static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,  in force_store() argument
    774  static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)  in type_show() argument
    803  static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,  in type_store() argument
    832  static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)  in cpu_show() argument
    836  static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,  in cpu_store() argument
    862  &reboot_mode_attr.attr,
    864  &reboot_force_attr.attr,
    [all …]
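ksysfs.c and reboot.c (like the power/ and irq/ entries further down) all use the same sysfs idiom: a show/store pair taking a struct kobj_attribute, wrapped by an attribute macro and collected into an attribute group. A minimal sketch of that idiom follows; the attribute name, the stored value and the choice of kernel_kobj as parent are all made up for illustration:

#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static int demo_value;

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
                         char *buf)
{
    return sysfs_emit(buf, "%d\n", demo_value);
}

static ssize_t demo_store(struct kobject *kobj, struct kobj_attribute *attr,
                          const char *buf, size_t count)
{
    int ret = kstrtoint(buf, 10, &demo_value);

    return ret ? ret : count;
}

/* __ATTR_RW(demo) expects handlers named demo_show()/demo_store(). */
static struct kobj_attribute demo_attr = __ATTR_RW(demo);

static struct attribute *demo_attrs[] = {
    &demo_attr.attr,
    NULL,
};

static const struct attribute_group demo_group = {
    .attrs = demo_attrs,
};

static int __init demo_sysfs_init(void)
{
    /* Creates /sys/kernel/demo under the built-in "kernel" kobject. */
    return sysfs_create_group(kernel_kobj, &demo_group);
}

static void __exit demo_sysfs_exit(void)
{
    sysfs_remove_group(kernel_kobj, &demo_group);
}

module_init(demo_sysfs_init);
module_exit(demo_sysfs_exit);
MODULE_LICENSE("GPL");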
|
/kernel/power/ |
D | main.c |
    100  static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,  in pm_async_show() argument
    106  static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,  in pm_async_store() argument
    124  static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,  in mem_sleep_show() argument
    166  static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,  in mem_sleep_store() argument
    203  struct kobj_attribute *attr, char *buf)  in sync_on_suspend_show() argument
    209  struct kobj_attribute *attr,  in sync_on_suspend_store() argument
    239  static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,  in pm_test_show() argument
    260  static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,  in pm_test_store() argument
    312  struct kobj_attribute *attr, char *buf) \
    330  struct kobj_attribute *attr, char *buf)  in last_failed_dev_show() argument
    [all …]
|
D | hibernate.c |
    1062  static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,  in disk_show() argument
    1097  static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,  in disk_store() argument
    1148  static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,  in resume_show() argument
    1155  static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,  in resume_store() argument
    1186  struct kobj_attribute *attr, char *buf)  in resume_offset_show() argument
    1192  struct kobj_attribute *attr, const char *buf,  in resume_offset_store() argument
    1208  static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,  in image_size_show() argument
    1214  static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,  in image_size_store() argument
    1230  struct kobj_attribute *attr, char *buf)  in reserved_size_show() argument
    1236  struct kobj_attribute *attr,  in reserved_size_store() argument
    [all …]
|
/kernel/bpf/ |
D | syscall.c |
    108  static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)  in find_and_alloc_map() argument
    111  u32 type = attr->map_type;  in find_and_alloc_map()
    123  err = ops->map_alloc_check(attr);  in find_and_alloc_map()
    127  if (attr->map_ifindex)  in find_and_alloc_map()
    129  map = ops->map_alloc(attr);  in find_and_alloc_map()
    360  void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)  in bpf_map_init_from_attr() argument
    362  map->map_type = attr->map_type;  in bpf_map_init_from_attr()
    363  map->key_size = attr->key_size;  in bpf_map_init_from_attr()
    364  map->value_size = attr->value_size;  in bpf_map_init_from_attr()
    365  map->max_entries = attr->max_entries;  in bpf_map_init_from_attr()
    [all …]
|
D | net_namespace.c |
    245  static int __netns_bpf_prog_query(const union bpf_attr *attr,  in __netns_bpf_prog_query() argument
    250  __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);  in __netns_bpf_prog_query()
    263  if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)  in __netns_bpf_prog_query()
    267  attr->query.prog_cnt);  in __netns_bpf_prog_query()
    270  int netns_bpf_prog_query(const union bpf_attr *attr,  in netns_bpf_prog_query() argument
    277  if (attr->query.query_flags)  in netns_bpf_prog_query()
    280  type = to_netns_bpf_attach_type(attr->query.attach_type);  in netns_bpf_prog_query()
    284  net = get_net_ns_by_fd(attr->query.target_fd);  in netns_bpf_prog_query()
    289  ret = __netns_bpf_prog_query(attr, uattr, net, type);  in netns_bpf_prog_query()
    296  int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)  in netns_bpf_prog_attach() argument
    [all …]
|
D | queue_stack_maps.c |
    46  static int queue_stack_map_alloc_check(union bpf_attr *attr)  in queue_stack_map_alloc_check() argument
    52  if (attr->max_entries == 0 || attr->key_size != 0 ||  in queue_stack_map_alloc_check()
    53  attr->value_size == 0 ||  in queue_stack_map_alloc_check()
    54  attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||  in queue_stack_map_alloc_check()
    55  !bpf_map_flags_access_ok(attr->map_flags))  in queue_stack_map_alloc_check()
    58  if (attr->value_size > KMALLOC_MAX_SIZE)  in queue_stack_map_alloc_check()
    67  static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)  in queue_stack_map_alloc() argument
    69  int numa_node = bpf_map_attr_numa_node(attr);  in queue_stack_map_alloc()
    73  size = (u64) attr->max_entries + 1;  in queue_stack_map_alloc()
    74  queue_size = sizeof(*qs) + size * attr->value_size;  in queue_stack_map_alloc()
    [all …]
|
D | hashtab.c |
    408  static int htab_map_alloc_check(union bpf_attr *attr)  in htab_map_alloc_check() argument
    410  bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||  in htab_map_alloc_check()
    411  attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);  in htab_map_alloc_check()
    412  bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||  in htab_map_alloc_check()
    413  attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);  in htab_map_alloc_check()
    419  bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);  in htab_map_alloc_check()
    420  bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);  in htab_map_alloc_check()
    421  bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);  in htab_map_alloc_check()
    422  int numa_node = bpf_map_attr_numa_node(attr);  in htab_map_alloc_check()
    439  if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||  in htab_map_alloc_check()
    [all …]
|
D | arraymap.c |
    51  int array_map_alloc_check(union bpf_attr *attr)  in array_map_alloc_check() argument
    53  bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;  in array_map_alloc_check()
    54  int numa_node = bpf_map_attr_numa_node(attr);  in array_map_alloc_check()
    57  if (attr->max_entries == 0 || attr->key_size != 4 ||  in array_map_alloc_check()
    58  attr->value_size == 0 ||  in array_map_alloc_check()
    59  attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||  in array_map_alloc_check()
    60  !bpf_map_flags_access_ok(attr->map_flags) ||  in array_map_alloc_check()
    64  if (attr->map_type != BPF_MAP_TYPE_ARRAY &&  in array_map_alloc_check()
    65  attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))  in array_map_alloc_check()
    68  if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&  in array_map_alloc_check()
    [all …]
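Each of the *_alloc_check functions above validates the fields that userspace supplies in union bpf_attr when it calls the bpf(2) syscall with BPF_MAP_CREATE; array maps, for example, insist on key_size == 4 and nonzero value_size and max_entries. As a rough illustration of where those fields come from, a bare-bones userspace map creation (no libbpf; the sizes are arbitrary example values) might look like:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.map_type    = BPF_MAP_TYPE_ARRAY;  /* routed to array_map_alloc_check() */
    attr.key_size    = 4;                   /* array maps require a 4-byte index key */
    attr.value_size  = 8;
    attr.max_entries = 256;

    int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    if (fd < 0) {
        perror("BPF_MAP_CREATE");
        return 1;
    }
    printf("created array map, fd=%d\n", fd);
    close(fd);
    return 0;
}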
|
D | bpf_local_storage.c |
    549  int bpf_local_storage_map_alloc_check(union bpf_attr *attr)  in bpf_local_storage_map_alloc_check() argument
    551  if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||  in bpf_local_storage_map_alloc_check()
    552  !(attr->map_flags & BPF_F_NO_PREALLOC) ||  in bpf_local_storage_map_alloc_check()
    553  attr->max_entries ||  in bpf_local_storage_map_alloc_check()
    554  attr->key_size != sizeof(int) || !attr->value_size ||  in bpf_local_storage_map_alloc_check()
    556  !attr->btf_key_type_id || !attr->btf_value_type_id)  in bpf_local_storage_map_alloc_check()
    562  if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)  in bpf_local_storage_map_alloc_check()
    568  struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)  in bpf_local_storage_map_alloc() argument
    577  bpf_map_init_from_attr(&smap->map, attr);  in bpf_local_storage_map_alloc()
    597  sizeof(struct bpf_local_storage_elem) + attr->value_size;  in bpf_local_storage_map_alloc()
|
D | reuseport_array.c |
    43   static int reuseport_array_alloc_check(union bpf_attr *attr)  in reuseport_array_alloc_check() argument
    45   if (attr->value_size != sizeof(u32) &&  in reuseport_array_alloc_check()
    46   attr->value_size != sizeof(u64))  in reuseport_array_alloc_check()
    49   return array_map_alloc_check(attr);  in reuseport_array_alloc_check()
    151  static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)  in reuseport_array_alloc() argument
    153  int numa_node = bpf_map_attr_numa_node(attr);  in reuseport_array_alloc()
    161  array_size += (u64)attr->max_entries * sizeof(struct sock *);  in reuseport_array_alloc()
    169  bpf_map_init_from_attr(&array->map, attr);  in reuseport_array_alloc()
|
D | ringbuf.c |
    148  static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)  in ringbuf_map_alloc() argument
    152  if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)  in ringbuf_map_alloc()
    155  if (attr->key_size || attr->value_size ||  in ringbuf_map_alloc()
    156  !is_power_of_2(attr->max_entries) ||  in ringbuf_map_alloc()
    157  !PAGE_ALIGNED(attr->max_entries))  in ringbuf_map_alloc()
    162  if (attr->max_entries > RINGBUF_MAX_DATA_SZ)  in ringbuf_map_alloc()
    170  bpf_map_init_from_attr(&rb_map->map, attr);  in ringbuf_map_alloc()
    172  rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);  in ringbuf_map_alloc()
|
D | lpm_trie.c |
    545  static struct bpf_map *trie_alloc(union bpf_attr *attr)  in trie_alloc() argument
    553  if (attr->max_entries == 0 ||  in trie_alloc()
    554  !(attr->map_flags & BPF_F_NO_PREALLOC) ||  in trie_alloc()
    555  attr->map_flags & ~LPM_CREATE_FLAG_MASK ||  in trie_alloc()
    556  !bpf_map_flags_access_ok(attr->map_flags) ||  in trie_alloc()
    557  attr->key_size < LPM_KEY_SIZE_MIN ||  in trie_alloc()
    558  attr->key_size > LPM_KEY_SIZE_MAX ||  in trie_alloc()
    559  attr->value_size < LPM_VAL_SIZE_MIN ||  in trie_alloc()
    560  attr->value_size > LPM_VAL_SIZE_MAX)  in trie_alloc()
    568  bpf_map_init_from_attr(&trie->map, attr);  in trie_alloc()
    [all …]
|
D | local_storage.c |
    285  static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)  in cgroup_storage_map_alloc() argument
    288  int numa_node = bpf_map_attr_numa_node(attr);  in cgroup_storage_map_alloc()
    294  if (attr->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)  in cgroup_storage_map_alloc()
    298  if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&  in cgroup_storage_map_alloc()
    299  attr->key_size != sizeof(__u64))  in cgroup_storage_map_alloc()
    302  if (attr->value_size == 0)  in cgroup_storage_map_alloc()
    305  if (attr->value_size > max_value_size)  in cgroup_storage_map_alloc()
    308  if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||  in cgroup_storage_map_alloc()
    309  !bpf_map_flags_access_ok(attr->map_flags))  in cgroup_storage_map_alloc()
    312  if (attr->max_entries)  in cgroup_storage_map_alloc()
    [all …]
|
D | cgroup.c |
    786  int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,  in __cgroup_bpf_query() argument
    789  __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);  in __cgroup_bpf_query()
    790  enum bpf_attach_type type = attr->query.attach_type;  in __cgroup_bpf_query()
    808  if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)  in __cgroup_bpf_query()
    817  if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)  in __cgroup_bpf_query()
    820  if (attr->query.prog_cnt < cnt) {  in __cgroup_bpf_query()
    821  cnt = attr->query.prog_cnt;  in __cgroup_bpf_query()
    825  if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {  in __cgroup_bpf_query()
    844  int cgroup_bpf_prog_attach(const union bpf_attr *attr,  in cgroup_bpf_prog_attach() argument
    851  cgrp = cgroup_get_from_fd(attr->target_fd);  in cgroup_bpf_prog_attach()
    [all …]
|
D | stackmap.c |
    89   static struct bpf_map *stack_map_alloc(union bpf_attr *attr)  in stack_map_alloc() argument
    91   u32 value_size = attr->value_size;  in stack_map_alloc()
    99   if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)  in stack_map_alloc()
    103  if (attr->max_entries == 0 || attr->key_size != 4 ||  in stack_map_alloc()
    108  if (attr->map_flags & BPF_F_STACK_BUILD_ID) {  in stack_map_alloc()
    117  n_buckets = roundup_pow_of_two(attr->max_entries);  in stack_map_alloc()
    122  smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));  in stack_map_alloc()
    126  bpf_map_init_from_attr(&smap->map, attr);  in stack_map_alloc()
    385  if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))  in BPF_CALL_3()
    562  if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))  in BPF_CALL_4()
|
D | offload.c |
    80   int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)  in bpf_prog_offload_init() argument
    86   if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&  in bpf_prog_offload_init()
    87   attr->prog_type != BPF_PROG_TYPE_XDP)  in bpf_prog_offload_init()
    90   if (attr->prog_flags)  in bpf_prog_offload_init()
    100  attr->prog_ifindex);  in bpf_prog_offload_init()
    359  struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)  in bpf_map_offload_map_alloc() argument
    368  if (attr->map_type != BPF_MAP_TYPE_ARRAY &&  in bpf_map_offload_map_alloc()
    369  attr->map_type != BPF_MAP_TYPE_HASH)  in bpf_map_offload_map_alloc()
    376  bpf_map_init_from_attr(&offmap->map, attr);  in bpf_map_offload_map_alloc()
    380  offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);  in bpf_map_offload_map_alloc()
|
/kernel/events/ |
D | hw_breakpoint.c |
    113  find_slot_idx(iter->attr.bp_type) == type &&  in task_bp_pinned()
    322  ret = __reserve_bp_slot(bp, bp->attr.bp_type);  in reserve_bp_slot()
    346  __release_bp_slot(bp, bp->attr.bp_type);  in release_bp_slot()
    393  return __reserve_bp_slot(bp, bp->attr.bp_type);  in dbg_reserve_bp_slot()
    401  __release_bp_slot(bp, bp->attr.bp_type);  in dbg_release_bp_slot()
    407  const struct perf_event_attr *attr,  in hw_breakpoint_parse() argument
    412  err = hw_breakpoint_arch_parse(bp, attr, hw);  in hw_breakpoint_parse()
    417  if (attr->exclude_kernel)  in hw_breakpoint_parse()
    439  err = hw_breakpoint_parse(bp, &bp->attr, &hw);  in register_perf_hw_breakpoint()
    458  register_user_hw_breakpoint(struct perf_event_attr *attr,  in register_user_hw_breakpoint() argument
    [all …]
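hw_breakpoint.c consumes a perf_event_attr whose bp_addr/bp_len/bp_type fields describe the watchpoint; hw_breakpoint_parse() and the slot-reservation code above turn that into an architecture breakpoint. A rough, hedged in-kernel sketch of arming one on a module-local variable (the names, the write watchpoint and pinning to the current task are all illustrative choices, not taken from this file) could look like:

#include <linux/module.h>
#include <linux/err.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static int watched;                 /* variable to watch for writes */
static struct perf_event *bp;

static void bp_handler(struct perf_event *event,
                       struct perf_sample_data *data, struct pt_regs *regs)
{
    pr_info("watched variable was written\n");
}

static int __init bp_demo_init(void)
{
    struct perf_event_attr attr;

    hw_breakpoint_init(&attr);          /* defaults for a breakpoint event */
    attr.bp_addr = (unsigned long)&watched;
    attr.bp_len  = HW_BREAKPOINT_LEN_4;
    attr.bp_type = HW_BREAKPOINT_W;     /* trigger on writes */

    /* Pin the breakpoint to the current task. */
    bp = register_user_hw_breakpoint(&attr, bp_handler, NULL, current);
    return IS_ERR(bp) ? PTR_ERR(bp) : 0;
}

static void __exit bp_demo_exit(void)
{
    if (!IS_ERR_OR_NULL(bp))
        unregister_hw_breakpoint(bp);
}

module_init(bp_demo_init);
module_exit(bp_demo_exit);
MODULE_LICENSE("GPL");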
|
D | core.c |
    983   struct perf_event_attr *attr,  in perf_cgroup_connect() argument
    1115  struct perf_event_attr *attr,  in perf_cgroup_connect() argument
    1633  event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;  in get_event_type()
    1656  if (event->attr.pinned)  in get_event_groups()
    1884  if (event->attr.inherit_stat)  in list_add_event()
    1898  event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :  in perf_event__state_init()
    1977  __perf_event_read_size(event->attr.read_format,  in perf_event__header_size()
    1979  __perf_event_header_size(event, event->attr.sample_type);  in perf_event__header_size()
    1985  u64 sample_type = event->attr.sample_type;  in perf_event__id_header_size()
    2024  if (__perf_event_read_size(event->attr.read_format,  in perf_event_validate_size()
    [all …]
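events/core.c is the consumer side of struct perf_event_attr: fields such as attr.disabled, attr.pinned, attr.sample_type and attr.read_format, filled in by userspace, steer the state initialization and header sizing seen in the matches above. The conventional way those fields get set is the perf_event_open(2) pattern from its man page; a minimal counting example (instruction count of the calling thread, values chosen for illustration):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    struct perf_event_attr attr;
    uint64_t count;

    memset(&attr, 0, sizeof(attr));
    attr.type           = PERF_TYPE_HARDWARE;
    attr.size           = sizeof(attr);
    attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
    attr.disabled       = 1;   /* start stopped; cf. perf_event__state_init() */
    attr.exclude_kernel = 1;
    attr.exclude_hv     = 1;

    /* Count for the calling thread (pid 0) on any CPU (-1). */
    int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
    /* ... workload under measurement ... */
    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("instructions: %llu\n", (unsigned long long)count);
    close(fd);
    return 0;
}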
|
/kernel/trace/ |
D | trace_event_perf.c |
    52   ret = perf_allow_tracepoint(&p_event->attr);  in perf_trace_event_perm()
    64   if (!p_event->attr.exclude_callchain_user)  in perf_trace_event_perm()
    71   if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)  in perf_trace_event_perm()
    76   if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))  in perf_trace_event_perm()
    89   ret = perf_allow_tracepoint(&p_event->attr);  in perf_trace_event_perm()
    218  u64 event_id = p_event->attr.config;  in perf_trace_init()
    253  if (p_event->attr.kprobe_func) {  in perf_kprobe_init()
    258  func, u64_to_user_ptr(p_event->attr.kprobe_func),  in perf_kprobe_init()
    272  func, (void *)(unsigned long)(p_event->attr.kprobe_addr),  in perf_kprobe_init()
    273  p_event->attr.probe_offset, is_retprobe);  in perf_kprobe_init()
    [all …]
|
/kernel/irq/ |
D | irqdesc.c |
    147  struct kobj_attribute *attr, char *buf)  in per_cpu_count_show() argument
    167  struct kobj_attribute *attr, char *buf)  in chip_name_show() argument
    184  struct kobj_attribute *attr, char *buf)  in hwirq_show() argument
    199  struct kobj_attribute *attr, char *buf)  in type_show() argument
    215  struct kobj_attribute *attr, char *buf)  in wakeup_show() argument
    231  struct kobj_attribute *attr, char *buf)  in name_show() argument
    246  struct kobj_attribute *attr, char *buf)  in actions_show() argument
    269  &per_cpu_count_attr.attr,
    270  &chip_name_attr.attr,
    271  &hwirq_attr.attr,
    [all …]
|
D | msi.c |
    75   static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,  in msi_mode_show() argument
    83   retval = kstrtoul(attr->attr.name, 10, &irq);  in msi_mode_show()
    133  msi_attrs[count] = &msi_dev_attr->attr;  in msi_populate_sysfs()
    135  sysfs_attr_init(&msi_dev_attr->attr);  in msi_populate_sysfs()
    136  msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",  in msi_populate_sysfs()
    138  if (!msi_dev_attr->attr.name)  in msi_populate_sysfs()
    140  msi_dev_attr->attr.mode = 0444;  in msi_populate_sysfs()
    171  msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);  in msi_populate_sysfs()
    197  struct device_attribute, attr);  in msi_destroy_sysfs()
    198  kfree(dev_attr->attr.name);  in msi_destroy_sysfs()
|
/kernel/sched/ |
D | deadline.c |
    2699  const struct sched_attr *attr)  in sched_dl_overflow() argument
    2701  u64 period = attr->sched_period ?: attr->sched_deadline;  in sched_dl_overflow()
    2702  u64 runtime = attr->sched_runtime;  in sched_dl_overflow()
    2708  if (attr->sched_flags & SCHED_FLAG_SUGOV)  in sched_dl_overflow()
    2764  void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)  in __setparam_dl() argument
    2768  dl_se->dl_runtime = attr->sched_runtime;  in __setparam_dl()
    2769  dl_se->dl_deadline = attr->sched_deadline;  in __setparam_dl()
    2770  dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;  in __setparam_dl()
    2771  dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;  in __setparam_dl()
    2776  void __getparam_dl(struct task_struct *p, struct sched_attr *attr)  in __getparam_dl()
    [all …]
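deadline.c reads the runtime/deadline/period triple out of struct sched_attr (__setparam_dl above), which userspace hands in through the sched_setattr(2) syscall. A minimal, hedged example of requesting a SCHED_DEADLINE reservation for the calling thread follows; glibc has no wrapper, so the uapi struct and the raw syscall are spelled out, and the 10 ms / 30 ms / 30 ms numbers are arbitrary (the call needs CAP_SYS_NICE or root):

#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

/* Local copy of the uapi layout; glibc does not expose struct sched_attr. */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t  sched_nice;
    uint32_t sched_priority;
    /* SCHED_DEADLINE parameters, in nanoseconds */
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
};

int main(void)
{
    struct sched_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.size           = sizeof(attr);
    attr.sched_policy   = SCHED_DEADLINE;
    attr.sched_runtime  = 10 * 1000 * 1000;  /* 10 ms of CPU time ...   */
    attr.sched_deadline = 30 * 1000 * 1000;  /* ... due within 30 ms ... */
    attr.sched_period   = 30 * 1000 * 1000;  /* ... every 30 ms period   */

    /* pid 0 = calling thread, flags = 0 */
    if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
        perror("sched_setattr");
        return 1;
    }
    printf("running under SCHED_DEADLINE\n");
    return 0;
}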
|
D | core.c |
    1820  const struct sched_attr *attr, bool user)  in uclamp_validate() argument
    1827  trace_android_vh_uclamp_validate(p, attr, user, &ret, &done);  in uclamp_validate()
    1831  if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {  in uclamp_validate()
    1832  util_min = attr->sched_util_min;  in uclamp_validate()
    1838  if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {  in uclamp_validate()
    1839  util_max = attr->sched_util_max;  in uclamp_validate()
    1868  static bool uclamp_reset(const struct sched_attr *attr,  in uclamp_reset() argument
    1873  if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&  in uclamp_reset()
    1879  attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&  in uclamp_reset()
    1880  attr->sched_util_min == -1) {  in uclamp_reset()
    [all …]
|