/kernel/ |
D | softirq.c |
    163 int cnt; member
    182 return __this_cpu_read(softirq_ctrl.cnt) != 0; in local_bh_blocked()
    185 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) in __local_bh_disable_ip() argument
    199 DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt)); in __local_bh_disable_ip()
    207 newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt); in __local_bh_disable_ip()
    214 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) { in __local_bh_disable_ip()
    222 static void __local_bh_enable(unsigned int cnt, bool unlock) in __local_bh_enable() argument
    228 this_cpu_read(softirq_ctrl.cnt)); in __local_bh_enable()
    230 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) { in __local_bh_enable()
    236 newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt); in __local_bh_enable()
    [all …]
|
D | smp.c |
    52 u64 cnt:28; member
    195 (union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
    205 new.u.cnt = old.u.cnt + 1; in cfd_seq_inc()
    257 new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1); in cfd_seq_data_add()
    260 if (new[i].u.cnt <= now) in cfd_seq_data_add()
    261 new[i].u.cnt |= 0x80000000U; in cfd_seq_data_add()
    263 if (new[i].u.cnt == data[j].u.cnt) { in cfd_seq_data_add()
    269 if (new[i].u.cnt < data[j].u.cnt) { in cfd_seq_data_add()
    300 now = data[0].u.cnt; in csd_lock_print_extended()
    317 data[i].u.cnt & ~0x80000000U, data[i].u.src, in csd_lock_print_extended()
|
D | ksysfs.c |
    119 unsigned long cnt; in kexec_crash_size_store() local
    122 if (kstrtoul(buf, 0, &cnt)) in kexec_crash_size_store()
    125 ret = crash_shrink_memory(cnt); in kexec_crash_size_store()
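
The ksysfs.c hits are the standard sysfs store pattern: parse the written string with kstrtoul() and pass the value on. A minimal sketch of that shape, with hypothetical demo_* names standing in for the kexec-specific ones:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kobject.h>

/* Hypothetical consumer standing in for crash_shrink_memory(). */
static int demo_shrink(unsigned long bytes)
{
        return 0;
}

/* Store handler shaped like kexec_crash_size_store(): parse, act, then
 * report the whole buffer as consumed. */
static ssize_t demo_size_store(struct kobject *kobj, struct kobj_attribute *attr,
                               const char *buf, size_t count)
{
        unsigned long cnt;
        int ret;

        if (kstrtoul(buf, 0, &cnt))     /* base 0 accepts decimal, hex, octal */
                return -EINVAL;

        ret = demo_shrink(cnt);
        if (ret < 0)
                return ret;

        return count;
}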
|
/kernel/trace/ |
D | trace_kdb.c |
    27 int cnt = 0, cpu; in ftrace_dump_buf() local
    61 if (!cnt) in ftrace_dump_buf()
    63 cnt++; in ftrace_dump_buf()
    76 if (!cnt) in ftrace_dump_buf()
    100 int cnt; in kdb_ftdump() local
    133 cnt = trace_total_entries(NULL); in kdb_ftdump()
    135 cnt = trace_total_entries_cpu(NULL, cpu_file); in kdb_ftdump()
    136 skip_entries = max(cnt + skip_entries, 0); in kdb_ftdump()
|
D | trace.c |
    684 const char __user *ubuf, size_t cnt) in trace_pid_write() argument
    721 while (cnt > 0) { in trace_pid_write()
    725 ret = trace_get_user(&parser, ubuf, cnt, &pos); in trace_pid_write()
    731 cnt -= ret; in trace_pid_write()
    1611 size_t cnt, loff_t *ppos) in trace_get_user() argument
    1625 cnt--; in trace_get_user()
    1633 while (cnt && isspace(ch)) { in trace_get_user()
    1638 cnt--; in trace_get_user()
    1652 while (cnt && !isspace(ch) && ch) { in trace_get_user()
    1663 cnt--; in trace_get_user()
    [all …]
|
D | trace_events.c |
    1170 size_t cnt, loff_t *ppos) in ftrace_event_write() argument
    1177 if (!cnt) in ftrace_event_write()
    1187 read = trace_get_user(&parser, ubuf, cnt, ppos); in ftrace_event_write()
    1370 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, in event_enable_read() argument
    1396 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); in event_enable_read()
    1400 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, in event_enable_write() argument
    1407 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); in event_enable_write()
    1430 *ppos += cnt; in event_enable_write()
    1432 return ret ? ret : cnt; in event_enable_write()
    1436 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, in system_enable_read() argument
    [all …]
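
The event_enable_read()/event_enable_write() hits show the common tracefs control-file pair: format a tiny buffer for reads, parse a number from userspace for writes, then advance *ppos and report cnt bytes consumed. A rough sketch of that pair, with hypothetical demo_* names and no locking:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static bool demo_enabled;

static ssize_t demo_enable_read(struct file *filp, char __user *ubuf,
                                size_t cnt, loff_t *ppos)
{
        const char *buf = demo_enabled ? "1\n" : "0\n";

        /* copies at most cnt bytes, honouring and updating *ppos */
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t demo_enable_write(struct file *filp, const char __user *ubuf,
                                 size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        demo_enabled = !!val;

        *ppos += cnt;           /* the whole write was consumed */
        return cnt;
}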
|
D | trace_selftest.c |
    65 unsigned long flags, cnt = 0; in trace_test_buffer() local
    72 cnt = ring_buffer_entries(buf->buffer); in trace_test_buffer()
    92 *count = cnt; in trace_test_buffer()
    182 static int trace_selftest_ops(struct trace_array *tr, int cnt) in trace_selftest_ops() argument
    193 pr_info("Testing dynamic ftrace ops #%d: ", cnt); in trace_selftest_ops()
    218 if (cnt > 1) { in trace_selftest_ops()
    233 if (cnt > 1) { in trace_selftest_ops()
    272 if (cnt > 1) { in trace_selftest_ops()
    306 if (cnt > 1) { in trace_selftest_ops()
    334 if (cnt > 1) in trace_selftest_ops()
|
D | ftrace.c |
    883 size_t cnt, loff_t *ppos) in ftrace_profile_write() argument
    888 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); in ftrace_profile_write()
    899 cnt = ret; in ftrace_profile_write()
    905 cnt = ret; in ftrace_profile_write()
    921 *ppos += cnt; in ftrace_profile_write()
    923 return cnt; in ftrace_profile_write()
    928 size_t cnt, loff_t *ppos) in ftrace_profile_read() argument
    934 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); in ftrace_profile_read()
    3184 int cnt; in ftrace_allocate_records() local
    3207 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; in ftrace_allocate_records()
    [all …]
|
D | ring_buffer.c |
    440 local_t cnt; member
    591 static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt) in __rb_time_read() argument
    602 c = local_read(&t->cnt); in __rb_time_read()
    606 } while (c != local_read(&t->cnt)); in __rb_time_read()
    608 *cnt = rb_time_cnt(top); in __rb_time_read()
    611 if (*cnt != rb_time_cnt(msb) || *cnt != rb_time_cnt(bottom)) in __rb_time_read()
    621 unsigned long cnt; in rb_time_read() local
    623 return __rb_time_read(t, ret, &cnt); in rb_time_read()
    626 static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt) in rb_time_val_cnt() argument
    628 return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT); in rb_time_val_cnt()
    [all …]
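
rb_time_val_cnt() and __rb_time_read() hint at how the split timestamp is protected: each part of the value carries a small counter in its top bits, and a read is accepted only if every part carries the same counter; otherwise the read raced an update and is retried. A standalone illustration of that tagging (shift and mask values are assumptions, not the ring buffer's actual constants):

#include <linux/types.h>

#define DEMO_TIME_SHIFT         30
#define DEMO_TIME_VAL_MASK      ((1UL << DEMO_TIME_SHIFT) - 1)

/* Same shape as rb_time_val_cnt(): value in the low bits, 2-bit tag on top. */
static unsigned long demo_val_cnt(unsigned long val, unsigned long cnt)
{
        return (val & DEMO_TIME_VAL_MASK) | ((cnt & 3) << DEMO_TIME_SHIFT);
}

static unsigned long demo_cnt(unsigned long val)
{
        return (val >> DEMO_TIME_SHIFT) & 3;
}

/* Mirrors the consistency check at line 611: every part of the split
 * value must carry the same tag for the read to be accepted. */
static bool demo_read_consistent(unsigned long top, unsigned long bottom,
                                 unsigned long msb)
{
        return demo_cnt(top) == demo_cnt(bottom) &&
               demo_cnt(top) == demo_cnt(msb);
}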
|
D | trace_mmiotrace.c |
    122 unsigned long cnt = atomic_xchg(&dropped_count, 0); in count_overruns() local
    126 cnt += over - prev_overruns; in count_overruns()
    128 return cnt; in count_overruns()
    132 char __user *ubuf, size_t cnt, loff_t *ppos) in mmio_read() argument
    161 ret = trace_seq_to_user(s, ubuf, cnt); in mmio_read()
|
D | bpf_trace.c |
    2473 u32 cnt; member
    2487 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) in copy_user_syms() argument
    2495 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); in copy_user_syms()
    2499 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); in copy_user_syms()
    2503 for (p = buf, i = 0; i < cnt; i++) { in copy_user_syms()
    2600 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), in bpf_kprobe_multi_cookie()
    2685 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt) in addrs_check_error_injection_list() argument
    2689 for (i = 0; i < cnt; i++) { in addrs_check_error_injection_list()
    2702 u32 flags, cnt, size; in bpf_kprobe_multi_link_attach() local
    2724 cnt = attr->link_create.kprobe_multi.cnt; in bpf_kprobe_multi_link_attach()
    [all …]
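
copy_user_syms() shows the two-allocation pattern for bringing cnt user strings into the kernel: one pointer array and one flat buffer of fixed-size name slots, both sized through kvmalloc_array() so the multiplication is overflow-checked. A trimmed sketch of just that allocation step, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/kallsyms.h>     /* KSYM_NAME_LEN */
#include <linux/mm.h>
#include <linux/slab.h>

static int demo_alloc_syms(const char ***symsp, char **bufp, u32 cnt)
{
        const char **syms;
        char *buf;

        /* overflow-checked: fails instead of wrapping on a huge cnt */
        syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
        if (!syms)
                return -ENOMEM;

        buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
        if (!buf) {
                kvfree(syms);
                return -ENOMEM;
        }

        *symsp = syms;
        *bufp = buf;
        return 0;
}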
|
D | ring_buffer_benchmark.c |
    239 int cnt = 0; in ring_buffer_producer() local
    266 cnt++; in ring_buffer_producer()
    267 if (consumer && !(cnt % wakeup_interval)) in ring_buffer_producer()
    280 if (cnt % wakeup_interval) in ring_buffer_producer()
|
D | trace_hwlat.c |
    681 size_t cnt, loff_t *ppos) in hwlat_mode_write() argument
    688 if (cnt >= sizeof(buf)) in hwlat_mode_write()
    691 if (copy_from_user(buf, ubuf, cnt)) in hwlat_mode_write()
    694 buf[cnt] = 0; in hwlat_mode_write()
    713 ret = cnt; in hwlat_mode_write()
    723 *ppos += cnt; in hwlat_mode_write()
|
D | trace_events_inject.c |
    284 event_inject_write(struct file *filp, const char __user *ubuf, size_t cnt, in event_inject_write() argument
    293 if (cnt >= PAGE_SIZE) in event_inject_write()
    296 buf = memdup_user_nul(ubuf, cnt); in event_inject_write()
    320 return cnt; in event_inject_write()
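
event_inject_write() uses memdup_user_nul(): bound the size, copy the user string into a freshly allocated, NUL-terminated kernel buffer, and return cnt on success. A minimal sketch of that pattern; demo_parse_and_inject() is a hypothetical stand-in for the real parser:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_parse_and_inject(const char *cmd)
{
        return 0;       /* hypothetical parser */
}

static ssize_t demo_inject_write(struct file *filp, const char __user *ubuf,
                                 size_t cnt, loff_t *ppos)
{
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)                   /* refuse oversized commands */
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);       /* kmalloc + copy + '\0' */
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        err = demo_parse_and_inject(buf);
        kfree(buf);

        return err ? err : cnt;
}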
|
/kernel/bpf/ |
D | memalloc.c |
    164 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node) in alloc_bulk() argument
    173 for (i = 0; i < cnt; i++) { in alloc_bulk()
    266 int cnt; in free_bulk() local
    274 cnt = --c->free_cnt; in free_bulk()
    276 cnt = 0; in free_bulk()
    282 } while (cnt > (c->high_watermark + c->low_watermark) / 2); in free_bulk()
    293 int cnt; in bpf_mem_refill() local
    296 cnt = c->free_cnt; in bpf_mem_refill()
    297 if (cnt < c->low_watermark) in bpf_mem_refill()
    302 else if (cnt > c->high_watermark) in bpf_mem_refill()
    [all …]
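
The free_bulk()/bpf_mem_refill() hits outline a watermark policy: refill the cache when the free count drops below the low watermark, trim it when it climbs above the high one, and when trimming, stop at the midpoint between the two. A much-simplified sketch of that policy (the struct and batch handling are assumptions, not the bpf allocator):

struct demo_cache {
        int free_cnt;
        int low_watermark;
        int high_watermark;
        int batch;
};

static void demo_alloc_bulk(struct demo_cache *c, int cnt)
{
        c->free_cnt += cnt;     /* stand-in for allocating cnt objects */
}

static void demo_free_bulk(struct demo_cache *c)
{
        /* free down to the midpoint, like the loop condition at line 282 */
        while (c->free_cnt > (c->high_watermark + c->low_watermark) / 2)
                c->free_cnt--;
}

static void demo_refill(struct demo_cache *c)
{
        int cnt = c->free_cnt;

        if (cnt < c->low_watermark)
                demo_alloc_bulk(c, c->batch);
        else if (cnt > c->high_watermark)
                demo_free_bulk(c);
}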
|
D | net_namespace.c |
    109 int cnt, idx; in bpf_netns_link_release() local
    128 cnt = link_count(net, type); in bpf_netns_link_release()
    129 if (!cnt) { in bpf_netns_link_release()
    136 new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL); in bpf_netns_link_release()
    427 int cnt, err; in netns_bpf_link_attach() local
    431 cnt = link_count(net, type); in netns_bpf_link_attach()
    432 if (cnt >= netns_bpf_max_progs(type)) { in netns_bpf_link_attach()
    456 run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL); in netns_bpf_link_attach()
|
D | cgroup.c |
    358 u32 cnt = 0; in prog_list_length() local
    363 cnt++; in prog_list_length()
    365 return cnt; in prog_list_length()
    382 u32 cnt; in hierarchy_allows_attach() local
    386 cnt = prog_list_length(&p->bpf.progs[atype]); in hierarchy_allows_attach()
    387 WARN_ON_ONCE(cnt > 1); in hierarchy_allows_attach()
    388 if (cnt == 1) in hierarchy_allows_attach()
    409 int cnt = 0; in compute_effective_progs() local
    413 if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) in compute_effective_progs()
    414 cnt += prog_list_length(&p->bpf.progs[atype]); in compute_effective_progs()
    [all …]
|
D | verifier.c |
    2290 int cnt = 0; in mark_reg_read() local
    2325 cnt++; in mark_reg_read()
    2328 if (env->longest_mark_read_walk < cnt) in mark_reg_read()
    2329 env->longest_mark_read_walk = cnt; in mark_reg_read()
    2519 u32 cnt = cur->jmp_history_cnt; in push_jmp_history() local
    2523 cnt++; in push_jmp_history()
    2524 alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p))); in push_jmp_history()
    2528 p[cnt - 1].idx = env->insn_idx; in push_jmp_history()
    2529 p[cnt - 1].prev_idx = env->prev_insn_idx; in push_jmp_history()
    2531 cur->jmp_history_cnt = cnt; in push_jmp_history()
    [all …]
|
D | btf.c |
    221 u32 cnt; member
    7071 return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL; in btf_id_set_contains()
    7076 return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func); in btf_id_set8_contains()
    7344 if (!add_set->cnt) in BTF_ID_LIST_GLOBAL()
    7380 set_cnt = set ? set->cnt : 0; in BTF_ID_LIST_GLOBAL()
    7382 if (set_cnt > U32_MAX - add_set->cnt) { in BTF_ID_LIST_GLOBAL()
    7387 if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) { in BTF_ID_LIST_GLOBAL()
    7394 offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]), in BTF_ID_LIST_GLOBAL()
    7403 set->cnt = 0; in BTF_ID_LIST_GLOBAL()
    7407 memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0])); in BTF_ID_LIST_GLOBAL()
    [all …]
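
btf_id_set_contains() is a plain membership test over a sorted array: cnt entries, bsearch(), and a three-way comparison helper. A sketch of that shape; the struct layout and comparison function below are assumptions modeled on the excerpt, not the real btf_id_set definitions:

#include <linux/bsearch.h>
#include <linux/types.h>

struct demo_id_set {
        u32 cnt;
        u32 ids[];      /* kept sorted ascending */
};

static int demo_id_cmp(const void *a, const void *b)
{
        const u32 *x = a, *y = b;

        if (*x < *y)
                return -1;
        return *x > *y ? 1 : 0;
}

static bool demo_id_set_contains(const struct demo_id_set *set, u32 id)
{
        return bsearch(&id, set->ids, set->cnt, sizeof(u32), demo_id_cmp) != NULL;
}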
|
/kernel/sched/ |
D | debug.c |
    127 size_t cnt, loff_t *ppos) in sched_feat_write() argument
    134 if (cnt > 63) in sched_feat_write()
    135 cnt = 63; in sched_feat_write()
    137 if (copy_from_user(&buf, ubuf, cnt)) in sched_feat_write()
    140 buf[cnt] = 0; in sched_feat_write()
    153 *ppos += cnt; in sched_feat_write()
    155 return cnt; in sched_feat_write()
    174 size_t cnt, loff_t *ppos) in sched_scaling_write() argument
    179 if (cnt > 15) in sched_scaling_write()
    180 cnt = 15; in sched_scaling_write()
    [all …]
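
sched_feat_write() is the classic fixed-buffer debugfs write handler: clamp cnt one byte short of the on-stack buffer, copy_from_user(), NUL-terminate, parse, then advance *ppos and return cnt. A sketch of the same shape with the parsing elided:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_feat_write(struct file *filp, const char __user *ubuf,
                               size_t cnt, loff_t *ppos)
{
        char buf[64];

        if (cnt > 63)                   /* leave room for the terminator */
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        /* ... parse buf and flip the matching feature here ... */

        *ppos += cnt;
        return cnt;
}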
|
/kernel/irq/ |
D | irqdesc.c |
    470 static int alloc_descs(unsigned int start, unsigned int cnt, int node, in alloc_descs() argument
    479 for (i = 0; i < cnt; i++) { in alloc_descs()
    485 for (i = 0; i < cnt; i++) { in alloc_descs()
    506 bitmap_set(allocated_irqs, start, cnt); in alloc_descs()
    602 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, in alloc_descs() argument
    608 for (i = 0; i < cnt; i++) { in alloc_descs()
    613 bitmap_set(allocated_irqs, start, cnt); in alloc_descs()
    758 void irq_free_descs(unsigned int from, unsigned int cnt) in irq_free_descs() argument
    762 if (from >= nr_irqs || (from + cnt) > nr_irqs) in irq_free_descs()
    766 for (i = 0; i < cnt; i++) in irq_free_descs()
    [all …]
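
alloc_descs() and irq_free_descs() track descriptor ranges in the allocated_irqs bitmap: reserve cnt bits starting at start, release the same span later, and reject frees that fall outside the valid range. A small sketch of that bookkeeping with a fixed-size stand-in bitmap:

#include <linux/bitmap.h>
#include <linux/types.h>

#define DEMO_NR_IRQS 1024       /* stand-in for nr_irqs */

static DECLARE_BITMAP(demo_allocated, DEMO_NR_IRQS);

static void demo_reserve(unsigned int start, unsigned int cnt)
{
        bitmap_set(demo_allocated, start, cnt);
}

static void demo_release(unsigned int from, unsigned int cnt)
{
        /* mirrors the range check at line 762 */
        if (from >= DEMO_NR_IRQS || from + cnt > DEMO_NR_IRQS)
                return;

        bitmap_clear(demo_allocated, from, cnt);
}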
|
D | devres.c |
    151 unsigned int cnt; member
    158 irq_free_descs(this->from, this->cnt); in devm_irq_desc_release()
    179 unsigned int cnt, int node, struct module *owner, in __devm_irq_alloc_descs() argument
    189 base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity); in __devm_irq_alloc_descs()
    196 dr->cnt = cnt; in __devm_irq_alloc_descs()
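
The devres.c hits show the managed wrapper: a small record remembers the base and cnt of the allocated descriptor range so the release callback can hand them back to irq_free_descs() when the device is torn down. A sketch of that structure, assuming the simpler irq_alloc_descs() helper rather than the full __devm_irq_alloc_descs() signature:

#include <linux/device.h>
#include <linux/irq.h>
#include <linux/numa.h>
#include <linux/slab.h>

struct demo_irq_devres {
        unsigned int from;
        unsigned int cnt;
};

static void demo_irq_desc_release(struct device *dev, void *res)
{
        struct demo_irq_devres *this = res;

        irq_free_descs(this->from, this->cnt);
}

static int demo_irq_alloc_descs(struct device *dev, unsigned int from,
                                unsigned int cnt)
{
        struct demo_irq_devres *dr;
        int base;

        dr = devres_alloc(demo_irq_desc_release, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return -ENOMEM;

        base = irq_alloc_descs(-1, from, cnt, NUMA_NO_NODE);
        if (base < 0) {
                devres_free(dr);
                return base;
        }

        dr->from = base;
        dr->cnt = cnt;
        devres_add(dev, dr);    /* release runs automatically on unbind */
        return base;
}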
|
/kernel/time/ |
D | clocksource.c |
    1336 ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt) in sysfs_get_uname() argument
    1338 size_t ret = cnt; in sysfs_get_uname()
    1341 if (!cnt || cnt >= CS_NAME_LEN) in sysfs_get_uname()
    1345 if (buf[cnt-1] == '\n') in sysfs_get_uname()
    1346 cnt--; in sysfs_get_uname()
    1347 if (cnt > 0) in sysfs_get_uname()
    1348 memcpy(dst, buf, cnt); in sysfs_get_uname()
    1349 dst[cnt] = 0; in sysfs_get_uname()
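
The sysfs_get_uname() hits cover nearly the whole helper; reassembled, the logic is: reject empty or oversized input, strip one trailing newline, copy, NUL-terminate, and report the original length as consumed. A reconstruction under those assumptions (DEMO_NAME_LEN stands in for CS_NAME_LEN, and the lines missing from the excerpt are guessed):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_NAME_LEN 32        /* stand-in for CS_NAME_LEN */

static ssize_t demo_get_uname(const char *buf, char *dst, size_t cnt)
{
        size_t ret = cnt;

        if (!cnt || cnt >= DEMO_NAME_LEN)
                return -EINVAL;

        if (buf[cnt - 1] == '\n')       /* sysfs writes usually end in '\n' */
                cnt--;
        if (cnt > 0)
                memcpy(dst, buf, cnt);
        dst[cnt] = 0;

        return ret;     /* bytes consumed, including any stripped newline */
}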
|
/kernel/locking/ |
D | mutex.c |
    365 int cnt = 0; in mutex_spin_on_owner() local
    371 trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt); in mutex_spin_on_owner()
    1166 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) in atomic_dec_and_mutex_lock() argument
    1169 if (atomic_add_unless(cnt, -1, 1)) in atomic_dec_and_mutex_lock()
    1173 if (!atomic_dec_and_test(cnt)) { in atomic_dec_and_mutex_lock()
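
atomic_dec_and_mutex_lock() decrements cnt and returns nonzero, with the mutex held, only when the count reaches zero; otherwise it returns zero without taking the lock. That makes it a fit for last-reference teardown that must be serialized. A usage sketch with a hypothetical object:

#include <linux/atomic.h>
#include <linux/mutex.h>

struct demo_obj {
        atomic_t refcnt;
        struct mutex lock;
};

static void demo_teardown(struct demo_obj *obj)
{
        /* hypothetical cleanup; runs with obj->lock held */
}

static void demo_put(struct demo_obj *obj)
{
        if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock))
                return;         /* not the last reference, lock not taken */

        demo_teardown(obj);
        mutex_unlock(&obj->lock);
}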
|
/kernel/livepatch/ |
D | core.c |
    197 int i, cnt, ret; in klp_resolve_symbols() local
    229 cnt = sscanf(strtab + sym->st_name, in klp_resolve_symbols()
    232 if (cnt != 3) { in klp_resolve_symbols()
    292 int cnt, ret; in klp_apply_section_relocs() local
    301 cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]", in klp_apply_section_relocs()
    303 if (cnt != 1) { in klp_apply_section_relocs()
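
Both klp_resolve_symbols() and klp_apply_section_relocs() use sscanf()'s return value, the number of converted fields, to validate specially formatted ELF names. A sketch of the second case, reusing the ".klp.rela.%55[^.]" format shown in the excerpt; the buffer size and error text are assumptions:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* objname must have room for 55 characters plus the terminator. */
static int demo_parse_objname(const char *secname, char *objname)
{
        int cnt;

        /* %55[^.]: copy at most 55 chars, stopping at the first '.' */
        cnt = sscanf(secname, ".klp.rela.%55[^.]", objname);
        if (cnt != 1) {
                pr_err("section %s has an incorrectly formatted name\n",
                       secname);
                return -EINVAL;
        }

        return 0;
}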
|