
Searched refs:cnt (Results 1 – 25 of 35) sorted by relevance

/kernel/trace/
trace_kdb.c
    27  int cnt = 0, cpu;                               in ftrace_dump_buf() local
    61  if (!cnt)                                       in ftrace_dump_buf()
    63  cnt++;                                          in ftrace_dump_buf()
    76  if (!cnt)                                       in ftrace_dump_buf()
   100  int cnt;                                        in kdb_ftdump() local
   133  cnt = trace_total_entries(NULL);                in kdb_ftdump()
   135  cnt = trace_total_entries_cpu(NULL, cpu_file);  in kdb_ftdump()
   136  skip_entries = max(cnt + skip_entries, 0);      in kdb_ftdump()
trace.c
   498  const char __user *ubuf, size_t cnt)             in trace_pid_write() argument
   546  while (cnt > 0) {                                in trace_pid_write()
   550  ret = trace_get_user(&parser, ubuf, cnt, &pos);  in trace_pid_write()
   556  cnt -= ret;                                      in trace_pid_write()
  1406  size_t cnt, loff_t *ppos)                        in trace_get_user() argument
  1420  cnt--;                                           in trace_get_user()
  1428  while (cnt && isspace(ch)) {                     in trace_get_user()
  1433  cnt--;                                           in trace_get_user()
  1447  while (cnt && !isspace(ch) && ch) {              in trace_get_user()
  1458  cnt--;                                           in trace_get_user()
  [all …]
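
The trace.c hits show the consume-loop idiom around trace_get_user(): the parser reports how many bytes it ate, and the caller shrinks cnt by that amount until the user buffer is drained. A minimal sketch of that shape, with a hypothetical parse_token() standing in for trace_get_user():

    /* Consume-loop sketch; parse_token() is a hypothetical stand-in for
     * trace_get_user() and returns bytes consumed, 0 at end, or -errno. */
    static ssize_t consume_all(const char __user *ubuf, size_t cnt)
    {
            ssize_t total = 0;

            while (cnt > 0) {
                    ssize_t ret = parse_token(ubuf, cnt);

                    if (ret < 0)
                            return ret;
                    if (!ret)
                            break;          /* nothing left to parse */
                    ubuf  += ret;
                    cnt   -= ret;
                    total += ret;
            }
            return total;
    }
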
trace_hwlat.c
   408  size_t cnt, loff_t *ppos)                                    in hwlat_read() argument
   418  if (cnt > sizeof(buf))                                       in hwlat_read()
   419  cnt = sizeof(buf);                                           in hwlat_read()
   425  return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);   in hwlat_read()
   445  size_t cnt, loff_t *ppos)                                    in hwlat_width_write() argument
   450  err = kstrtoull_from_user(ubuf, cnt, 10, &val);              in hwlat_width_write()
   464  return cnt;                                                  in hwlat_width_write()
   484  size_t cnt, loff_t *ppos)                                    in hwlat_window_write() argument
   489  err = kstrtoull_from_user(ubuf, cnt, 10, &val);              in hwlat_window_write()
   503  return cnt;                                                  in hwlat_window_write()
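
These hwlat handlers follow the stock tracefs pattern: reads format a value into a small stack buffer and hand it to simple_read_from_buffer(); writes parse the whole user buffer with kstrtoull_from_user() and return cnt to report that everything was consumed. A minimal sketch of both halves, for a hypothetical u64 knob:

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static u64 knob;        /* hypothetical value behind the file */

    static ssize_t knob_read(struct file *filp, char __user *ubuf,
                             size_t cnt, loff_t *ppos)
    {
            char buf[32];
            int len = scnprintf(buf, sizeof(buf), "%llu\n", knob);

            return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
    }

    static ssize_t knob_write(struct file *filp, const char __user *ubuf,
                              size_t cnt, loff_t *ppos)
    {
            u64 val;
            int err = kstrtoull_from_user(ubuf, cnt, 10, &val);

            if (err)
                    return err;
            knob = val;
            return cnt;     /* whole buffer consumed */
    }
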
trace_events.c
   858  size_t cnt, loff_t *ppos)                                             in ftrace_event_write() argument
   865  if (!cnt)                                                             in ftrace_event_write()
   875  read = trace_get_user(&parser, ubuf, cnt, ppos);                      in ftrace_event_write()
  1026  event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,   in event_enable_read() argument
  1052  return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));    in event_enable_read()
  1056  event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,  in event_enable_write() argument
  1063  ret = kstrtoul_from_user(ubuf, cnt, 10, &val);                        in event_enable_write()
  1086  *ppos += cnt;                                                         in event_enable_write()
  1088  return ret ? ret : cnt;                                               in event_enable_write()
  1092  system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,  in system_enable_read() argument
  [all …]
trace_selftest.c
    65  unsigned long flags, cnt = 0;                                in trace_test_buffer() local
    72  cnt = ring_buffer_entries(buf->buffer);                      in trace_test_buffer()
    92  *count = cnt;                                                in trace_test_buffer()
   185  static int trace_selftest_ops(struct trace_array *tr, int cnt)  in trace_selftest_ops() argument
   196  pr_info("Testing dynamic ftrace ops #%d: ", cnt);            in trace_selftest_ops()
   221  if (cnt > 1) {                                               in trace_selftest_ops()
   236  if (cnt > 1) {                                               in trace_selftest_ops()
   275  if (cnt > 1) {                                               in trace_selftest_ops()
   303  if (cnt > 1)                                                 in trace_selftest_ops()
trace_mmiotrace.c
   124  unsigned long cnt = atomic_xchg(&dropped_count, 0);  in count_overruns() local
   128  cnt += over - prev_overruns;                         in count_overruns()
   130  return cnt;                                          in count_overruns()
   134  char __user *ubuf, size_t cnt, loff_t *ppos)         in mmio_read() argument
   163  ret = trace_seq_to_user(s, ubuf, cnt);               in mmio_read()
ring_buffer_benchmark.c
   239  int cnt = 0;                                   in ring_buffer_producer() local
   266  cnt++;                                         in ring_buffer_producer()
   267  if (consumer && !(cnt % wakeup_interval))      in ring_buffer_producer()
   280  if (cnt % wakeup_interval)                     in ring_buffer_producer()
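
The benchmark's producer batches its wakeups: it only pokes the consumer every wakeup_interval events (cnt % wakeup_interval), then flushes once more after the loop if the final batch was partial. The shape, sketched with hypothetical produce_one_event()/wake_consumer() helpers:

    static void producer_loop(int wakeup_interval)
    {
            int cnt = 0;

            while (produce_one_event()) {           /* hypothetical */
                    cnt++;
                    if (!(cnt % wakeup_interval))
                            wake_consumer();        /* hypothetical */
            }
            if (cnt % wakeup_interval)              /* flush a partial batch */
                    wake_consumer();
    }
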
ftrace.c
   890  size_t cnt, loff_t *ppos)                                  in ftrace_profile_write() argument
   895  ret = kstrtoul_from_user(ubuf, cnt, 10, &val);             in ftrace_profile_write()
   906  cnt = ret;                                                 in ftrace_profile_write()
   912  cnt = ret;                                                 in ftrace_profile_write()
   928  *ppos += cnt;                                              in ftrace_profile_write()
   930  return cnt;                                                in ftrace_profile_write()
   935  size_t cnt, loff_t *ppos)                                  in ftrace_profile_read() argument
   941  return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);   in ftrace_profile_read()
  2963  int cnt;                                                   in ftrace_allocate_records() local
  2988  cnt = (PAGE_SIZE << order) / ENTRY_SIZE;                   in ftrace_allocate_records()
  [all …]
trace_events_trigger.c
   240  size_t cnt, loff_t *ppos)                                  in event_trigger_regex_write() argument
   246  if (!cnt)                                                  in event_trigger_regex_write()
   249  if (cnt >= PAGE_SIZE)                                      in event_trigger_regex_write()
   252  buf = memdup_user_nul(ubuf, cnt);                          in event_trigger_regex_write()
   272  *ppos += cnt;                                              in event_trigger_regex_write()
   273  ret = cnt;                                                 in event_trigger_regex_write()
   292  size_t cnt, loff_t *ppos)                                  in event_trigger_write() argument
   294  return event_trigger_regex_write(filp, ubuf, cnt, ppos);   in event_trigger_write()
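
event_trigger_regex_write() is the canonical bounded command write: bail on an empty buffer, reject anything PAGE_SIZE or larger, then let memdup_user_nul() copy and NUL-terminate in one step. A sketch with a hypothetical process_cmd():

    static ssize_t cmd_write(struct file *filp, const char __user *ubuf,
                             size_t cnt, loff_t *ppos)
    {
            char *buf;
            int ret;

            if (!cnt)
                    return 0;
            if (cnt >= PAGE_SIZE)                   /* keep commands under a page */
                    return -EINVAL;

            buf = memdup_user_nul(ubuf, cnt);       /* copies and NUL-terminates */
            if (IS_ERR(buf))
                    return PTR_ERR(buf);

            ret = process_cmd(buf);                 /* hypothetical handler */
            kfree(buf);
            if (ret < 0)
                    return ret;

            *ppos += cnt;
            return cnt;
    }
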
trace_seq.c
   373  int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)  in trace_seq_to_user() argument
   376  return seq_buf_to_user(&s->seq, ubuf, cnt);                             in trace_seq_to_user()
trace_functions_graph.c
  1301  graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,  in graph_depth_write() argument
  1307  ret = kstrtoul_from_user(ubuf, cnt, 10, &val);             in graph_depth_write()
  1313  *ppos += cnt;                                              in graph_depth_write()
  1315  return cnt;                                                in graph_depth_write()
  1319  graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,  in graph_depth_read() argument
  1327  return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);   in graph_depth_read()
ring_buffer.c
   536  size_t cnt;                                                in ring_buffer_nr_dirty_pages() local
   539  cnt = local_read(&buffer->buffers[cpu]->pages_touched);    in ring_buffer_nr_dirty_pages()
   541  if (cnt < read) {                                          in ring_buffer_nr_dirty_pages()
   542  WARN_ON_ONCE(read > cnt + 1);                              in ring_buffer_nr_dirty_pages()
   546  return cnt - read;                                         in ring_buffer_nr_dirty_pages()
  4940  int cnt;                                                   member
  4968  int cnt;                                                   in rb_write_something() local
  4971  cnt = data->cnt + (nested ? 27 : 0);                       in rb_write_something()
  4974  size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);          in rb_write_something()
  5033  data->cnt++;                                               in rb_test()
  [all …]
/kernel/irq/
irqdesc.c
   466  static int alloc_descs(unsigned int start, unsigned int cnt, int node,  in alloc_descs() argument
   475  for (i = 0; i < cnt; i++) {                                in alloc_descs()
   481  for (i = 0; i < cnt; i++) {                                in alloc_descs()
   502  bitmap_set(allocated_irqs, start, cnt);                    in alloc_descs()
   597  static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,  in alloc_descs() argument
   603  for (i = 0; i < cnt; i++) {                                in alloc_descs()
   608  bitmap_set(allocated_irqs, start, cnt);                    in alloc_descs()
   736  void irq_free_descs(unsigned int from, unsigned int cnt)   in irq_free_descs() argument
   740  if (from >= nr_irqs || (from + cnt) > nr_irqs)             in irq_free_descs()
   744  for (i = 0; i < cnt; i++)                                  in irq_free_descs()
  [all …]
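
alloc_descs() is the allocate-N-or-roll-back idiom: on the first failure every slot allocated so far is freed, and the allocated_irqs bitmap is only marked once all cnt descriptors exist. Sketched with hypothetical alloc_one()/free_one() helpers:

    static int alloc_range(unsigned int start, unsigned int cnt)
    {
            unsigned int i;

            for (i = 0; i < cnt; i++) {
                    if (alloc_one(start + i))       /* hypothetical */
                            goto err;
            }
            bitmap_set(allocated_irqs, start, cnt); /* commit only on full success */
            return start;
    err:
            while (i--)
                    free_one(start + i);            /* roll back partial work */
            return -ENOMEM;
    }
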
devres.c
   151  unsigned int cnt;                                                  member
   158  irq_free_descs(this->from, this->cnt);                             in devm_irq_desc_release()
   179  unsigned int cnt, int node, struct module *owner,                  in __devm_irq_alloc_descs() argument
   189  base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity);   in __devm_irq_alloc_descs()
   196  dr->cnt = cnt;                                                     in __devm_irq_alloc_descs()
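
devres.c pairs the two sides: __devm_irq_alloc_descs() stashes the (from, cnt) range in a devres node, and devm_irq_desc_release() feeds exactly that range back to irq_free_descs() when the device is torn down. The release half, condensed from the hits:

    struct irq_desc_devres {
            unsigned int from;
            unsigned int cnt;
    };

    static void devm_irq_desc_release(struct device *dev, void *res)
    {
            struct irq_desc_devres *this = res;

            irq_free_descs(this->from, this->cnt);  /* free the recorded range */
    }
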
/kernel/bpf/
cgroup.c
    94  u32 cnt = 0;                                               in prog_list_length() local
    99  cnt++;                                                     in prog_list_length()
   101  return cnt;                                                in prog_list_length()
   119  u32 cnt;                                                   in hierarchy_allows_attach() local
   123  cnt = prog_list_length(&p->bpf.progs[type]);               in hierarchy_allows_attach()
   124  WARN_ON_ONCE(cnt > 1);                                     in hierarchy_allows_attach()
   125  if (cnt == 1)                                              in hierarchy_allows_attach()
   146  int cnt = 0;                                               in compute_effective_progs() local
   150  if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))  in compute_effective_progs()
   151  cnt += prog_list_length(&p->bpf.progs[type]);              in compute_effective_progs()
  [all …]
verifier.c
  1212  int cnt = 0;                                               in mark_reg_read() local
  1247  cnt++;                                                     in mark_reg_read()
  1250  if (env->longest_mark_read_walk < cnt)                     in mark_reg_read()
  1251  env->longest_mark_read_walk = cnt;                         in mark_reg_read()
  1419  u32 cnt = cur->jmp_history_cnt;                            in push_jmp_history() local
  1422  cnt++;                                                     in push_jmp_history()
  1423  p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);  in push_jmp_history()
  1426  p[cnt - 1].idx = env->insn_idx;                            in push_jmp_history()
  1427  p[cnt - 1].prev_idx = env->prev_insn_idx;                  in push_jmp_history()
  1429  cur->jmp_history_cnt = cnt;                                in push_jmp_history()
  [all …]
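
push_jmp_history() grows its array one element at a time: bump cnt, krealloc() to the new size, and only then write the entry at cnt - 1 and commit the count. The same grow-by-one shape for an illustrative entry type, as a sketch:

    struct jmp_entry { u32 idx; u32 prev_idx; };    /* illustrative type */

    static int push_entry(struct jmp_entry **arr, u32 *len,
                          u32 idx, u32 prev_idx)
    {
            u32 cnt = *len + 1;
            struct jmp_entry *p;

            p = krealloc(*arr, cnt * sizeof(*p), GFP_USER);
            if (!p)
                    return -ENOMEM;
            p[cnt - 1].idx = idx;
            p[cnt - 1].prev_idx = prev_idx;
            *arr = p;
            *len = cnt;                     /* commit only after success */
            return 0;
    }
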
core.c
   493  int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)      in bpf_remove_insns() argument
   498  memmove(prog->insnsi + off, prog->insnsi + off + cnt,              in bpf_remove_insns()
   499  sizeof(struct bpf_insn) * (prog->len - off - cnt));                in bpf_remove_insns()
   500  prog->len -= cnt;                                                  in bpf_remove_insns()
   502  return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));  in bpf_remove_insns()
  1816  u32 cnt = 0;                                                       in bpf_prog_array_length() local
  1820  cnt++;                                                             in bpf_prog_array_length()
  1821  return cnt;                                                        in bpf_prog_array_length()
  1855  __u32 __user *prog_ids, u32 cnt)                                   in bpf_prog_array_copy_to_user() argument
  1867  ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);          in bpf_prog_array_copy_to_user()
  [all …]
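
bpf_remove_insns() deletes cnt instructions by sliding the array tail down with memmove() and shrinking len; bpf_adj_branches() then repairs jump offsets that crossed the hole. The splice itself, as a generic sketch:

    /* Remove cnt elements at off from an array of len elements and
     * return the new length; the memmove() mirrors bpf_remove_insns(). */
    static u32 remove_range(struct bpf_insn *insns, u32 len, u32 off, u32 cnt)
    {
            memmove(insns + off, insns + off + cnt,
                    sizeof(*insns) * (len - off - cnt));
            return len - cnt;
    }
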
/kernel/
softirq.c
   110  void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)  in __local_bh_disable_ip() argument
   124  __preempt_count_add(cnt);                                  in __local_bh_disable_ip()
   128  if (softirq_count() == (cnt & SOFTIRQ_MASK))               in __local_bh_disable_ip()
   132  if (preempt_count() == cnt) {                              in __local_bh_disable_ip()
   142  static void __local_bh_enable(unsigned int cnt)            in __local_bh_enable() argument
   146  if (preempt_count() == cnt)                                in __local_bh_enable()
   149  if (softirq_count() == (cnt & SOFTIRQ_MASK))               in __local_bh_enable()
   152  __preempt_count_sub(cnt);                                  in __local_bh_enable()
   166  void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)  in __local_bh_enable_ip() argument
   182  preempt_count_sub(cnt - 1);                                in __local_bh_enable_ip()
ksysfs.c
   115  unsigned long cnt;                                         in kexec_crash_size_store() local
   118  if (kstrtoul(buf, 0, &cnt))                                in kexec_crash_size_store()
   121  ret = crash_shrink_memory(cnt);                            in kexec_crash_size_store()
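
kexec_crash_size_store() is a minimal sysfs store: kstrtoul() with base 0 accepts decimal, hex, and octal, and a parse failure rejects the write outright. Sketch, with a hypothetical apply_new_size() in place of crash_shrink_memory():

    static ssize_t size_store(struct kobject *kobj, struct kobj_attribute *attr,
                              const char *buf, size_t count)
    {
            unsigned long cnt;
            int ret;

            if (kstrtoul(buf, 0, &cnt))     /* base 0: "123", "0x7b", "0173" */
                    return -EINVAL;

            ret = apply_new_size(cnt);      /* hypothetical */
            return ret < 0 ? ret : count;
    }
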
/kernel/time/
clocksource.c
  1047  ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)  in sysfs_get_uname() argument
  1049  size_t ret = cnt;                                          in sysfs_get_uname()
  1052  if (!cnt || cnt >= CS_NAME_LEN)                            in sysfs_get_uname()
  1056  if (buf[cnt-1] == '\n')                                    in sysfs_get_uname()
  1057  cnt--;                                                     in sysfs_get_uname()
  1058  if (cnt > 0)                                               in sysfs_get_uname()
  1059  memcpy(dst, buf, cnt);                                     in sysfs_get_uname()
  1060  dst[cnt] = 0;                                              in sysfs_get_uname()
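
sysfs_get_uname() trims the trailing newline that `echo tsc > current_clocksource` leaves in the buffer before copying the name out, and rejects empty or oversized input up front. The same shape as a standalone sketch (NAME_LEN is a hypothetical bound standing in for CS_NAME_LEN):

    static ssize_t get_uname(const char *buf, char *dst, size_t cnt)
    {
            size_t ret = cnt;

            if (!cnt || cnt >= NAME_LEN)    /* NAME_LEN: hypothetical bound */
                    return -EINVAL;
            if (buf[cnt - 1] == '\n')
                    cnt--;                  /* drop the echo-appended newline */
            if (cnt > 0)
                    memcpy(dst, buf, cnt);
            dst[cnt] = '\0';
            return ret;                     /* the newline counts as consumed too */
    }
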
/kernel/sched/
debug.c
   128  size_t cnt, loff_t *ppos)                                  in sched_feat_write() argument
   135  if (cnt > 63)                                              in sched_feat_write()
   136  cnt = 63;                                                  in sched_feat_write()
   138  if (copy_from_user(&buf, ubuf, cnt))                       in sched_feat_write()
   141  buf[cnt] = 0;                                              in sched_feat_write()
   154  *ppos += cnt;                                              in sched_feat_write()
   156  return cnt;                                                in sched_feat_write()
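
sched_feat_write() caps the copy at 63 bytes so its 64-byte stack buffer always has room for the terminating NUL it writes at buf[cnt]; skipping either step is a classic overflow or unterminated-string bug. The bounded-copy idiom, sketched:

    static ssize_t feat_write(struct file *filp, const char __user *ubuf,
                              size_t cnt, loff_t *ppos)
    {
            char buf[64];

            if (cnt > sizeof(buf) - 1)      /* leave room for the NUL */
                    cnt = sizeof(buf) - 1;
            if (copy_from_user(&buf, ubuf, cnt))
                    return -EFAULT;
            buf[cnt] = 0;                   /* always terminate before parsing */

            /* ... match feature names in buf here ... */

            *ppos += cnt;
            return cnt;
    }
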
wait.c
    71  int cnt = 0;                                               in __wake_up_common() local
    99  if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&      in __wake_up_common()
/kernel/livepatch/
core.c
   195  int i, cnt, vmlinux, ret;                                  in klp_resolve_symbols() local
   226  cnt = sscanf(strtab + sym->st_name,                        in klp_resolve_symbols()
   229  if (cnt != 3) {                                            in klp_resolve_symbols()
   251  int i, cnt, ret = 0;                                       in klp_write_object_relocations() local
   273  cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);   in klp_write_object_relocations()
   274  if (cnt != 1) {                                            in klp_write_object_relocations()
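
Both livepatch hits validate sscanf() by its match count, with width-limited conversions so a malformed section name cannot overrun the destination: %55[^.] reads at most 55 characters up to the next dot. A sketch of the single-field parse:

    /* Parse ".klp.rela.<objname>.<rest>"; objname must hold >= 56 bytes. */
    static int parse_objname(const char *secname, char *objname)
    {
            int cnt = sscanf(secname, ".klp.rela.%55[^.]", objname);

            return (cnt != 1) ? -EINVAL : 0;    /* exactly one field expected */
    }
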
/kernel/locking/
qspinlock.c
   335  int cnt = _Q_PENDING_LOOPS;                                in queued_spin_lock_slowpath() local
   337  (VAL != _Q_PENDING_VAL) || !cnt--);                        in queued_spin_lock_slowpath()
/kernel/power/
snapshot.c
  1202  unsigned int cnt = 0;                                      in count_free_highmem_pages() local
  1206  cnt += zone_page_state(zone, NR_FREE_PAGES);               in count_free_highmem_pages()
  1208  return cnt;                                                in count_free_highmem_pages()
  2266  unsigned int cnt = 0;                                      in count_highmem_image_pages() local
  2272  cnt++;                                                     in count_highmem_image_pages()
  2276  return cnt;                                                in count_highmem_image_pages()
