/kernel/trace/
ring_buffer.c
    271  #define for_each_buffer_cpu(buffer, cpu) \    argument
    272      for_each_cpu(cpu, buffer->cpumask)
    442  struct ring_buffer *buffer;    member
    532  int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)    in ring_buffer_wait() argument
    545  work = &buffer->irq_work;    in ring_buffer_wait()
    549  if (!cpumask_test_cpu(cpu, buffer->cpumask))    in ring_buffer_wait()
    551  cpu_buffer = buffer->buffers[cpu];    in ring_buffer_wait()
    592  if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))    in ring_buffer_wait()
    596  !ring_buffer_empty_cpu(buffer, cpu)) {    in ring_buffer_wait()
    636  int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,    in ring_buffer_poll_wait() argument
    [all …]

trace.c
    313  struct ring_buffer *buffer,    in filter_check_discard() argument
    318  ring_buffer_discard_commit(buffer, event);    in filter_check_discard()
    327  struct ring_buffer *buffer,    in call_filter_check_discard() argument
    332  ring_buffer_discard_commit(buffer, event);    in call_filter_check_discard()
    345  if (!buf->buffer)    in buffer_ftrace_now()
    348  ts = ring_buffer_time_stamp(buf->buffer, cpu);    in buffer_ftrace_now()
    349  ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);    in buffer_ftrace_now()
    484  static void __ftrace_trace_stack(struct ring_buffer *buffer,
    488  struct ring_buffer *buffer,
    493  static inline void __ftrace_trace_stack(struct ring_buffer *buffer,    in __ftrace_trace_stack() argument
    [all …]

ring_buffer_benchmark.c
    30   static struct ring_buffer *buffer;    variable
    89   event = ring_buffer_consume(buffer, cpu, &ts, NULL);    in read_event()
    114  bpage = ring_buffer_alloc_read_page(buffer, cpu);    in read_page()
    118  ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);    in read_page()
    173  ring_buffer_free_read_page(buffer, bpage);    in read_page()
    252  event = ring_buffer_lock_reserve(buffer, 10);    in ring_buffer_producer()
    259  ring_buffer_unlock_commit(buffer, event);    in ring_buffer_producer()
    297  entries = ring_buffer_entries(buffer);    in ring_buffer_producer()
    298  overruns = ring_buffer_overruns(buffer);    in ring_buffer_producer()
    402  ring_buffer_reset(buffer);    in ring_buffer_producer_thread()
    [all …]

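The benchmark's producer and reader above exercise the core write/read path of the generic ring buffer. The sketch below restates that reserve/commit and consume sequence with the signatures visible in the listing; the payload struct, helper names, and return codes are invented for illustration, so treat it as a minimal sketch rather than the benchmark's actual code.

/*
 * Minimal sketch of the ring_buffer produce/consume pattern, assuming a
 * struct ring_buffer allocated elsewhere (e.g. via ring_buffer_alloc()).
 */
#include <linux/ring_buffer.h>
#include <linux/errno.h>

struct my_event {                       /* hypothetical payload */
	int value;
};

static int produce_one(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	struct my_event *entry;

	/* Reserve space on the current CPU's sub-buffer... */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* ...then make the event visible to readers. */
	return ring_buffer_unlock_commit(buffer, event);
}

static int consume_one(struct ring_buffer *buffer, int cpu, int *value)
{
	struct ring_buffer_event *event;
	struct my_event *entry;
	u64 ts;

	/* Pop the oldest event from one CPU's buffer, as read_event() does. */
	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return -ENODEV;

	entry = ring_buffer_event_data(event);
	*value = entry->value;
	return 0;
}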
trace_mmiotrace.c
    128  unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);    in count_overruns()
    302  struct ring_buffer *buffer = tr->trace_buffer.buffer;    in __trace_mmiotrace_rw() local
    307  event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,    in __trace_mmiotrace_rw()
    316  if (!call_filter_check_discard(call, entry, buffer, event))    in __trace_mmiotrace_rw()
    317  trace_buffer_unlock_commit(tr, buffer, event, 0, pc);    in __trace_mmiotrace_rw()
    332  struct ring_buffer *buffer = tr->trace_buffer.buffer;    in __trace_mmiotrace_map() local
    337  event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,    in __trace_mmiotrace_map()
    346  if (!call_filter_check_discard(call, entry, buffer, event))    in __trace_mmiotrace_map()
    347  trace_buffer_unlock_commit(tr, buffer, event, 0, pc);    in __trace_mmiotrace_map()

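The same write path recurs in several of the tracers listed here (mmiotrace above, and trace_branch.c, trace_sched_wakeup.c, trace_functions_graph.c, blktrace.c below): reserve an event in the trace array's ring buffer, fill it, then either discard it via the event filter or commit it. The sketch below only paraphrases those call sites; "struct my_entry", TRACE_MYTYPE and the surrounding setup are placeholders, and the helpers are internal to kernel/trace/ (declared in trace.h), so this is not a standalone API.

/* Sketch of the common tracer write path seen in the entries above. */
static void __trace_my_event(struct trace_array *tr,
			     struct trace_event_call *call,
			     unsigned long flags, int pc, int data)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct my_entry *entry;           /* hypothetical entry type */

	event = trace_buffer_lock_reserve(buffer, TRACE_MYTYPE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->data = data;

	/* Drop the event if it fails the event filter, otherwise commit it. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}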
trace_branch.c
    37   struct ring_buffer *buffer;    in probe_likely_condition() local
    62   buffer = tr->trace_buffer.buffer;    in probe_likely_condition()
    63   event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,    in probe_likely_condition()
    83   if (!call_filter_check_discard(call, entry, buffer, event))    in probe_likely_condition()
    84   __buffer_unlock_commit(buffer, event);    in probe_likely_condition()

trace_sched_wakeup.c
    377  struct ring_buffer *buffer = tr->trace_buffer.buffer;    in tracing_sched_switch_trace() local
    381  event = trace_buffer_lock_reserve(buffer, TRACE_CTX,    in tracing_sched_switch_trace()
    394  if (!call_filter_check_discard(call, entry, buffer, event))    in tracing_sched_switch_trace()
    395  trace_buffer_unlock_commit(tr, buffer, event, flags, pc);    in tracing_sched_switch_trace()
    407  struct ring_buffer *buffer = tr->trace_buffer.buffer;    in tracing_sched_wakeup_trace() local
    409  event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,    in tracing_sched_wakeup_trace()
    422  if (!call_filter_check_discard(call, entry, buffer, event))    in tracing_sched_wakeup_trace()
    423  trace_buffer_unlock_commit(tr, buffer, event, flags, pc);    in tracing_sched_wakeup_trace()

trace_syscalls.c
    301  struct ring_buffer *buffer;    in ftrace_syscall_enter() local
    328  buffer = tr->trace_buffer.buffer;    in ftrace_syscall_enter()
    329  event = trace_buffer_lock_reserve(buffer,    in ftrace_syscall_enter()
    338  event_trigger_unlock_commit(trace_file, buffer, event, entry,    in ftrace_syscall_enter()
    349  struct ring_buffer *buffer;    in ftrace_syscall_exit() local
    373  buffer = tr->trace_buffer.buffer;    in ftrace_syscall_exit()
    374  event = trace_buffer_lock_reserve(buffer,    in ftrace_syscall_exit()
    384  event_trigger_unlock_commit(trace_file, buffer, event, entry,    in ftrace_syscall_exit()

trace_functions_graph.c
    294  struct ring_buffer *buffer = tr->trace_buffer.buffer;    in __trace_graph_entry() local
    297  event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,    in __trace_graph_entry()
    303  if (!call_filter_check_discard(call, entry, buffer, event))    in __trace_graph_entry()
    304  __buffer_unlock_commit(buffer, event);    in __trace_graph_entry()
    406  struct ring_buffer *buffer = tr->trace_buffer.buffer;    in __trace_graph_return() local
    409  event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,    in __trace_graph_return()
    415  if (!call_filter_check_discard(call, entry, buffer, event))    in __trace_graph_return()
    416  __buffer_unlock_commit(buffer, event);    in __trace_graph_return()
    603  ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,    in get_return_for_leaf()
    605  event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,    in get_return_for_leaf()
    [all …]

Kconfig
    256  bool "Create a snapshot trace buffer"
    259  Allow tracing users to take snapshot of the current buffer using the
    270  Allow doing a snapshot of a single CPU buffer instead of a
    276  After which, only the tracing buffer for CPU 2 was swapped with
    277  the main tracing buffer, and the other CPU buffers remain the same.
    360  events into a running trace buffer to see when and where the
    586  tristate "Ring buffer benchmark stress tester"
    589  This option creates a test to stress the ring buffer and benchmark it.
    590  It creates its own ring buffer such that it will not interfere with
    591  any other users of the ring buffer (such as ftrace). It then creates
    [all …]

blktrace.c
    77   struct ring_buffer *buffer = NULL;    in trace_note() local
    83   buffer = blk_tr->trace_buffer.buffer;    in trace_note()
    85   event = trace_buffer_lock_reserve(buffer, TRACE_BLK,    in trace_note()
    110  trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);    in trace_note()
    210  struct ring_buffer *buffer = NULL;    in __blk_add_trace() local
    237  buffer = blk_tr->trace_buffer.buffer;    in __blk_add_trace()
    239  event = trace_buffer_lock_reserve(buffer, TRACE_BLK,    in __blk_add_trace()
    285  trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);    in __blk_add_trace()
    356  static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,    in blk_dropped_read() argument
    364  return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));    in blk_dropped_read()
    [all …]

trace_kdb.c
    49   ring_buffer_read_prepare(iter.trace_buffer->buffer,    in ftrace_dump_buf()
    57   ring_buffer_read_prepare(iter.trace_buffer->buffer,    in ftrace_dump_buf()

trace.h
    165  struct ring_buffer *buffer;    member
    572  trace_buffer_lock_reserve(struct ring_buffer *buffer,
    584  void __buffer_unlock_commit(struct ring_buffer *buffer,
    636  struct ring_buffer *buffer, unsigned long flags,
    643  struct ring_buffer *buffer,    in ftrace_trace_userstack() argument
    711  int trace_array_printk_buf(struct ring_buffer *buffer,
    882  char *buffer;    member

/kernel/debug/kdb/
kdb_io.c
    34   static int kgdb_transition_check(char *buffer)    in kgdb_transition_check() argument
    36   if (buffer[0] != '+' && buffer[0] != '$') {    in kgdb_transition_check()
    38   kdb_printf("%s", buffer);    in kgdb_transition_check()
    40   int slen = strlen(buffer);    in kgdb_transition_check()
    41   if (slen > 3 && buffer[slen - 3] == '#') {    in kgdb_transition_check()
    42   kdb_gdb_state_pass(buffer);    in kgdb_transition_check()
    43   strcpy(buffer, "kgdb");    in kgdb_transition_check()
    51   static int kdb_read_get_key(char *buffer, size_t bufsize)    in kdb_read_get_key() argument
    89   *buffer++ = key;    in kdb_read_get_key()
    90   *buffer = '\0';    in kdb_read_get_key()
    [all …]

kdb_bt.c
    83   char buffer[2];    in kdb_bt1() local
    84   if (kdb_getarea(buffer[0], (unsigned long)p) ||    in kdb_bt1()
    85   kdb_getarea(buffer[0], (unsigned long)(p+1)-1))    in kdb_bt1()
    93   kdb_getstr(buffer, sizeof(buffer),    in kdb_bt1()
    95   if (buffer[0] == 'q') {    in kdb_bt1()

/kernel/gcov/
gcc_4_7.c
    366  void *buffer;    member
    382  static size_t store_gcov_u32(void *buffer, size_t off, u32 v)    in store_gcov_u32() argument
    386  if (buffer) {    in store_gcov_u32()
    387  data = buffer + off;    in store_gcov_u32()
    406  static size_t store_gcov_u64(void *buffer, size_t off, u64 v)    in store_gcov_u64() argument
    410  if (buffer) {    in store_gcov_u64()
    411  data = buffer + off;    in store_gcov_u64()
    427  static size_t convert_to_gcda(char *buffer, struct gcov_info *info)    in convert_to_gcda() argument
    437  pos += store_gcov_u32(buffer, pos, GCOV_DATA_MAGIC);    in convert_to_gcda()
    438  pos += store_gcov_u32(buffer, pos, info->version);    in convert_to_gcda()
    [all …]

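The store_gcov_u32()/convert_to_gcda() lines above show a two-pass "measure then fill" serializer: each store helper writes only when given a destination, but always reports the size it would consume, so the same routine can be run once with a NULL buffer to size the .gcda image and once more to emit it. The standalone C sketch below illustrates that pattern only; the field values and function names are invented.

/* Userspace sketch of the NULL-buffer sizing pass used by the gcov serializer. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t store_u32(void *buffer, size_t off, uint32_t v)
{
	if (buffer)
		memcpy((char *)buffer + off, &v, sizeof(v));
	return sizeof(v);                       /* size is reported either way */
}

static size_t serialize(void *buffer, uint32_t magic, uint32_t version)
{
	size_t pos = 0;

	pos += store_u32(buffer, pos, magic);
	pos += store_u32(buffer, pos, version);
	return pos;
}

int main(void)
{
	uint32_t magic = 0x67636461, version = 0x30373441;   /* illustrative values */
	size_t len = serialize(NULL, magic, version);        /* pass 1: compute size */
	void *image = malloc(len);

	if (!image)
		return 1;
	serialize(image, magic, version);                    /* pass 2: fill buffer */
	printf("serialized %zu bytes\n", len);
	free(image);
	return 0;
}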
/kernel/
sysctl.c
    183   void __user *buffer, size_t *lenp, loff_t *ppos);
    185   void __user *buffer, size_t *lenp, loff_t *ppos);
    190   void __user *buffer, size_t *lenp, loff_t *ppos);
    194   void __user *buffer, size_t *lenp, loff_t *ppos);
    197   void __user *buffer, size_t *lenp, loff_t *ppos);
    205   void __user *buffer, size_t *lenp,    in sysrq_sysctl_handler() argument
    210   error = proc_dointvec(table, write, buffer, lenp, ppos);    in sysrq_sysctl_handler()
    1925  char __user *buffer,    in _proc_do_string() argument
    1953  p = buffer;    in _proc_do_string()
    1954  while ((p - buffer) < *lenp && len < maxlen - 1) {    in _proc_do_string()
    [all …]

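The sysrq_sysctl_handler() lines above show the proc_handler shape that recurs throughout this listing (sysctl.c, watchdog.c, utsname_sysctl.c): a custom handler delegates the user-buffer parsing/formatting to a generic helper such as proc_dointvec(), then reacts to the new value on a write. A minimal sketch of that shape follows; the knob name and the post-write action are placeholders, not anything from sysctl.c itself.

#include <linux/sysctl.h>
#include <linux/kernel.h>

/* Hypothetical handler wired into a ctl_table entry whose .data is an int. */
static int my_knob_handler(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Let the generic integer handler do the user-space I/O. */
	int error = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!error && write)
		pr_info("sysctl knob set to %d\n", *(int *)table->data);

	return error;
}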
kallsyms.c
    356  static int __sprint_symbol(char *buffer, unsigned long address,    in __sprint_symbol() argument
    365  name = kallsyms_lookup(address, &size, &offset, &modname, buffer);    in __sprint_symbol()
    367  return sprintf(buffer, "0x%lx", address - symbol_offset);    in __sprint_symbol()
    369  if (name != buffer)    in __sprint_symbol()
    370  strcpy(buffer, name);    in __sprint_symbol()
    371  len = strlen(buffer);    in __sprint_symbol()
    375  len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);    in __sprint_symbol()
    378  len += sprintf(buffer + len, " [%s]", modname);    in __sprint_symbol()
    394  int sprint_symbol(char *buffer, unsigned long address)    in sprint_symbol() argument
    396  return __sprint_symbol(buffer, address, 0, 1);    in sprint_symbol()
    [all …]

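sprint_symbol(), shown above, formats "name+offset/size [module]" into a caller-provided buffer. The short sketch below shows the usual calling convention with a KSYM_SYMBOL_LEN-sized buffer; the wrapper function and the log message are illustrative only.

#include <linux/kallsyms.h>
#include <linux/kernel.h>

/* Hypothetical helper: resolve and print a kernel address. */
static void report_symbol(unsigned long address)
{
	char buffer[KSYM_SYMBOL_LEN];

	sprint_symbol(buffer, address);   /* fits within KSYM_SYMBOL_LEN */
	pr_info("resolved %lx to %s\n", address, buffer);
}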
watchdog.c
    969   void __user *buffer, size_t *lenp, loff_t *ppos)    in proc_watchdog_common() argument
    990   err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);    in proc_watchdog_common()
    992   err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);    in proc_watchdog_common()
    1037  void __user *buffer, size_t *lenp, loff_t *ppos)    in proc_watchdog() argument
    1040  table, write, buffer, lenp, ppos);    in proc_watchdog()
    1047  void __user *buffer, size_t *lenp, loff_t *ppos)    in proc_nmi_watchdog() argument
    1050  table, write, buffer, lenp, ppos);    in proc_nmi_watchdog()
    1057  void __user *buffer, size_t *lenp, loff_t *ppos)    in proc_soft_watchdog() argument
    1060  table, write, buffer, lenp, ppos);    in proc_soft_watchdog()
    1067  void __user *buffer, size_t *lenp, loff_t *ppos)    in proc_watchdog_thresh() argument
    [all …]

reboot.c
    284  char buffer[256];    in SYSCALL_DEFINE4() local
    339  ret = strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1);    in SYSCALL_DEFINE4()
    344  buffer[sizeof(buffer) - 1] = '\0';    in SYSCALL_DEFINE4()
    346  kernel_restart(buffer);    in SYSCALL_DEFINE4()

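The reboot syscall lines above use a copy-and-terminate idiom worth calling out: strncpy_from_user() does not guarantee NUL termination when the source string fills the destination, so the code copies at most sizeof(buffer) - 1 bytes and then forces the last byte to '\0' before using the string. A hedged sketch of that idiom, with an invented wrapper name:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper: copy a user string into cmd[size] and terminate it. */
static int copy_cmd_from_user(char *cmd, size_t size, const char __user *arg)
{
	long ret = strncpy_from_user(cmd, arg, size - 1);

	if (ret < 0)
		return -EFAULT;

	cmd[size - 1] = '\0';             /* guarantee NUL termination */
	return 0;
}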
sysctl_binary.c
    975   char *buffer;    in bin_intvec() local
    979   buffer = kmalloc(BUFSZ, GFP_KERNEL);    in bin_intvec()
    980   if (!buffer)    in bin_intvec()
    989   result = kernel_read(file, 0, buffer, BUFSZ - 1);    in bin_intvec()
    993   str = buffer;    in bin_intvec()
    1019  str = buffer;    in bin_intvec()
    1031  result = kernel_write(file, buffer, str - buffer, 0);    in bin_intvec()
    1037  kfree(buffer);    in bin_intvec()
    1046  char *buffer;    in bin_ulongvec() local
    1050  buffer = kmalloc(BUFSZ, GFP_KERNEL);    in bin_ulongvec()
    [all …]

params.c
    277  int param_get_##name(char *buffer, const struct kernel_param *kp) \
    279  return scnprintf(buffer, PAGE_SIZE, format, \
    323  int param_get_charp(char *buffer, const struct kernel_param *kp)    in param_get_charp() argument
    325  return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg));    in param_get_charp()
    353  int param_get_bool(char *buffer, const struct kernel_param *kp)    in param_get_bool() argument
    356  return sprintf(buffer, "%c", *(bool *)kp->arg ? 'Y' : 'N');    in param_get_bool()
    413  int param_get_invbool(char *buffer, const struct kernel_param *kp)    in param_get_invbool() argument
    415  return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y');    in param_get_invbool()
    508  static int param_array_get(char *buffer, const struct kernel_param *kp)    in param_array_get() argument
    516  buffer[off++] = ',';    in param_array_get()
    [all …]

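The param_get_*() entries above all follow the kernel_param_ops get() convention: format the current value into the caller's page-sized buffer and return the length. The sketch below shows a custom parameter in that style; the parameter name, the clamping in set(), and the permission bits are invented, so read it as an illustration of the callback shape rather than code from params.c.

#include <linux/moduleparam.h>
#include <linux/kernel.h>

static int my_level = 1;                  /* hypothetical module parameter */

static int my_level_set(const char *val, const struct kernel_param *kp)
{
	int ret = kstrtoint(val, 0, (int *)kp->arg);

	if (ret)
		return ret;
	if (*(int *)kp->arg < 0)          /* hypothetical clamp */
		*(int *)kp->arg = 0;
	return 0;
}

static int my_level_get(char *buffer, const struct kernel_param *kp)
{
	/* At most PAGE_SIZE bytes are available, as in param_get_charp(). */
	return scnprintf(buffer, PAGE_SIZE, "%d", *(int *)kp->arg);
}

static const struct kernel_param_ops my_level_ops = {
	.set = my_level_set,
	.get = my_level_get,
};
module_param_cb(my_level, &my_level_ops, &my_level, 0644);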
utsname_sysctl.c
    36   void __user *buffer, size_t *lenp, loff_t *ppos)    in proc_do_uts_string() argument
    54   r = proc_dostring(&uts_table, write, buffer, lenp, ppos);    in proc_do_uts_string()

/kernel/power/
snapshot.c
    78    static void *buffer;    variable
    1249  safe_copy_page(buffer, s_page);    in copy_data_page()
    1251  copy_page(dst, buffer);    in copy_data_page()
    1362  buffer = NULL;    in swsusp_free()
    1760  buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);    in get_highmem_buffer()
    1761  return buffer ? 0 : -ENOMEM;    in get_highmem_buffer()
    1961  if (!buffer) {    in snapshot_read_next()
    1963  buffer = get_image_page(GFP_ATOMIC, PG_ANY);    in snapshot_read_next()
    1964  if (!buffer)    in snapshot_read_next()
    1970  error = init_header((struct swsusp_info *)buffer);    in snapshot_read_next()
    [all …]

/kernel/irq/
proc.c
    93   const char __user *buffer, size_t count, loff_t *pos)    in write_irq_affinity() argument
    106  err = cpumask_parselist_user(buffer, count, new_value);    in write_irq_affinity()
    108  err = cpumask_parse_user(buffer, count, new_value);    in write_irq_affinity()
    137  const char __user *buffer, size_t count, loff_t *pos)    in irq_affinity_proc_write() argument
    139  return write_irq_affinity(0, file, buffer, count, pos);    in irq_affinity_proc_write()
    143  const char __user *buffer, size_t count, loff_t *pos)    in irq_affinity_list_proc_write() argument
    145  return write_irq_affinity(1, file, buffer, count, pos);    in irq_affinity_list_proc_write()
    193  const char __user *buffer, size_t count, loff_t *ppos)    in default_affinity_write() argument
    201  err = cpumask_parse_user(buffer, count, new_value);    in default_affinity_write()

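The affinity write handlers above hand the raw user buffer straight to cpumask_parse_user() (or cpumask_parselist_user() for "0-3,8" style lists) and only then act on the parsed mask. A minimal sketch of that /proc write path follows; the handler name and what is done with the parsed mask are placeholders.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/fs.h>

/* Hypothetical /proc write handler that accepts a hex cpumask from userspace. */
static ssize_t my_affinity_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	ssize_t err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	/* Parse "f0" style masks; cpumask_parselist_user() handles "0-3" lists. */
	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		/* ... apply new_value here (placeholder) ... */
		err = count;
	}

	free_cpumask_var(new_value);
	return err;
}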
/kernel/debug/
gdbstub.c
    90    static void get_packet(char *buffer)    in get_packet() argument
    119   buffer[count] = ch;    in get_packet()
    136   buffer[count] = 0;    in get_packet()
    144   static void put_packet(char *buffer)    in put_packet() argument
    158   while ((ch = buffer[count])) {    in put_packet()
    1115  unsigned char checksum, ch, buffer[3];    in gdbstub_exit() local
    1125  buffer[0] = 'W';    in gdbstub_exit()
    1126  buffer[1] = hex_asc_hi(status);    in gdbstub_exit()
    1127  buffer[2] = hex_asc_lo(status);    in gdbstub_exit()
    1133  ch = buffer[loop];    in gdbstub_exit()

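get_packet() and put_packet() above implement the GDB remote serial protocol framing: a packet is "$<payload>#<checksum>", where the checksum is the sum of the payload bytes modulo 256 sent as two hex digits (the role hex_asc_hi()/hex_asc_lo() play when gdbstub_exit() builds its "Wxx" exit packet). The standalone C sketch below computes that checksum for an example payload; it illustrates the framing only and is not code from gdbstub.c.

#include <stdio.h>

/* Sum the payload bytes modulo 256, as the GDB remote protocol requires. */
static unsigned char packet_checksum(const char *payload)
{
	unsigned char sum = 0;

	while (*payload)
		sum += (unsigned char)*payload++;
	return sum;
}

int main(void)
{
	const char *payload = "W00";      /* e.g. an exit packet for status 0 */

	printf("$%s#%02x\n", payload, packet_checksum(payload));
	return 0;
}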