Lines matching refs:buffer (references to 'buffer'):
354 ret = vsnprintf(s->buffer + s->len, len, fmt, ap); in trace_seq_printf()
384 memcpy(s->buffer + s->len, str, len); in trace_seq_puts()
396 s->buffer[s->len++] = c; in trace_seq_putc()
407 memcpy(s->buffer + s->len, mem, len); in trace_seq_putmem()
443 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
445 p = mangle_path(s->buffer + s->len, p, "\n");
447 s->len = p - s->buffer;
451 s->buffer[s->len++] = '?';
476 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
489 s->buffer[len] = 0;
490 seq_puts(m, s->buffer);
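
The trace_seq helpers above (source lines 354-490) all follow one pattern: append into s->buffer at offset s->len, bump len, and bounds-check against the page-sized buffer before writing. Below is a minimal sketch of that pattern. The field names (buffer, len, readpos) and the PAGE_SIZE bound are taken from the lines above; the struct and function names are placeholders, not the kernel's own definitions, and the sketch assumes <linux/string.h> and <asm/page.h> are available.

    /* Hedged sketch of the append pattern used by the trace_seq helpers. */
    struct trace_seq_sketch {
    	unsigned char buffer[PAGE_SIZE];
    	unsigned int len;
    	unsigned int readpos;
    };

    static int seq_sketch_putc(struct trace_seq_sketch *s, unsigned char c)
    {
    	if (s->len >= PAGE_SIZE)              /* no room left: report failure */
    		return 0;
    	s->buffer[s->len++] = c;              /* same store as source line 396 */
    	return 1;
    }

    static int seq_sketch_puts(struct trace_seq_sketch *s, const char *str)
    {
    	int len = strlen(str);

    	if (len > PAGE_SIZE - s->len)         /* would overflow the page */
    		return 0;
    	memcpy(s->buffer + s->len, str, len); /* same copy as source line 384 */
    	s->len += len;
    	return 1;
    }
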
507 struct ring_buffer *buf = tr->buffer;
512 tr->buffer = max_tr.buffer;
513 max_tr.buffer = buf;
516 ring_buffer_reset(tr->buffer);
541 ring_buffer_reset(max_tr.buffer);
542 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
677 ring_buffer_reset_cpu(tr->buffer, cpu);
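
Source lines 507-677 come from the max-latency snapshot path: when a new worst-case latency is seen, the live buffer and max_tr's buffer are exchanged wholesale by swapping pointers, or a single CPU's buffer is exchanged with ring_buffer_swap_cpu(). The sketch below uses only the calls and fields visible above; tr and max_tr are assumed to be trace_array instances (only the ring_buffer type and the .buffer field actually appear in the listing), and the real code's locking is omitted.

    /* Sketch: record a new worst-case latency by exchanging ring buffers. */
    static void snapshot_max_sketch(struct trace_array *tr, struct trace_array *max_tr)
    {
    	struct ring_buffer *buf = tr->buffer;   /* source line 507 */

    	tr->buffer = max_tr->buffer;            /* lines 512-513: swap the pointers */
    	max_tr->buffer = buf;

    	ring_buffer_reset(tr->buffer);          /* line 516: restart the live buffer clean */
    }

    /* Per-CPU variant, as around source lines 541-542. */
    static int snapshot_max_cpu_sketch(struct trace_array *tr,
    				   struct trace_array *max_tr, int cpu)
    {
    	ring_buffer_reset(max_tr->buffer);
    	return ring_buffer_swap_cpu(max_tr->buffer, tr->buffer, cpu);
    }
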
734 struct ring_buffer *buffer; local
752 buffer = global_trace.buffer;
753 if (buffer)
754 ring_buffer_record_enable(buffer);
756 buffer = max_tr.buffer;
757 if (buffer)
758 ring_buffer_record_enable(buffer);
773 struct ring_buffer *buffer; local
781 buffer = global_trace.buffer;
782 if (buffer)
783 ring_buffer_record_disable(buffer);
785 buffer = max_tr.buffer;
786 if (buffer)
787 ring_buffer_record_disable(buffer);
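
Source lines 734-787 show the start/stop pattern: recording is enabled or disabled on both the live buffer and the max-latency buffer together, with a NULL check because either buffer may not have been allocated yet. A condensed sketch of the enable side, assuming the global_trace/max_tr globals named in the listing (the disable side is symmetric, using ring_buffer_record_disable()):

    /* Sketch of the enable pattern from the tracing start path. */
    static void tracing_record_on_sketch(void)
    {
    	struct ring_buffer *buffer;

    	buffer = global_trace.buffer;              /* source line 752 */
    	if (buffer)                                /* may not be allocated yet */
    		ring_buffer_record_enable(buffer);

    	buffer = max_tr.buffer;                    /* source line 756 */
    	if (buffer)
    		ring_buffer_record_enable(buffer);
    }
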
892 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
901 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
918 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
926 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
942 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
950 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
977 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
993 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1018 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1034 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1056 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1066 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1091 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1105 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1121 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
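
Source lines 892-1135 are all instances of the same write path: reserve room for a fixed-size entry with ring_buffer_lock_reserve(), fill it in, then publish it with ring_buffer_unlock_commit(). The three-argument signatures below match what the listing shows for this kernel; ring_buffer_event_data() is the usual accessor for the reserved payload but does not appear in the listing, so treat it as an assumption, and 'struct my_entry' is a placeholder for the various entry types these call sites write.

    /* Hedged sketch of the reserve/fill/commit pattern. */
    static void write_entry_sketch(struct trace_array *tr)
    {
    	struct ring_buffer_event *event;
    	struct my_entry *entry;                   /* placeholder entry type */
    	unsigned long irq_flags;

    	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
    					 &irq_flags);        /* e.g. source line 892 */
    	if (!event)
    		return;                 /* buffer full or recording disabled */

    	entry = ring_buffer_event_data(event);  /* assumed accessor, not in listing */
    	/* ... fill in the entry fields here ... */

    	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);  /* e.g. line 901 */
    }
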
1336 event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
1346 struct ring_buffer *buffer = iter->tr->buffer; local
1354 if (ring_buffer_empty_cpu(buffer, cpu))
1400 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
1657 entries = ring_buffer_entries(iter->tr->buffer);
1659 ring_buffer_overruns(iter->tr->buffer);
1793 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
2353 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2459 if (ring_buffer_overruns(iter->tr->buffer))
2466 ring_buffer_read_start(iter->tr->buffer, cpu);
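
Source lines 1336-2466 are the read side: ring_buffer_peek() looks at the next event on a CPU without consuming it, ring_buffer_empty_cpu() skips CPUs with nothing queued, and ring_buffer_consume() removes an event once the iterator settles on it; ring_buffer_entries() and ring_buffer_overruns() only report counters. A hedged sketch of picking a non-empty CPU and consuming from it follows; the real iterator compares per-CPU timestamps to always take the oldest event, which is omitted here.

    /* Sketch: consume the next event from the first CPU that has one. */
    static void consume_one_sketch(struct trace_array *tr)
    {
    	u64 ts;
    	int cpu;

    	for_each_possible_cpu(cpu) {
    		if (ring_buffer_empty_cpu(tr->buffer, cpu))   /* source line 1354 */
    			continue;
    		if (!ring_buffer_peek(tr->buffer, cpu, &ts))  /* source line 1336 */
    			continue;
    		ring_buffer_consume(tr->buffer, cpu, &ts);    /* source line 1400 */
    		break;
    	}
    }
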
3347 ret = ring_buffer_resize(global_trace.buffer, val);
3353 ret = ring_buffer_resize(max_tr.buffer, val);
3357 r = ring_buffer_resize(global_trace.buffer,
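
Source lines 3347-3357 show the resize sequence: resize the live buffer first, then the max-latency buffer, and if the second resize fails, try to put the first buffer back to its previous size. A hedged sketch of that rollback, assuming the global_trace/max_tr globals and the saved .entries counts suggested by source lines 3849 and 3861:

    /* Sketch of the resize-with-rollback pattern. */
    static int resize_buffers_sketch(unsigned long val)
    {
    	int ret;

    	ret = ring_buffer_resize(global_trace.buffer, val);       /* line 3347 */
    	if (ret < 0)
    		return ret;

    	ret = ring_buffer_resize(max_tr.buffer, val);             /* line 3353 */
    	if (ret < 0) {
    		/* Try to restore the first buffer to its old size. */
    		int r = ring_buffer_resize(global_trace.buffer,
    					   global_trace.entries);  /* line 3357 */
    		if (r < 0)
    			return r;   /* both resizes failed; real code warns here */
    		return ret;
    	}

    	global_trace.entries = val;
    	max_tr.entries = val;
    	return 0;
    }
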
3656 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
3667 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3749 s->buffer[s->len] = 0;
3751 printk(KERN_TRACE "%s", s->buffer);
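
Source lines 3749-3751 are the dump-to-console path: the trace_seq buffer is NUL-terminated in place and pushed out through printk(). A tiny sketch, reusing the trace_seq_sketch type from the first sketch above; KERN_TRACE is a printk prefix defined elsewhere in this file's tree and not shown in the listing, so KERN_INFO stands in for it here.

    /* Sketch of the console dump pattern. */
    static void printk_seq_sketch(struct trace_seq_sketch *s)
    {
    	if (s->len >= PAGE_SIZE)                /* keep room for the terminator */
    		s->len = PAGE_SIZE - 1;
    	s->buffer[s->len] = 0;                  /* source line 3749 */
    	printk(KERN_INFO "%s", s->buffer);      /* line 3751 uses KERN_TRACE */
    	s->len = 0;                             /* ready for the next chunk */
    }
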
3842 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3844 if (!global_trace.buffer) {
3849 global_trace.entries = ring_buffer_size(global_trace.buffer);
3853 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3855 if (!max_tr.buffer) {
3858 ring_buffer_free(global_trace.buffer);
3861 max_tr.entries = ring_buffer_size(max_tr.buffer);
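
The final block (source lines 3842-3861) is the boot-time allocation: allocate the live buffer, record its size in .entries, then allocate the max-latency buffer, freeing the first one if the second allocation fails. A hedged sketch follows; the second argument to ring_buffer_alloc() is cut off in the listing, so a placeholder 'rb_flags' stands in for it.

    /* Sketch of the allocation order and error unwinding at boot. */
    static int alloc_trace_buffers_sketch(unsigned long trace_buf_size,
    				      unsigned int rb_flags)   /* placeholder flags */
    {
    	global_trace.buffer = ring_buffer_alloc(trace_buf_size, rb_flags); /* line 3842 */
    	if (!global_trace.buffer)
    		return -ENOMEM;                       /* nothing to undo yet */
    	global_trace.entries = ring_buffer_size(global_trace.buffer);      /* line 3849 */

    	max_tr.buffer = ring_buffer_alloc(trace_buf_size, rb_flags);       /* line 3853 */
    	if (!max_tr.buffer) {
    		ring_buffer_free(global_trace.buffer);   /* line 3858: unwind */
    		global_trace.buffer = NULL;
    		return -ENOMEM;
    	}
    	max_tr.entries = ring_buffer_size(max_tr.buffer);                   /* line 3861 */
    	return 0;
    }
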