// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/ring_buffer_ext.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)
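
/*
 * Illustrative sketch (not compiled as part of this file): how a full
 * clock value splits across these masks. The variable names are local
 * to the example.
 *
 *	u64 clock = 0xfc00001234567890ULL;	// a clock with MSBs set
 *	u64 msb   = clock & TS_MSB;		// 0xf800000000000000, saved
 *	u64 abs   = clock & ABS_TS_MASK;	// 59-bit value the event stores
 *
 * rb_fix_abs_ts() further down ORs the saved MSBs back into the
 * absolute time stamp read from the buffer.
 */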

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}
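
/*
 * For reference, the compressed header printed above matches the event
 * layout declared in <linux/ring_buffer.h>:
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A type_len of 1..RINGBUF_TYPE_DATA_TYPE_LEN_MAX encodes the data
 * length in RB_ALIGNMENT (4 byte) units; a type_len of 0 means the
 * length is stored in array[0] instead (see rb_event_data_length()).
 */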

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
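
/*
 * Condensed into (pseudo-)code, the swap pictured above amounts to the
 * following sketch; the real version, with its cmpxchg and retry logic,
 * lives in rb_get_reader_page() later in this file:
 *
 *	reader->list.next = rb_list_head(head->list.next);
 *	reader->list.prev = head->list.prev;
 *	rb_head_page_replace(head, reader);	// neighbors adopt old reader
 *	cpu_buffer->reader_page = head;		// old head is the new reader
 */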

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA		0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)		\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
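
/*
 * Example (illustrative numbers): a raw "write" value of 0x00300014
 * decomposes as
 *
 *	0x00300014 & RB_WRITE_MASK	== 0x14	// write position on the page
 *	0x00300014 >> 20		== 0x3	// three updaters have been here
 *
 * rb_tail_page_update() below adds RB_WRITE_INTCNT before resetting a
 * page's counters, so a nested writer that got in first is detected by
 * the changed updater count.
 */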

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	long				wait_index;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp field:
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE		= 0,
	RB_ADD_STAMP_EXTEND		= BIT(1),
	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
	RB_ADD_STAMP_FORCE		= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resizing;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;

	struct ring_buffer_ext_cb	*ext_cb;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 bit still uses local64_t).
 *
 * For the ring buffer, the 64 bit operations required for the time
 * stamps are the following:
 *
 *  - Reads may fail if they interrupted a modification of the time stamp.
 *      They will succeed if they did not interrupt another write, even if
 *      the read itself is interrupted by a write.
 *      A read returns whether it was successful or not.
 *
 *  - Writes always succeed and will overwrite other writes and writes
 *      that were done by events interrupting the current write.
 *
 *  - A write followed by a read of the same time stamp will always succeed,
 *      but may not contain the same value.
 *
 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *      Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up into a top and a bottom half of
 * 30 bits each (the bottom half being the least significant 30 bits
 * of the 60 bit time stamp).
 *
 * The two most significant bits of each half hold a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 * top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	 60
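
/*
 * Worked example (sketch): rb_time_split() below stores a time stamp
 * val in three 32-bit words as
 *
 *	bottom = val & RB_TIME_VAL_MASK;		// bits 0..29
 *	top    = (val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK; // bits 30..59
 *	msb    = val >> RB_TIME_MSB_SHIFT;		// bits 60..63
 *
 * and rb_time_val_cnt() then places the 2 bit counter in bits 30-31 of
 * each word. A reader accepts the triple only if all three counters
 * agree, which is exactly the check done in __rb_time_read().
 */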

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}

static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top, msb or bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(msb) || *cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}

static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	unsigned long ret;

	ret = local_cmpxchg(l, expect, set);
	return ret == expect;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}
#endif

static inline bool has_ext_writer(struct trace_buffer *buffer)
{
	return !!buffer->ext_cb;
}

static inline bool rb_has_ext_writer(struct ring_buffer_per_cpu *cpu_buffer)
{
	return has_ext_writer(cpu_buffer->buffer);
}

/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}
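
/*
 * Example (illustrative numbers): with save_ts = 0xf800000000001000 and
 * a saved absolute value abs = 0x2000, rb_fix_abs_ts() returns
 * 0xf800000000002000. If instead the OR produced a value below save_ts
 * (the 59-bit part wrapped around), one extra 1ULL << 59 is added,
 * which bumps the restored 5 MSB field by one.
 */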

static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned;
 * otherwise the current time is returned. Really, neither of the
 * last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	/*
	 * Add one as dirty will never equal nr_pages, as the sub-buffer
	 * that the writer is on is not counted as dirty.
	 * This is needed if "buffer_percent" is set to 100.
	 */
	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;

	return (dirty * 100) >= (full * nr_pages);
}
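
/*
 * Example: with nr_pages = 8 and full = 50 (wake at 50%), the check
 * above reduces to
 *
 *	(nr_dirty + 1) * 100 >= 50 * 8
 *
 * so a waiter is released once at least four sub-buffers carry data:
 * three counted as dirty plus the one the writer is currently on.
 */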

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on, or RING_BUFFER_ALL_CPUS
 *
 * When a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on it.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	rbwork->wait_index++;
	/* make sure the waiters see the new index */
	smp_wmb();

	/* This can be called in any context */
	irq_work_queue(&rbwork->work);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	long wait_index;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	wait_index = READ_ONCE(work->wait_index);

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;
			bool done;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			done = !pagebusy && full_hit(buffer, cpu, full);

			if (!cpu_buffer->shortest_full ||
			    cpu_buffer->shortest_full > full)
				cpu_buffer->shortest_full = full;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
			if (done)
				break;
		}

		schedule();

		/* Make sure to see the new wait index */
		smp_rmb();
		if (wait_index != work->wait_index)
			break;
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}
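
/*
 * Typical reader-side usage (sketch only; error handling is elided and
 * process() stands in for whatever the caller does with an event):
 *
 *	while (!kthread_should_stop()) {
 *		struct ring_buffer_event *event;
 *
 *		if (ring_buffer_wait(buffer, cpu, 0) < 0)
 *			break;
 *		while ((event = ring_buffer_consume(buffer, cpu, NULL, NULL)))
 *			process(ring_buffer_event_data(event));
 *	}
 */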

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		full = 0;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return EPOLLERR;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	if (full) {
		poll_wait(filp, &work->full_waiters, poll_table);
		work->full_waiters_pending = true;
		if (!cpu_buffer->shortest_full ||
		    cpu_buffer->shortest_full > full)
			cpu_buffer->shortest_full = full;
	} else {
		poll_wait(filp, &work->waiters, poll_table);
		work->waiters_pending = true;
	}

	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in the buffer.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if (full)
		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
	u64 ts;

	/* Skip retpolines :-( */
	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
		ts = trace_clock_local();
	else
		ts = buffer->clock();

	/* shift to debug/test normalization and TIME_EXTENTS */
	return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we cannot trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */
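
/*
 * In code, the table above means a "next" pointer in the page list is
 * really (address | flags), e.g.:
 *
 *	unsigned long val = (unsigned long)head->list.prev->next;
 *
 *	val & RB_FLAG_MASK;	// RB_PAGE_HEAD (1) if this is the head page
 *	val & ~RB_FLAG_MASK;	// the actual struct list_head address
 *
 * rb_list_head() below strips the flag bits, and the rb_head_page_set*()
 * helpers flip them with cmpxchg() so that reader and writer cannot both
 * win the same transition.
 */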

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we cannot
 * trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the head page behind
	 * where we started, and we miss it in one loop.
	 * A second loop should grab it, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(&page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
				struct buffer_page *tail_page,
				struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	local_inc(&cpu_buffer->pages_touched);
	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = rb_list_head(cpu_buffer->pages);
	struct list_head *tmp;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->next)->prev) != head))
		return -1;

	if (RB_WARN_ON(cpu_buffer,
			rb_list_head(rb_list_head(head->prev)->next) != head))
		return -1;

	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
			return -1;

		if (RB_WARN_ON(cpu_buffer,
				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
			return -1;
	}

	return 0;
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
		long nr_pages, struct list_head *pages)
{
	struct buffer_page *bpage, *tmp;
	bool user_thread = current->mm != NULL;
	gfp_t mflags;
	long i;

	/*
	 * Check if the available memory is there first.
	 * Note, si_mem_available() only gives us a rough estimate of available
	 * memory. It may not be accurate. But we don't care, we just want
	 * to prevent doing any allocation when it is obvious that it is
	 * not going to succeed.
	 */
	i = si_mem_available();
	if (i < nr_pages)
		return -ENOMEM;

	/*
	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
	 * gracefully without invoking oom-killer and the system is not
	 * destabilized.
	 */
	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	/*
	 * A user thread may allocate too much while si_mem_available()
	 * reports there's enough memory, even though there is not.
	 * Make sure the OOM killer kills this thread. This can happen
	 * even with RETRY_MAYFAIL because another task may be doing
	 * an allocation after this task has taken all memory.
	 * This is the task the OOM killer needs to take out during this
	 * loop, even if it was triggered by an allocation somewhere else.
	 */
	if (user_thread)
		set_current_oom_origin();
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    mflags, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);

		if (user_thread && fatal_signal_pending(current))
			goto free_pages;
	}
	if (user_thread)
		clear_current_oom_origin();

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (user_thread)
		clear_current_oom_origin();

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned long nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}
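
/*
 * Illustrative consequence of the list_del() above: cpu_buffer->pages
 * points at a buffer_page's list entry, not at a list head, so the ring
 * must be walked by pointer comparison (as rb_check_pages() does):
 *
 *	struct list_head *p = cpu_buffer->pages;
 *
 *	do {
 *		p = rb_list_head(p->next);
 *	} while (p != cpu_buffer->pages);
 */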
1654
1655 static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer * buffer,long nr_pages,int cpu)1656 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1657 {
1658 struct ring_buffer_per_cpu *cpu_buffer;
1659 struct buffer_page *bpage;
1660 struct page *page;
1661 int ret;
1662
1663 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1664 GFP_KERNEL, cpu_to_node(cpu));
1665 if (!cpu_buffer)
1666 return NULL;
1667
1668 cpu_buffer->cpu = cpu;
1669 cpu_buffer->buffer = buffer;
1670 raw_spin_lock_init(&cpu_buffer->reader_lock);
1671 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1672 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1673 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1674 init_completion(&cpu_buffer->update_done);
1675 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1676 init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1677 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1678
1679 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1680 GFP_KERNEL, cpu_to_node(cpu));
1681 if (!bpage)
1682 goto fail_free_buffer;
1683
1684 rb_check_bpage(cpu_buffer, bpage);
1685
1686 cpu_buffer->reader_page = bpage;
1687 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1688 if (!page)
1689 goto fail_free_reader;
1690 bpage->page = page_address(page);
1691 rb_init_page(bpage->page);
1692
1693 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1694 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1695
1696 ret = rb_allocate_pages(cpu_buffer, nr_pages);
1697 if (ret < 0)
1698 goto fail_free_reader;
1699
1700 cpu_buffer->head_page
1701 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1702 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1703
1704 rb_head_page_activate(cpu_buffer);
1705
1706 return cpu_buffer;
1707
1708 fail_free_reader:
1709 free_buffer_page(cpu_buffer->reader_page);
1710
1711 fail_free_buffer:
1712 kfree(cpu_buffer);
1713 return NULL;
1714 }
1715
rb_free_cpu_buffer(struct ring_buffer_per_cpu * cpu_buffer)1716 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1717 {
1718 struct list_head *head = cpu_buffer->pages;
1719 struct buffer_page *bpage, *tmp;
1720
1721 irq_work_sync(&cpu_buffer->irq_work.work);
1722
1723 free_buffer_page(cpu_buffer->reader_page);
1724
1725 if (head) {
1726 rb_head_page_deactivate(cpu_buffer);
1727
1728 list_for_each_entry_safe(bpage, tmp, head, list) {
1729 list_del_init(&bpage->list);
1730 free_buffer_page(bpage);
1731 }
1732 bpage = list_entry(head, struct buffer_page, list);
1733 free_buffer_page(bpage);
1734 }
1735
1736 free_page((unsigned long)cpu_buffer->free_page);
1737
1738 kfree(cpu_buffer);
1739 }
1740
/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					 struct lock_class_key *key)
{
	struct trace_buffer *buffer;
	long nr_pages;
	int bsize;
	int cpu;
	int ret;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	cpu = raw_smp_processor_id();
	cpumask_set_cpu(cpu, buffer->cpumask);
	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
	if (!buffer->buffers[cpu])
		goto fail_free_buffers;

	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
	if (ret < 0)
		goto fail_free_buffers;

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
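
/*
 * Callers normally go through the ring_buffer_alloc() wrapper macro in
 * <linux/ring_buffer.h>, which supplies a static lock_class_key so that
 * lockdep can tell reader_lock classes apart per call site. A minimal
 * usage sketch (the size value is illustrative only):
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 */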
1819
struct trace_buffer *ring_buffer_alloc_ext(unsigned long size,
					   struct ring_buffer_ext_cb *cb)
{
	struct trace_buffer *buffer;

	if (!cb || !cb->update_footers || !cb->swap_reader)
		return NULL;

	buffer = ring_buffer_alloc(size, RB_FL_OVERWRITE);
	if (!buffer)
		return NULL;

	WARN_ON(cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE,
					    &buffer->node));
	buffer->ext_cb = cb;
	atomic_set(&buffer->record_disabled, 1);

	return buffer;
}
1839
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct trace_buffer *buffer)
{
	int cpu;

	if (!has_ext_writer(buffer))
		cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE,
					    &buffer->node);

	irq_work_sync(&buffer->irq_work.work);

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
1864
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
	buffer->time_stamp_abs = abs;
}

bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
	return buffer->time_stamp_abs;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}
1892
static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned long nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between, including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose as few events as possible.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * If the tail page is on the reader page, we remove the next page
	 * from the ring buffer instead.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have the head_bit value preserved for the
	 * next page.
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
					       head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
				list);
	tmp_iter_page = first_page;

	do {
		cond_resched();

		to_remove_page = tmp_iter_page;
		rb_inc_page(&tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in the ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
			local_inc(&cpu_buffer->pages_lost);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page.
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}
2005
static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move the head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of the list of new pages
	 *    between the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from the head page to
	 *    the start of the new pages list.
	 * 3. Finally, we update the head->prev to the end of the new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning. (A diagram of the splice follows this function.)
	 */
	retries = 10;
	success = 0;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;

		head_page = &rb_set_head_page(cpu_buffer)->list;
		if (!head_page)
			break;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * Yay, we replaced the page pointer with our new
			 * list. Now we just have to update the head page's
			 * prev pointer to point to the end of the list.
			 */
			head_page->prev = last_page;
			success = 1;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding in new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}
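
/*
 * A rough sketch of the splice above, assuming the cmpxchg wins:
 *
 *   before:  prev --> HEAD --> ...
 *   step 1:  first..last prepared with last->next = HEAD (flagged) and
 *            first->prev = prev
 *   step 2:  cmpxchg(prev->next): HEAD --> first
 *   after:   prev --> first ... last --> HEAD --> ...
 *
 * The RB_PAGE_HEAD flag travels with the pointer to HEAD, so a writer
 * walking the list still finds the head page marker in the same place.
 */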
2082
static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	int success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					-cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}

static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}
2104
/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 * @cpu_id: the cpu buffer to resize
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns 0 on success and < 0 on failure.
 */
int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
			int cpu_id)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long nr_pages;
	int cpu, err;

	if (unlikely(has_ext_writer(buffer)))
		return -EINVAL;
	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return 0;

	/* Make sure the requested buffer exists */
	if (cpu_id != RING_BUFFER_ALL_CPUS &&
	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
		return 0;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	/* we need a minimum of two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);
	atomic_inc(&buffer->resizing);

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/*
		 * Don't succeed if resizing is disabled, as a reader might be
		 * manipulating the ring buffer and is expecting a sane state
		 * while this is true.
		 */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (atomic_read(&cpu_buffer->resize_disabled)) {
				err = -EBUSY;
				goto out_err_unlock;
			}
		}

		/* calculate the pages to update */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];

			cpu_buffer->nr_pages_to_update = nr_pages -
							cpu_buffer->nr_pages;
			/*
			 * nothing more to do for removing pages or no update
			 */
			if (cpu_buffer->nr_pages_to_update <= 0)
				continue;
			/*
			 * to add pages, make sure all new pages can be
			 * allocated without receiving ENOMEM
			 */
			INIT_LIST_HEAD(&cpu_buffer->new_pages);
			if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
						&cpu_buffer->new_pages)) {
				/* not enough memory for new pages */
				err = -ENOMEM;
				goto out_err;
			}

			cond_resched();
		}

		cpus_read_lock();
		/*
		 * Fire off all the required work handlers.
		 * We can't schedule on offline CPUs, but it's not necessary
		 * since we can change their buffer sizes without any race.
		 */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			/* Can't run something on an offline CPU. */
			if (!cpu_online(cpu)) {
				rb_update_pages(cpu_buffer);
				cpu_buffer->nr_pages_to_update = 0;
			} else {
				schedule_work_on(cpu,
						&cpu_buffer->update_pages_work);
			}
		}

		/* wait for all the updates to complete */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			if (cpu_online(cpu))
				wait_for_completion(&cpu_buffer->update_done);
			cpu_buffer->nr_pages_to_update = 0;
		}

		cpus_read_unlock();
	} else {
		cpu_buffer = buffer->buffers[cpu_id];

		if (nr_pages == cpu_buffer->nr_pages)
			goto out;

		/*
		 * Don't succeed if resizing is disabled, as a reader might be
		 * manipulating the ring buffer and is expecting a sane state
		 * while this is true.
		 */
		if (atomic_read(&cpu_buffer->resize_disabled)) {
			err = -EBUSY;
			goto out_err_unlock;
		}

		cpu_buffer->nr_pages_to_update = nr_pages -
						cpu_buffer->nr_pages;

		INIT_LIST_HEAD(&cpu_buffer->new_pages);
		if (cpu_buffer->nr_pages_to_update > 0 &&
		    __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
					&cpu_buffer->new_pages)) {
			err = -ENOMEM;
			goto out_err;
		}

		cpus_read_lock();

		/* Can't run something on an offline CPU. */
		if (!cpu_online(cpu_id))
			rb_update_pages(cpu_buffer);
		else {
			schedule_work_on(cpu_id,
					&cpu_buffer->update_pages_work);
			wait_for_completion(&cpu_buffer->update_done);
		}

		cpu_buffer->nr_pages_to_update = 0;
		cpus_read_unlock();
	}

 out:
	/*
	 * The ring buffer resize can happen with the ring buffer
	 * enabled, so that the update disturbs the tracing as little
	 * as possible. But if the buffer is disabled, we do not need
	 * to worry about that, and we can take the time to verify
	 * that the buffer is not corrupt.
	 */
	if (atomic_read(&buffer->record_disabled)) {
		atomic_inc(&buffer->record_disabled);
		/*
		 * Even though the buffer was disabled, we must make sure
		 * that it is truly disabled before calling rb_check_pages.
		 * There could have been a race between checking
		 * record_disabled and incrementing it.
		 */
		synchronize_rcu();
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_check_pages(cpu_buffer);
		}
		atomic_dec(&buffer->record_disabled);
	}

	atomic_dec(&buffer->resizing);
	mutex_unlock(&buffer->mutex);
	return 0;

 out_err:
	for_each_buffer_cpu(buffer, cpu) {
		struct buffer_page *bpage, *tmp;

		cpu_buffer = buffer->buffers[cpu];
		cpu_buffer->nr_pages_to_update = 0;

		if (list_empty(&cpu_buffer->new_pages))
			continue;

		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
 out_err_unlock:
	atomic_dec(&buffer->resizing);
	mutex_unlock(&buffer->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
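
/*
 * A minimal usage sketch (the size value is illustrative): grow every
 * per-CPU buffer to at least 1 MiB, which ring_buffer_resize() rounds
 * up to a whole number of buffer pages:
 *
 *	if (ring_buffer_resize(buffer, 1 << 20, RING_BUFFER_ALL_CPUS))
 *		pr_warn("resize failed\n");
 *
 * Passing a specific CPU number instead of RING_BUFFER_ALL_CPUS resizes
 * only that CPU's buffer.
 */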
2310
void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);

static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static __always_inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}
2333
static struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	struct ring_buffer_event *event;
	struct buffer_page *iter_head_page = iter->head_page;
	unsigned long commit;
	unsigned length;

	if (iter->head != iter->next_event)
		return iter->event;

	/*
	 * When the writer goes across pages, it issues a cmpxchg which
	 * is a mb(), which will synchronize with the rmb here.
	 * (see rb_tail_page_update() and __rb_reserve_next())
	 */
	commit = rb_page_commit(iter_head_page);
	smp_rmb();

	/* An event needs to be at least 8 bytes in size */
	if (iter->head > commit - 8)
		goto reset;

	event = __rb_page_index(iter_head_page, iter->head);
	length = rb_event_length(event);

	/*
	 * READ_ONCE() doesn't work on functions and we don't want the
	 * compiler doing any crazy optimizations with length.
	 */
	barrier();

	if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
		/* Writer corrupted the read? */
		goto reset;

	memcpy(iter->event, event, length);
	/*
	 * If the page stamp is still the same after this rmb() then the
	 * event was safely copied without the writer entering the page.
	 */
	smp_rmb();

	/* Make sure the page didn't change since we read this */
	if (iter->page_stamp != iter_head_page->page->time_stamp ||
	    commit > rb_page_commit(iter_head_page))
		goto reset;

	iter->next_event = iter->head + length;
	return iter->event;
 reset:
	/* Reset to the beginning */
	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
	iter->next_event = 0;
	iter->missed_events = 1;
	return NULL;
}
2392
/* Size is determined by what has been committed */
static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static __always_inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static __always_inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
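
/*
 * Worked example for rb_event_index(), assuming 4K pages and a 16-byte
 * buffer_data_page header (BUF_PAGE_HDR_SIZE varies with the arch): an
 * event at an address whose low bits are 0x030 has page offset
 * 0x030 (addr & ~PAGE_MASK), giving index 0x30 - 16 = 0x20 into the
 * data area of the sub-buffer.
 */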
2412
static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(&iter->head_page);

	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
	iter->next_event = 0;
}
2432
/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);
		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
		local_inc(&cpu_buffer->pages_lost);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * Otherwise we are an interrupt, and only
	 * want the outermost commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(&new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		struct buffer_page *buffer_tail_page;

		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
		/*
		 * If the tail had moved past next, then we need
		 * to reset the pointer.
		 */
		if (buffer_tail_page != tail_page &&
		    buffer_tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outermost commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}
2594
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      unsigned long tail, struct rb_event_info *info)
{
	struct buffer_page *tail_page = info->tail_page;
	struct ring_buffer_event *event;
	unsigned long length = info->length;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		/*
		 * If the page was filled, then we still need
		 * to update the real_end. Reset it to zero
		 * and the reader will ignore it.
		 */
		if (tail == BUF_PAGE_SIZE)
			tail_page->real_end = 0;

		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);

	/*
	 * Save the original length in the meta data.
	 * This will be used by the reader to add a lost
	 * event counter.
	 */
	tail_page->real_end = tail;

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again, and this space will
	 * not be accounted into 'entries_bytes'.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Make sure the padding is visible before the write update */
		smp_wmb();

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* account for padding bytes */
	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);

	/* Make sure the padding is visible before the tail_page->write update */
	smp_wmb();

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}
2671
static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);

/*
 * This is the slow path, force gcc not to inline it.
 */
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long tail, struct rb_event_info *info)
{
	struct buffer_page *tail_page = info->tail_page;
	struct buffer_page *commit_page = cpu_buffer->commit_page;
	struct trace_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(&next_page);

	/*
	 * If for some reason we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the head page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				local_inc(&cpu_buffer->dropped_events);
				goto out_reset;
			}

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	rb_tail_page_update(cpu_buffer, tail_page, next_page);

 out_again:

	rb_reset_tail(cpu_buffer, tail, info);

	/* Commit what we have for now. */
	rb_end_commit(cpu_buffer);
	/* rb_end_commit() decs committing */
	local_inc(&cpu_buffer->committing);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail, info);

	return NULL;
}
2779
/* Slow path */
static struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
{
	if (abs)
		event->type_len = RINGBUF_TYPE_TIME_STAMP;
	else
		event->type_len = RINGBUF_TYPE_TIME_EXTEND;

	/* Not the first event on the page, or not delta? */
	if (abs || rb_event_index(event)) {
		event->time_delta = delta & TS_MASK;
		event->array[0] = delta >> TS_SHIFT;
	} else {
		/* nope, just zero it */
		event->time_delta = 0;
		event->array[0] = 0;
	}

	return skip_time_extend(event);
}

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline bool sched_clock_stable(void)
{
	return true;
}
#endif
2808
static void
rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
		   struct rb_event_info *info)
{
	u64 write_stamp;

	WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
		  (unsigned long long)info->delta,
		  (unsigned long long)info->ts,
		  (unsigned long long)info->before,
		  (unsigned long long)info->after,
		  (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
		  sched_clock_stable() ? "" :
		  "If you just came from a suspend/resume,\n"
		  "please switch to the trace global clock:\n"
		  " echo global > /sys/kernel/debug/tracing/trace_clock\n"
		  "or add trace_clock=global to the kernel command line\n");
}

static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
			     struct ring_buffer_event **event,
			     struct rb_event_info *info,
			     u64 *delta,
			     unsigned int *length)
{
	bool abs = info->add_timestamp &
		(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);

	if (unlikely(info->delta > (1ULL << 59))) {
		/*
		 * Some timers can use more than 59 bits, and when a timestamp
		 * is added to the buffer, it will lose those bits.
		 */
		if (abs && (info->ts & TS_MSB)) {
			info->delta &= ABS_TS_MASK;

		/* Did the clock go backwards? */
		} else if (info->before == info->after && info->before > info->ts) {
			/* not interrupted */
			static int once;

			/*
			 * This is possible with a recalibrating of the TSC.
			 * Do not produce a call stack, but just report it.
			 */
			if (!once) {
				once++;
				pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
					info->before, info->ts);
			}
		} else
			rb_check_timestamp(cpu_buffer, info);
		if (!abs)
			info->delta = 0;
	}
	*event = rb_add_time_stamp(*event, info->delta, abs);
	*length -= RB_LEN_TIME_EXTEND;
	*delta = 0;
}
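
/*
 * For scale: a 59-bit nanosecond delta overflows only after roughly
 * 2^59 ns, which is about 18 years. The overflow branch above is
 * therefore effectively reached only when a clock misbehaves (e.g. a
 * TSC recalibration), not in normal operation.
 */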
2868
/**
 * rb_update_event - update event type and data
 * @cpu_buffer: The per cpu buffer of the @event
 * @event: the event to update
 * @info: The info to update the @event with (contains length and delta)
 *
 * Update the type and data fields of the @event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
		struct ring_buffer_event *event,
		struct rb_event_info *info)
{
	unsigned length = info->length;
	u64 delta = info->delta;
	unsigned int nest = local_read(&cpu_buffer->committing) - 1;

	if (!WARN_ON_ONCE(nest >= MAX_NEST))
		cpu_buffer->event_stamp[nest] = info->ts;

	/*
	 * If we need to add a timestamp, then we
	 * add it to the start of the reserved space.
	 */
	if (unlikely(info->add_timestamp))
		rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);

	event->time_delta = delta;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
		event->type_len = 0;
		event->array[0] = length;
	} else
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}
2907
static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length++;

	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ARCH_ALIGNMENT);

	/*
	 * In case the time delta is larger than the 27 bits for it
	 * in the header, we need to add a timestamp. If another
	 * event comes in when trying to discard this one to increase
	 * the length, then the timestamp will be added in the allocated
	 * space of this event. If length is bigger than the size needed
	 * for the TIME_EXTEND, then padding has to be used. The event's
	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
	 * As length is a multiple of 4, we only need to worry if it
	 * is 12 (RB_LEN_TIME_EXTEND + 4).
	 */
	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
		length += RB_ALIGNMENT;

	return length;
}
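
/*
 * Worked example, assuming a 4-byte event header (RB_EVNT_HDR_SIZE),
 * 4-byte RB_ARCH_ALIGNMENT and no RB_FORCE_8BYTE_ALIGNMENT: a request
 * for 6 bytes of data becomes 6 + 4 = 10, which ALIGN() rounds up to
 * 12. Since 12 == RB_LEN_TIME_EXTEND(8) + 4, the final bump makes it
 * 16, so a later discard can always be rewritten as a TIME_EXTEND
 * followed by a valid minimum-size (8-byte) padding event.
 */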
2939
static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = READ_ONCE(cpu_buffer->tail_page);

	/*
	 * Make sure the tail_page is still the same and
	 * the next write location is the end of this event
	 */
	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		unsigned long event_length = rb_event_length(event);

		/*
		 * Make the before_stamp different from the write_stamp
		 * so that the next event adds an absolute
		 * value and does not rely on the saved write stamp, which
		 * is now going to be bogus.
		 *
		 * By setting the before_stamp to zero, the next event
		 * is not going to use the write_stamp and will instead
		 * create an absolute timestamp. This means there's no
		 * reason to update the write_stamp!
		 */
		rb_time_set(&cpu_buffer->before_stamp, 0);

		/*
		 * If an event were to come in now, it would see that the
		 * write_stamp and the before_stamp are different, and assume
		 * that this event just added itself before updating
		 * the write stamp. The interrupting event will fix the
		 * write stamp for us, and use an absolute timestamp.
		 */

		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index) {
			/* update counters */
			local_sub(event_length, &cpu_buffer->entries_bytes);
			return 1;
		}
	}

	/* could not discard */
	return 0;
}
3005
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static __always_inline void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->nr_pages * 100;

	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		/*
		 * No need for a memory barrier here, as the update
		 * of the tail_page did it for this page.
		 */
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(&cpu_buffer->commit_page);
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		/* Make sure the readers see the content of what is committed. */
		smp_wmb();
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
		goto again;
}
3068
static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);

	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_end_commit(cpu_buffer);
}
3120
static __always_inline void
rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
	if (buffer->irq_work.waiters_pending) {
		buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&buffer->irq_work.work);
	}

	if (cpu_buffer->irq_work.waiters_pending) {
		cpu_buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&cpu_buffer->irq_work.work);
	}

	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
		return;

	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
		return;

	if (!cpu_buffer->irq_work.full_waiters_pending)
		return;

	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);

	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
		return;

	cpu_buffer->irq_work.wakeup_full = true;
	cpu_buffer->irq_work.full_waiters_pending = false;
	/* irq_work_queue() supplies its own memory barriers */
	irq_work_queue(&cpu_buffer->irq_work.work);
}
3155
#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
# define do_ring_buffer_record_recursion()	\
	do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
#else
# define do_ring_buffer_record_recursion() do { } while (0)
#endif

/*
 * The lock and unlock are done within a preempt disable section.
 * The current_context per_cpu variable can only be modified
 * by the current task between lock and unlock. But it can
 * be modified more than once via an interrupt. To pass this
 * information from the lock to the unlock without having to
 * access the 'in_interrupt()' functions again (which do show
 * a bit of overhead in something as critical as function tracing),
 * we use a bitmask trick.
 *
 *  bit 1 =  NMI context
 *  bit 2 =  IRQ context
 *  bit 3 =  SoftIRQ context
 *  bit 4 =  normal context.
 *
 * This works because this is the order of contexts that can
 * preempt other contexts. A SoftIRQ never preempts an IRQ
 * context.
 *
 * When the context is determined, the corresponding bit is
 * checked and set (if it was set, then a recursion of that context
 * happened).
 *
 * On unlock, we need to clear this bit. To do so, just subtract
 * 1 from the current_context and AND it to itself.
 *
 * (binary)
 *  101 - 1 = 100
 *  101 & 100 = 100 (clearing bit zero)
 *
 *  1010 - 1 = 1001
 *  1010 & 1001 = 1000 (clearing bit 1)
 *
 * The least significant bit can be cleared this way, and it
 * just so happens that it is the same bit corresponding to
 * the current context.
 *
 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
 * is set when a recursion is detected at the current context, and if
 * the TRANSITION bit is already set, it will fail the recursion.
 * This is needed because there's a lag between the changing of
 * interrupt context and updating the preempt count. In this case,
 * a false positive will be found. To handle this, one extra recursion
 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
 * bit is already set, then it is considered a recursion and the function
 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
 *
 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
 * to be cleared, even if it wasn't the context that set it. That is,
 * if an interrupt comes in while the NORMAL bit is set and the ring buffer
 * is called before preempt_count() is updated, since the check will
 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
 * NMI then comes in, it will set the NMI bit, but when the NMI code
 * does the trace_recursive_unlock() it will clear the TRANSITION bit
 * and leave the NMI bit set. But this is fine, because the interrupt
 * code that set the TRANSITION bit will then clear the NMI bit when it
 * calls trace_recursive_unlock(). If another NMI comes in, it will
 * set the TRANSITION bit and continue.
 *
 * Note: The TRANSITION bit only handles a single transition between context.
 */
3224
static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned int val = cpu_buffer->current_context;
	int bit = interrupt_context_level();

	bit = RB_CTX_NORMAL - bit;

	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
		/*
		 * It is possible that this was called by transitioning
		 * between interrupt context, and preempt_count() has not
		 * been updated yet. In this case, use the TRANSITION bit.
		 */
		bit = RB_CTX_TRANSITION;
		if (val & (1 << (bit + cpu_buffer->nest))) {
			do_ring_buffer_record_recursion();
			return 1;
		}
	}

	val |= (1 << (bit + cpu_buffer->nest));
	cpu_buffer->current_context = val;

	return 0;
}

static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->current_context &=
		cpu_buffer->current_context - (1 << cpu_buffer->nest);
}

/* The recursive locking above uses 5 bits */
#define NESTED_BITS 5
3261
/**
 * ring_buffer_nest_start - Allow to trace while nested
 * @buffer: The ring buffer to modify
 *
 * The ring buffer has a safety mechanism to prevent recursion.
 * But there may be a case where a trace needs to be done while
 * tracing something else. In this case, calling this function
 * will allow this function to nest within a currently active
 * ring_buffer_lock_reserve().
 *
 * Call this function before calling another ring_buffer_lock_reserve() and
 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
 */
void ring_buffer_nest_start(struct trace_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* Enabled by ring_buffer_nest_end() */
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];
	/* This is the shift value for the above recursive locking */
	cpu_buffer->nest += NESTED_BITS;
}

/**
 * ring_buffer_nest_end - Allow to trace while nested
 * @buffer: The ring buffer to modify
 *
 * Must be called after ring_buffer_nest_start() and after the
 * ring_buffer_unlock_commit().
 */
void ring_buffer_nest_end(struct trace_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* disabled by ring_buffer_nest_start() */
	cpu = raw_smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];
	/* This is the shift value for the above recursive locking */
	cpu_buffer->nest -= NESTED_BITS;
	preempt_enable_notrace();
}
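
/*
 * A sketch of the intended nesting sequence (the outer reserve/commit
 * pair is assumed to already be in flight when the nested trace is
 * needed):
 *
 *	event = ring_buffer_lock_reserve(buffer, len);	// outer
 *	ring_buffer_nest_start(buffer);
 *	nested = ring_buffer_lock_reserve(buffer, len2);
 *	...
 *	ring_buffer_unlock_commit(buffer, nested);
 *	ring_buffer_nest_end(buffer);
 *	ring_buffer_unlock_commit(buffer, event);	// outer
 */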
3307
/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	rb_wakeups(buffer, cpu_buffer);

	trace_recursive_unlock(cpu_buffer);

	preempt_enable_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
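
/*
 * A minimal write-path sketch for the reserve/commit pair (the payload
 * type and length are illustrative):
 *
 *	struct ring_buffer_event *event;
 *	u32 *payload;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
 *	if (event) {
 *		payload = ring_buffer_event_data(event);
 *		*payload = 42;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 */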
3336
/* Special value to validate all deltas on a page. */
#define CHECK_FULL_PAGE		1L

#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
static void dump_buffer_page(struct buffer_data_page *bpage,
			     struct rb_event_info *info,
			     unsigned long tail)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int e;

	ts = bpage->time_stamp;
	pr_warn(" [%lld] PAGE TIME STAMP\n", ts);

	for (e = 0; e < tail; e += rb_event_length(event)) {

		event = (struct ring_buffer_event *)(bpage->data + e);

		switch (event->type_len) {

		case RINGBUF_TYPE_TIME_EXTEND:
			delta = rb_event_time_stamp(event);
			ts += delta;
			pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta);
			break;

		case RINGBUF_TYPE_TIME_STAMP:
			delta = rb_event_time_stamp(event);
			ts = rb_fix_abs_ts(delta, ts);
			pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta);
			break;

		case RINGBUF_TYPE_PADDING:
			ts += event->time_delta;
			pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta);
			break;

		case RINGBUF_TYPE_DATA:
			ts += event->time_delta;
			pr_warn(" [%lld] delta:%d\n", ts, event->time_delta);
			break;

		default:
			break;
		}
	}
}
3385
static DEFINE_PER_CPU(atomic_t, checking);
static atomic_t ts_dump;

/*
 * Check if the current event time stamp matches the deltas on
 * the buffer page.
 */
static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
			 struct rb_event_info *info,
			 unsigned long tail)
{
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	u64 ts, delta;
	bool full = false;
	int e;

	bpage = info->tail_page->page;

	if (tail == CHECK_FULL_PAGE) {
		full = true;
		tail = local_read(&bpage->commit);
	} else if (info->add_timestamp &
		   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
		/* Ignore events with absolute time stamps */
		return;
	}

	/*
	 * Do not check the first event (skip possible extends too).
	 * Also do not check if previous events have not been committed.
	 */
	if (tail <= 8 || tail > local_read(&bpage->commit))
		return;

	/*
	 * If this interrupted another event, do not check this one.
	 */
	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
		goto out;

	ts = bpage->time_stamp;

	for (e = 0; e < tail; e += rb_event_length(event)) {

		event = (struct ring_buffer_event *)(bpage->data + e);

		switch (event->type_len) {

		case RINGBUF_TYPE_TIME_EXTEND:
			delta = rb_event_time_stamp(event);
			ts += delta;
			break;

		case RINGBUF_TYPE_TIME_STAMP:
			delta = rb_event_time_stamp(event);
			ts = rb_fix_abs_ts(delta, ts);
			break;

		case RINGBUF_TYPE_PADDING:
			if (event->time_delta == 1)
				break;
			fallthrough;
		case RINGBUF_TYPE_DATA:
			ts += event->time_delta;
			break;

		default:
			RB_WARN_ON(cpu_buffer, 1);
		}
	}
	if ((full && ts > info->ts) ||
	    (!full && ts + info->delta != info->ts)) {
		/* If another report is happening, ignore this one */
		if (atomic_inc_return(&ts_dump) != 1) {
			atomic_dec(&ts_dump);
			goto out;
		}
		atomic_inc(&cpu_buffer->record_disabled);
		/* There are some cases during boot up where this can happen */
		WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
		pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
			cpu_buffer->cpu,
			ts + info->delta, info->ts, info->delta,
			info->before, info->after,
			full ? " (full)" : "");
		dump_buffer_page(bpage, info, tail);
		atomic_dec(&ts_dump);
		/* Do not re-enable checking */
		return;
	}
 out:
	atomic_dec(this_cpu_ptr(&checking));
}
#else
static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
				struct rb_event_info *info,
				unsigned long tail)
{
}
#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3487
3488 static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu * cpu_buffer,struct rb_event_info * info)3489 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3490 struct rb_event_info *info)
3491 {
3492 struct ring_buffer_event *event;
3493 struct buffer_page *tail_page;
3494 unsigned long tail, write, w;
3495 bool a_ok;
3496 bool b_ok;
3497
3498 /* Don't let the compiler play games with cpu_buffer->tail_page */
3499 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3500
3501 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;
3502 barrier();
3503 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3504 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3505 barrier();
3506 info->ts = rb_time_stamp(cpu_buffer->buffer);
3507
3508 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3509 info->delta = info->ts;
3510 } else {
3511 /*
3512 * If interrupting an event time update, we may need an
3513 * absolute timestamp.
3514 * Don't bother if this is the start of a new page (w == 0).
3515 */
3516 if (!w) {
3517 /* Use the sub-buffer timestamp */
3518 info->delta = 0;
3519 } else if (unlikely(!a_ok || !b_ok || info->before != info->after)) {
3520 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3521 info->length += RB_LEN_TIME_EXTEND;
3522 } else {
3523 info->delta = info->ts - info->after;
3524 if (unlikely(test_time_stamp(info->delta))) {
3525 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3526 info->length += RB_LEN_TIME_EXTEND;
3527 }
3528 }
3529 }
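	/*
	 * A worked example of the decision above, assuming a nanosecond
	 * clock: time_delta is a 27-bit field, so a delta below 2^27 ns
	 * (~134 ms) fits in the event header and costs nothing extra,
	 * while a delta of, say, 200 ms fails test_time_stamp() and the
	 * event grows by RB_LEN_TIME_EXTEND bytes to carry the full delta.
	 */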
3530
3531 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts);
3532
3533 /*C*/ write = local_add_return(info->length, &tail_page->write);
3534
3535 /* set write to only the index of the write */
3536 write &= RB_WRITE_MASK;
3537
3538 tail = write - info->length;
3539
3540 	/* See if we shot past the end of this buffer page */
3541 if (unlikely(write > BUF_PAGE_SIZE)) {
3542 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3543 return rb_move_tail(cpu_buffer, tail, info);
3544 }
3545
3546 if (likely(tail == w)) {
3547 /* Nothing interrupted us between A and C */
3548 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
3549 /*
3550 * If something came in between C and D, the write stamp
3551 * may now not be in sync. But that's fine as the before_stamp
3552 	 * will be different, and the next event will just be forced
3553 * to use an absolute timestamp.
3554 */
3555 if (likely(!(info->add_timestamp &
3556 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3557 /* This did not interrupt any time update */
3558 info->delta = info->ts - info->after;
3559 else
3560 /* Just use full timestamp for interrupting event */
3561 info->delta = info->ts;
3562 check_buffer(cpu_buffer, info, tail);
3563 } else {
3564 u64 ts;
3565 /* SLOW PATH - Interrupted between A and C */
3566
3567 /* Save the old before_stamp */
3568 a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3569 RB_WARN_ON(cpu_buffer, !a_ok);
3570
3571 /*
3572 * Read a new timestamp and update the before_stamp to make
3573 * the next event after this one force using an absolute
3574 * timestamp. This is in case an interrupt were to come in
3575 * between E and F.
3576 */
3577 ts = rb_time_stamp(cpu_buffer->buffer);
3578 rb_time_set(&cpu_buffer->before_stamp, ts);
3579
3580 barrier();
3581 /*E*/ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3582 /* Was interrupted before here, write_stamp must be valid */
3583 RB_WARN_ON(cpu_buffer, !a_ok);
3584 barrier();
3585 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3586 info->after == info->before && info->after < ts) {
3587 /*
3588 * Nothing came after this event between C and F, it is
3589 * safe to use info->after for the delta as it
3590 * matched info->before and is still valid.
3591 */
3592 info->delta = ts - info->after;
3593 } else {
3594 /*
3595 * Interrupted between C and F:
3596 			 * Lost the previous event's time stamp. Just set the
3597 			 * delta to zero, and this will be the same time as
3598 			 * the event this event interrupted. And the events that
3599 			 * came after this will still be correct (as they would
3600 			 * have built their delta on the previous event).
3601 */
3602 info->delta = 0;
3603 }
3604 info->ts = ts;
3605 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3606 }
3607
3608 /*
3609 * If this is the first commit on the page, then it has the same
3610 * timestamp as the page itself.
3611 */
3612 if (unlikely(!tail && !(info->add_timestamp &
3613 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3614 info->delta = 0;
3615
3616 /* We reserved something on the buffer */
3617
3618 event = __rb_page_index(tail_page, tail);
3619 rb_update_event(cpu_buffer, event, info);
3620
3621 local_inc(&tail_page->entries);
3622
3623 /*
3624 * If this is the first commit on the page, then update
3625 * its timestamp.
3626 */
3627 if (unlikely(!tail))
3628 tail_page->page->time_stamp = info->ts;
3629
3630 /* account for these added bytes */
3631 local_add(info->length, &cpu_buffer->entries_bytes);
3632
3633 return event;
3634 }
3635
3636 static __always_inline struct ring_buffer_event *
3637 rb_reserve_next_event(struct trace_buffer *buffer,
3638 struct ring_buffer_per_cpu *cpu_buffer,
3639 unsigned long length)
3640 {
3641 struct ring_buffer_event *event;
3642 struct rb_event_info info;
3643 int nr_loops = 0;
3644 int add_ts_default;
3645
3646 /* ring buffer does cmpxchg, make sure it is safe in NMI context */
3647 if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
3648 (unlikely(in_nmi()))) {
3649 return NULL;
3650 }
3651
3652 rb_start_commit(cpu_buffer);
3653 /* The commit page can not change after this */
3654
3655 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3656 /*
3657 	 * Due to the ability to swap a cpu buffer between trace buffers,
3658 	 * it is possible it was swapped before we committed.
3659 * (committing stops a swap). We check for it here and
3660 * if it happened, we have to fail the write.
3661 */
3662 barrier();
3663 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3664 local_dec(&cpu_buffer->committing);
3665 local_dec(&cpu_buffer->commits);
3666 return NULL;
3667 }
3668 #endif
3669
3670 info.length = rb_calculate_event_length(length);
3671
3672 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3673 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3674 info.length += RB_LEN_TIME_EXTEND;
3675 if (info.length > BUF_MAX_DATA_SIZE)
3676 goto out_fail;
3677 } else {
3678 add_ts_default = RB_ADD_STAMP_NONE;
3679 }
3680
3681 again:
3682 info.add_timestamp = add_ts_default;
3683 info.delta = 0;
3684
3685 /*
3686 * We allow for interrupts to reenter here and do a trace.
3687 * If one does, it will cause this original code to loop
3688 * back here. Even with heavy interrupts happening, this
3689 * should only happen a few times in a row. If this happens
3690 * 1000 times in a row, there must be either an interrupt
3691 * storm or we have something buggy.
3692 * Bail!
3693 */
3694 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3695 goto out_fail;
3696
3697 event = __rb_reserve_next(cpu_buffer, &info);
3698
3699 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3700 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3701 info.length -= RB_LEN_TIME_EXTEND;
3702 goto again;
3703 }
3704
3705 if (likely(event))
3706 return event;
3707 out_fail:
3708 rb_end_commit(cpu_buffer);
3709 return NULL;
3710 }
3711
3712 /**
3713 * ring_buffer_lock_reserve - reserve a part of the buffer
3714 * @buffer: the ring buffer to reserve from
3715 * @length: the length of the data to reserve (excluding event header)
3716 *
3717 * Returns a reserved event on the ring buffer to copy directly to.
3718 * The user of this interface will need to get the body to write into
3719 * and can use the ring_buffer_event_data() interface.
3720 *
3721 * The length is the length of the data needed, not the event length
3722 * which also includes the event header.
3723 *
3724 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3725 * If NULL is returned, then nothing has been allocated or locked.
3726 */
3727 struct ring_buffer_event *
3728 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3729 {
3730 struct ring_buffer_per_cpu *cpu_buffer;
3731 struct ring_buffer_event *event;
3732 int cpu;
3733
3734 /* If we are tracing schedule, we don't want to recurse */
3735 preempt_disable_notrace();
3736
3737 if (unlikely(atomic_read(&buffer->record_disabled)))
3738 goto out;
3739
3740 cpu = raw_smp_processor_id();
3741
3742 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3743 goto out;
3744
3745 cpu_buffer = buffer->buffers[cpu];
3746
3747 if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3748 goto out;
3749
3750 if (unlikely(length > BUF_MAX_DATA_SIZE))
3751 goto out;
3752
3753 if (unlikely(trace_recursive_lock(cpu_buffer)))
3754 goto out;
3755
3756 event = rb_reserve_next_event(buffer, cpu_buffer, length);
3757 if (!event)
3758 goto out_unlock;
3759
3760 return event;
3761
3762 out_unlock:
3763 trace_recursive_unlock(cpu_buffer);
3764 out:
3765 preempt_enable_notrace();
3766 return NULL;
3767 }
3768 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
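
/*
 * Example usage of the reserve/commit API (a minimal sketch, assuming
 * the two-argument ring_buffer_unlock_commit() of this kernel version;
 * "entry" is a hypothetical struct the caller wants to record):
 *
 *	struct ring_buffer_event *event;
 *	void *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(entry));
 *	if (!event)
 *		return -EBUSY;
 *	body = ring_buffer_event_data(event);
 *	memcpy(body, &entry, sizeof(entry));
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * Note the length passed to the reserve is the payload size only; the
 * event header is accounted for internally.
 */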
3769
3770 /*
3771 * Decrement the entries to the page that an event is on.
3772 * The event does not even need to exist, only the pointer
3773 * to the page it is on. This may only be called before the commit
3774 * takes place.
3775 */
3776 static inline void
3777 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3778 struct ring_buffer_event *event)
3779 {
3780 unsigned long addr = (unsigned long)event;
3781 struct buffer_page *bpage = cpu_buffer->commit_page;
3782 struct buffer_page *start;
3783
3784 addr &= PAGE_MASK;
3785
3786 /* Do the likely case first */
3787 if (likely(bpage->page == (void *)addr)) {
3788 local_dec(&bpage->entries);
3789 return;
3790 }
3791
3792 /*
3793 * Because the commit page may be on the reader page we
3794 * start with the next page and check the end loop there.
3795 */
3796 rb_inc_page(&bpage);
3797 start = bpage;
3798 do {
3799 if (bpage->page == (void *)addr) {
3800 local_dec(&bpage->entries);
3801 return;
3802 }
3803 rb_inc_page(&bpage);
3804 } while (bpage != start);
3805
3806 /* commit not part of this buffer?? */
3807 RB_WARN_ON(cpu_buffer, 1);
3808 }
3809
3810 /**
3811 * ring_buffer_discard_commit - discard an event that has not been committed
3812 * @buffer: the ring buffer
3813 * @event: non committed event to discard
3814 *
3815 * Sometimes an event that is in the ring buffer needs to be ignored.
3816 * This function lets the user discard an event in the ring buffer
3817 * and then that event will not be read later.
3818 *
3819 * This function only works if it is called before the item has been
3820 * committed. It will try to free the event from the ring buffer
3821 * if another event has not been added behind it.
3822 *
3823 * If another event has been added behind it, it will set the event
3824 * up as discarded, and perform the commit.
3825 *
3826 * If this function is called, do not call ring_buffer_unlock_commit on
3827 * the event.
3828 */
3829 void ring_buffer_discard_commit(struct trace_buffer *buffer,
3830 struct ring_buffer_event *event)
3831 {
3832 struct ring_buffer_per_cpu *cpu_buffer;
3833 int cpu;
3834
3835 if (unlikely(has_ext_writer(buffer)))
3836 return;
3837
3838 /* The event is discarded regardless */
3839 rb_event_discard(event);
3840
3841 cpu = smp_processor_id();
3842 cpu_buffer = buffer->buffers[cpu];
3843
3844 /*
3845 * This must only be called if the event has not been
3846 * committed yet. Thus we can assume that preemption
3847 * is still disabled.
3848 */
3849 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3850
3851 rb_decrement_entry(cpu_buffer, event);
3852 if (rb_try_to_discard(cpu_buffer, event))
3853 goto out;
3854
3855 out:
3856 rb_end_commit(cpu_buffer);
3857
3858 trace_recursive_unlock(cpu_buffer);
3859
3860 preempt_enable_notrace();
3861
3862 }
3863 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
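
/*
 * Usage sketch (hedged; fill() and interesting() are hypothetical
 * helpers): discard lets a tracer filter on the data it just wrote
 * instead of having to decide up front:
 *
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (!event)
 *		return;
 *	fill(ring_buffer_event_data(event));
 *	if (interesting(event))
 *		ring_buffer_unlock_commit(buffer, event);
 *	else
 *		ring_buffer_discard_commit(buffer, event);
 */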
3864
3865 /**
3866 * ring_buffer_write - write data to the buffer without reserving
3867 * @buffer: The ring buffer to write to.
3868 * @length: The length of the data being written (excluding the event header)
3869 * @data: The data to write to the buffer.
3870 *
3871 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3872 * one function. If you already have the data to write to the buffer, it
3873 * may be easier to simply call this function.
3874 *
3875 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3876 * and not the length of the event which would hold the header.
3877 */
3878 int ring_buffer_write(struct trace_buffer *buffer,
3879 unsigned long length,
3880 void *data)
3881 {
3882 struct ring_buffer_per_cpu *cpu_buffer;
3883 struct ring_buffer_event *event;
3884 void *body;
3885 int ret = -EBUSY;
3886 int cpu;
3887
3888 preempt_disable_notrace();
3889
3890 if (atomic_read(&buffer->record_disabled))
3891 goto out;
3892
3893 cpu = raw_smp_processor_id();
3894
3895 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3896 goto out;
3897
3898 cpu_buffer = buffer->buffers[cpu];
3899
3900 if (atomic_read(&cpu_buffer->record_disabled))
3901 goto out;
3902
3903 if (length > BUF_MAX_DATA_SIZE)
3904 goto out;
3905
3906 if (unlikely(trace_recursive_lock(cpu_buffer)))
3907 goto out;
3908
3909 event = rb_reserve_next_event(buffer, cpu_buffer, length);
3910 if (!event)
3911 goto out_unlock;
3912
3913 body = rb_event_data(event);
3914
3915 memcpy(body, data, length);
3916
3917 rb_commit(cpu_buffer, event);
3918
3919 rb_wakeups(buffer, cpu_buffer);
3920
3921 ret = 0;
3922
3923 out_unlock:
3924 trace_recursive_unlock(cpu_buffer);
3925
3926 out:
3927 preempt_enable_notrace();
3928
3929 return ret;
3930 }
3931 EXPORT_SYMBOL_GPL(ring_buffer_write);
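
/*
 * Example (a sketch; "pkt" is a hypothetical struct the caller already
 * filled in):
 *
 *	if (ring_buffer_write(buffer, sizeof(pkt), &pkt))
 *		pr_debug("ring buffer full or disabled\n");
 *
 * A non-zero return (-EBUSY) means nothing was written.
 */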
3932
3933 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3934 {
3935 struct buffer_page *reader = cpu_buffer->reader_page;
3936 struct buffer_page *head = rb_set_head_page(cpu_buffer);
3937 struct buffer_page *commit = cpu_buffer->commit_page;
3938
3939 /* In case of error, head will be NULL */
3940 if (unlikely(!head))
3941 return true;
3942
3943 /* Reader should exhaust content in reader page */
3944 if (reader->read != rb_page_commit(reader))
3945 return false;
3946
3947 /*
3948 	 * If writers are committing on the reader page, then since all
3949 	 * committed content has been read, the ring buffer is empty.
3950 */
3951 if (commit == reader)
3952 return true;
3953
3954 /*
3955 * If writers are committing on a page other than reader page
3956 * and head page, there should always be content to read.
3957 */
3958 if (commit != head)
3959 return false;
3960
3961 /*
3962 	 * Writers are committing on the head page; we just need to check
3963 	 * whether it holds committed data, as the reader will swap the
3964 	 * reader page with the head page when it is ready to read.
3965 */
3966 return rb_page_commit(commit) == 0;
3967 }
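
/*
 * The checks above, condensed: a broken head means "empty"; an
 * unfinished reader page means "not empty"; the commit page being the
 * reader page means "empty"; the commit page being some middle page
 * means "not empty"; and the commit page being the head page means
 * "empty" only while that page has no committed data yet.
 */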
3968
3969 /**
3970 * ring_buffer_record_disable - stop all writes into the buffer
3971 * @buffer: The ring buffer to stop writes to.
3972 *
3973 * This prevents all writes to the buffer. Any attempt to write
3974 * to the buffer after this will fail and return NULL.
3975 *
3976 * The caller should call synchronize_rcu() after this.
3977 */
3978 void ring_buffer_record_disable(struct trace_buffer *buffer)
3979 {
3980 atomic_inc(&buffer->record_disabled);
3981 }
3982 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3983
3984 /**
3985 * ring_buffer_record_enable - enable writes to the buffer
3986 * @buffer: The ring buffer to enable writes
3987 *
3988 * Note, multiple disables will need the same number of enables
3989 * to truly enable the writing (much like preempt_disable).
3990 */
3991 void ring_buffer_record_enable(struct trace_buffer *buffer)
3992 {
3993 if (unlikely(has_ext_writer(buffer)))
3994 return;
3995
3996 atomic_dec(&buffer->record_disabled);
3997 }
3998 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3999
4000 /**
4001 * ring_buffer_record_off - stop all writes into the buffer
4002 * @buffer: The ring buffer to stop writes to.
4003 *
4004 * This prevents all writes to the buffer. Any attempt to write
4005 * to the buffer after this will fail and return NULL.
4006 *
4007  * This is different from ring_buffer_record_disable() as
4008  * it works like an on/off switch, whereas the disable() version
4009  * must be paired with an enable().
4010 */
4011 void ring_buffer_record_off(struct trace_buffer *buffer)
4012 {
4013 unsigned int rd;
4014 unsigned int new_rd;
4015
4016 do {
4017 rd = atomic_read(&buffer->record_disabled);
4018 new_rd = rd | RB_BUFFER_OFF;
4019 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4020 }
4021 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4022
4023 /**
4024 * ring_buffer_record_on - restart writes into the buffer
4025 * @buffer: The ring buffer to start writes to.
4026 *
4027 * This enables all writes to the buffer that was disabled by
4028 * ring_buffer_record_off().
4029 *
4030  * This is different from ring_buffer_record_enable() as
4031  * it works like an on/off switch, whereas the enable() version
4032  * must be paired with a disable().
4033 */
4034 void ring_buffer_record_on(struct trace_buffer *buffer)
4035 {
4036 unsigned int rd;
4037 unsigned int new_rd;
4038
4039 if (unlikely(has_ext_writer(buffer)))
4040 return;
4041
4042 do {
4043 rd = atomic_read(&buffer->record_disabled);
4044 new_rd = rd & ~RB_BUFFER_OFF;
4045 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4046 }
4047 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
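
/*
 * Semantics sketch of the two interfaces above: disable()/enable() nest
 * like preempt_disable(), while off()/on() toggle the RB_BUFFER_OFF bit
 * and are idempotent:
 *
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_enable(buffer);	// still disabled
 *	ring_buffer_record_enable(buffer);	// writable again
 *
 * Both states live in the same atomic word, which is why off()/on() use
 * a cmpxchg loop rather than a plain inc/dec.
 */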
4048
4049 /**
4050 * ring_buffer_record_is_on - return true if the ring buffer can write
4051 * @buffer: The ring buffer to see if write is enabled
4052 *
4053 * Returns true if the ring buffer is in a state that it accepts writes.
4054 */
4055 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4056 {
4057 return !atomic_read(&buffer->record_disabled);
4058 }
4059
4060 /**
4061 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4062 * @buffer: The ring buffer to see if write is set enabled
4063 *
4064 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4065 * Note that this does NOT mean it is in a writable state.
4066 *
4067 * It may return true when the ring buffer has been disabled by
4068 * ring_buffer_record_disable(), as that is a temporary disabling of
4069 * the ring buffer.
4070 */
4071 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4072 {
4073 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4074 }
4075
4076 /**
4077 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4078 * @buffer: The ring buffer to stop writes to.
4079 * @cpu: The CPU buffer to stop
4080 *
4081 * This prevents all writes to the buffer. Any attempt to write
4082 * to the buffer after this will fail and return NULL.
4083 *
4084 * The caller should call synchronize_rcu() after this.
4085 */
4086 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4087 {
4088 struct ring_buffer_per_cpu *cpu_buffer;
4089
4090 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4091 return;
4092
4093 cpu_buffer = buffer->buffers[cpu];
4094 atomic_inc(&cpu_buffer->record_disabled);
4095 }
4096 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4097
4098 /**
4099 * ring_buffer_record_enable_cpu - enable writes to the buffer
4100 * @buffer: The ring buffer to enable writes
4101 * @cpu: The CPU to enable.
4102 *
4103 * Note, multiple disables will need the same number of enables
4104 * to truly enable the writing (much like preempt_disable).
4105 */
4106 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4107 {
4108 struct ring_buffer_per_cpu *cpu_buffer;
4109
4110 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4111 return;
4112
4113 cpu_buffer = buffer->buffers[cpu];
4114 atomic_dec(&cpu_buffer->record_disabled);
4115 }
4116 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4117
4118 /*
4119 * The total entries in the ring buffer is the running counter
4120 * of entries entered into the ring buffer, minus the sum of
4121 * the entries read from the ring buffer and the number of
4122 * entries that were overwritten.
4123 */
4124 static inline unsigned long
4125 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4126 {
4127 return local_read(&cpu_buffer->entries) -
4128 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4129 }
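
/*
 * Arithmetic example: with 1000 entries written, 200 overwritten
 * (overrun) and 300 already read, 1000 - (200 + 300) = 500 entries
 * remain in the buffer.
 */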
4130
4131 /**
4132 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4133 * @buffer: The ring buffer
4134 * @cpu: The per CPU buffer to read from.
4135 */
4136 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4137 {
4138 unsigned long flags;
4139 struct ring_buffer_per_cpu *cpu_buffer;
4140 struct buffer_page *bpage;
4141 u64 ret = 0;
4142
4143 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4144 return 0;
4145
4146 cpu_buffer = buffer->buffers[cpu];
4147 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4148 /*
4149 	 * If the tail is on the reader_page, the oldest time stamp is on
4150 	 * the reader page.
4151 */
4152 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4153 bpage = cpu_buffer->reader_page;
4154 else
4155 bpage = rb_set_head_page(cpu_buffer);
4156 if (bpage)
4157 ret = bpage->page->time_stamp;
4158 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4159
4160 return ret;
4161 }
4162 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4163
4164 /**
4165 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4166 * @buffer: The ring buffer
4167 * @cpu: The per CPU buffer to read from.
4168 */
4169 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4170 {
4171 struct ring_buffer_per_cpu *cpu_buffer;
4172 unsigned long ret;
4173
4174 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4175 return 0;
4176
4177 cpu_buffer = buffer->buffers[cpu];
4178 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4179
4180 return ret;
4181 }
4182 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4183
4184 /**
4185 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4186 * @buffer: The ring buffer
4187 * @cpu: The per CPU buffer to get the entries from.
4188 */
4189 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4190 {
4191 struct ring_buffer_per_cpu *cpu_buffer;
4192
4193 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4194 return 0;
4195
4196 cpu_buffer = buffer->buffers[cpu];
4197
4198 return rb_num_of_entries(cpu_buffer);
4199 }
4200 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4201
4202 /**
4203 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4204 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4205 * @buffer: The ring buffer
4206 * @cpu: The per CPU buffer to get the number of overruns from
4207 */
4208 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4209 {
4210 struct ring_buffer_per_cpu *cpu_buffer;
4211 unsigned long ret;
4212
4213 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4214 return 0;
4215
4216 cpu_buffer = buffer->buffers[cpu];
4217 ret = local_read(&cpu_buffer->overrun);
4218
4219 return ret;
4220 }
4221 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4222
4223 /**
4224 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4225 * commits failing due to the buffer wrapping around while there are uncommitted
4226 * events, such as during an interrupt storm.
4227 * @buffer: The ring buffer
4228 * @cpu: The per CPU buffer to get the number of overruns from
4229 */
4230 unsigned long
4231 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4232 {
4233 struct ring_buffer_per_cpu *cpu_buffer;
4234 unsigned long ret;
4235
4236 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4237 return 0;
4238
4239 cpu_buffer = buffer->buffers[cpu];
4240 ret = local_read(&cpu_buffer->commit_overrun);
4241
4242 return ret;
4243 }
4244 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4245
4246 /**
4247 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4248 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4249 * @buffer: The ring buffer
4250 * @cpu: The per CPU buffer to get the number of overruns from
4251 */
4252 unsigned long
4253 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4254 {
4255 struct ring_buffer_per_cpu *cpu_buffer;
4256 unsigned long ret;
4257
4258 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4259 return 0;
4260
4261 cpu_buffer = buffer->buffers[cpu];
4262 ret = local_read(&cpu_buffer->dropped_events);
4263
4264 return ret;
4265 }
4266 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4267
4268 /**
4269 * ring_buffer_read_events_cpu - get the number of events successfully read
4270 * @buffer: The ring buffer
4271 * @cpu: The per CPU buffer to get the number of events read
4272 */
4273 unsigned long
4274 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
4275 {
4276 struct ring_buffer_per_cpu *cpu_buffer;
4277
4278 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4279 return 0;
4280
4281 cpu_buffer = buffer->buffers[cpu];
4282 return cpu_buffer->read;
4283 }
4284 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4285
4286 /**
4287 * ring_buffer_entries - get the number of entries in a buffer
4288 * @buffer: The ring buffer
4289 *
4290 * Returns the total number of entries in the ring buffer
4291 * (all CPU entries)
4292 */
4293 unsigned long ring_buffer_entries(struct trace_buffer *buffer)
4294 {
4295 struct ring_buffer_per_cpu *cpu_buffer;
4296 unsigned long entries = 0;
4297 int cpu;
4298
4299 /* if you care about this being correct, lock the buffer */
4300 for_each_buffer_cpu(buffer, cpu) {
4301 cpu_buffer = buffer->buffers[cpu];
4302 entries += rb_num_of_entries(cpu_buffer);
4303 }
4304
4305 return entries;
4306 }
4307 EXPORT_SYMBOL_GPL(ring_buffer_entries);
4308
4309 /**
4310 * ring_buffer_overruns - get the number of overruns in buffer
4311 * @buffer: The ring buffer
4312 *
4313 * Returns the total number of overruns in the ring buffer
4314 * (all CPU entries)
4315 */
4316 unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
4317 {
4318 struct ring_buffer_per_cpu *cpu_buffer;
4319 unsigned long overruns = 0;
4320 int cpu;
4321
4322 /* if you care about this being correct, lock the buffer */
4323 for_each_buffer_cpu(buffer, cpu) {
4324 cpu_buffer = buffer->buffers[cpu];
4325 overruns += local_read(&cpu_buffer->overrun);
4326 }
4327
4328 return overruns;
4329 }
4330 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
4331
4332 static void rb_iter_reset(struct ring_buffer_iter *iter)
4333 {
4334 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4335
4336 /* Iterator usage is expected to have record disabled */
4337 iter->head_page = cpu_buffer->reader_page;
4338 iter->head = cpu_buffer->reader_page->read;
4339 iter->next_event = iter->head;
4340
4341 iter->cache_reader_page = iter->head_page;
4342 iter->cache_read = cpu_buffer->read;
4343
4344 if (iter->head) {
4345 iter->read_stamp = cpu_buffer->read_stamp;
4346 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4347 } else {
4348 iter->read_stamp = iter->head_page->page->time_stamp;
4349 iter->page_stamp = iter->read_stamp;
4350 }
4351 }
4352
4353 /**
4354 * ring_buffer_iter_reset - reset an iterator
4355 * @iter: The iterator to reset
4356 *
4357 * Resets the iterator, so that it will start from the beginning
4358 * again.
4359 */
4360 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4361 {
4362 struct ring_buffer_per_cpu *cpu_buffer;
4363 unsigned long flags;
4364
4365 if (!iter)
4366 return;
4367
4368 cpu_buffer = iter->cpu_buffer;
4369
4370 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4371 rb_iter_reset(iter);
4372 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4373 }
4374 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4375
4376 /**
4377 * ring_buffer_iter_empty - check if an iterator has no more to read
4378 * @iter: The iterator to check
4379 */
4380 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4381 {
4382 struct ring_buffer_per_cpu *cpu_buffer;
4383 struct buffer_page *reader;
4384 struct buffer_page *head_page;
4385 struct buffer_page *commit_page;
4386 struct buffer_page *curr_commit_page;
4387 unsigned commit;
4388 u64 curr_commit_ts;
4389 u64 commit_ts;
4390
4391 cpu_buffer = iter->cpu_buffer;
4392 reader = cpu_buffer->reader_page;
4393 head_page = cpu_buffer->head_page;
4394 commit_page = cpu_buffer->commit_page;
4395 commit_ts = commit_page->page->time_stamp;
4396
4397 /*
4398 * When the writer goes across pages, it issues a cmpxchg which
4399 * is a mb(), which will synchronize with the rmb here.
4400 * (see rb_tail_page_update())
4401 */
4402 smp_rmb();
4403 commit = rb_page_commit(commit_page);
4404 /* We want to make sure that the commit page doesn't change */
4405 smp_rmb();
4406
4407 /* Make sure commit page didn't change */
4408 curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4409 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4410
4411 /* If the commit page changed, then there's more data */
4412 if (curr_commit_page != commit_page ||
4413 curr_commit_ts != commit_ts)
4414 return 0;
4415
4416 /* Still racy, as it may return a false positive, but that's OK */
4417 return ((iter->head_page == commit_page && iter->head >= commit) ||
4418 (iter->head_page == reader && commit_page == head_page &&
4419 head_page->read == commit &&
4420 iter->head == rb_page_commit(cpu_buffer->reader_page)));
4421 }
4422 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4423
4424 static void
4425 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4426 struct ring_buffer_event *event)
4427 {
4428 u64 delta;
4429
4430 switch (event->type_len) {
4431 case RINGBUF_TYPE_PADDING:
4432 return;
4433
4434 case RINGBUF_TYPE_TIME_EXTEND:
4435 delta = rb_event_time_stamp(event);
4436 cpu_buffer->read_stamp += delta;
4437 return;
4438
4439 case RINGBUF_TYPE_TIME_STAMP:
4440 delta = rb_event_time_stamp(event);
4441 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
4442 cpu_buffer->read_stamp = delta;
4443 return;
4444
4445 case RINGBUF_TYPE_DATA:
4446 cpu_buffer->read_stamp += event->time_delta;
4447 return;
4448
4449 default:
4450 RB_WARN_ON(cpu_buffer, 1);
4451 }
4452 return;
4453 }
4454
4455 static void
4456 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4457 struct ring_buffer_event *event)
4458 {
4459 u64 delta;
4460
4461 switch (event->type_len) {
4462 case RINGBUF_TYPE_PADDING:
4463 return;
4464
4465 case RINGBUF_TYPE_TIME_EXTEND:
4466 delta = rb_event_time_stamp(event);
4467 iter->read_stamp += delta;
4468 return;
4469
4470 case RINGBUF_TYPE_TIME_STAMP:
4471 delta = rb_event_time_stamp(event);
4472 delta = rb_fix_abs_ts(delta, iter->read_stamp);
4473 iter->read_stamp = delta;
4474 return;
4475
4476 case RINGBUF_TYPE_DATA:
4477 iter->read_stamp += event->time_delta;
4478 return;
4479
4480 default:
4481 RB_WARN_ON(iter->cpu_buffer, 1);
4482 }
4483 return;
4484 }
4485
4486 static void __set_head_page_flag(struct buffer_page *head, int flag)
4487 {
4488 struct list_head *prev = head->list.prev;
4489
4490 prev->next = (struct list_head *)(((unsigned long)prev->next & ~RB_FLAG_MASK) | flag);
4491 }
4492
4493 static int __read_footer_reader_status(struct buffer_page *bpage)
4494 {
4495 struct rb_ext_page_footer *footer = rb_ext_page_get_footer(bpage->page);
4496
4497 return atomic_read(&footer->reader_status);
4498 }
4499
4500 static int __read_footer_writer_status(struct buffer_page *bpage)
4501 {
4502 struct rb_ext_page_footer *footer = rb_ext_page_get_footer(bpage->page);
4503
4504 return atomic_read(&footer->writer_status);
4505 }
4506
4507 static struct buffer_page *
4508 ring_buffer_search_footer(struct buffer_page *start, unsigned long flag)
4509 {
4510 bool search_writer = flag == RB_PAGE_FT_COMMIT;
4511 struct buffer_page *bpage = start;
4512 unsigned long status;
4513 int cnt = 0;
4514 again:
4515 do {
4516 status = search_writer ? __read_footer_writer_status(bpage) :
4517 __read_footer_reader_status(bpage);
4518 if (flag & status)
4519 return bpage;
4520
4521 rb_inc_page(&bpage);
4522 } while (bpage != start);
4523
4524 /*
4525 * There's a chance the writer is in the middle of moving the flag and
4526 * we might not find anything after a first round. Let's try again.
4527 */
4528 if (cnt++ < 3)
4529 goto again;
4530
4531 return NULL;
4532 }
4533
4534 static struct buffer_page *
4535 noinline rb_swap_reader_page_ext(struct ring_buffer_per_cpu *cpu_buffer)
4536 {
4537 struct buffer_page *new_reader, *new_rb_page, *new_head;
4538 struct rb_ext_page_footer *footer;
4539 unsigned long overrun;
4540
4541 if (cpu_buffer->buffer->ext_cb->swap_reader(cpu_buffer->cpu)) {
4542 WARN_ON(1);
4543 return NULL;
4544 }
4545
4546 new_rb_page = cpu_buffer->reader_page;
4547
4548 /*
4549 * Find what page is the new reader... starting with the latest known
4550 * head.
4551 */
4552 new_reader = ring_buffer_search_footer(cpu_buffer->head_page,
4553 RB_PAGE_FT_READER);
4554 if (!new_reader) {
4555 WARN_ON(1);
4556 return NULL;
4557 }
4558
4559 /* ... and install it into the ring buffer in place of the old head */
4560 rb_list_head_clear(&new_reader->list);
4561 new_rb_page->list.next = new_reader->list.next;
4562 new_rb_page->list.prev = new_reader->list.prev;
4563 new_rb_page->list.next->prev = &new_rb_page->list;
4564 new_rb_page->list.prev->next = &new_rb_page->list;
4565
4566 cpu_buffer->reader_page = new_reader;
4567 cpu_buffer->reader_page->read = 0;
4568
4569 /* Install the new head page */
4570 new_head = new_rb_page;
4571 rb_inc_page(&new_head);
4572 cpu_buffer->head_page = new_head;
4573
4574 /*
4575 * cpu_buffer->pages just needs to point to the buffer, it
4576 	 * has no specific buffer page to point to. Let's move it out
4577 * of our way so we don't accidentally swap it.
4578 */
4579 cpu_buffer->pages = &new_head->list;
4580
4581 __set_head_page_flag(new_head, RB_PAGE_HEAD);
4582
4583 footer = rb_ext_page_get_footer(new_reader->page);
4584 overrun = footer->stats.overrun;
4585 if (overrun != cpu_buffer->last_overrun) {
4586 cpu_buffer->lost_events = overrun - cpu_buffer->last_overrun;
4587 cpu_buffer->last_overrun = overrun;
4588 }
4589
4590 return new_reader;
4591 }
4592
4593 static struct buffer_page *
4594 rb_swap_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4595 {
4596 struct buffer_page *reader;
4597 unsigned long overwrite;
4598 int ret;
4599
4600 /*
4601 * Reset the reader page to size zero.
4602 */
4603 local_set(&cpu_buffer->reader_page->write, 0);
4604 local_set(&cpu_buffer->reader_page->entries, 0);
4605 local_set(&cpu_buffer->reader_page->page->commit, 0);
4606 cpu_buffer->reader_page->real_end = 0;
4607
4608 spin:
4609 /*
4610 * Splice the empty reader page into the list around the head.
4611 */
4612 reader = rb_set_head_page(cpu_buffer);
4613 if (!reader)
4614 return NULL;
4615
4616 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4617 cpu_buffer->reader_page->list.prev = reader->list.prev;
4618
4619 /*
4620 * cpu_buffer->pages just needs to point to the buffer, it
4621 	 * has no specific buffer page to point to. Let's move it out
4622 * of our way so we don't accidentally swap it.
4623 */
4624 cpu_buffer->pages = reader->list.prev;
4625
4626 /* The reader page will be pointing to the new head */
4627 rb_set_list_to_head(&cpu_buffer->reader_page->list);
4628
4629 /*
4630 * We want to make sure we read the overruns after we set up our
4631 * pointers to the next object. The writer side does a
4632 * cmpxchg to cross pages which acts as the mb on the writer
4633 * side. Note, the reader will constantly fail the swap
4634 * while the writer is updating the pointers, so this
4635 * guarantees that the overwrite recorded here is the one we
4636 * want to compare with the last_overrun.
4637 */
4638 smp_mb();
4639 overwrite = local_read(&(cpu_buffer->overrun));
4640
4641 /*
4642 * Here's the tricky part.
4643 *
4644 * We need to move the pointer past the header page.
4645 * But we can only do that if a writer is not currently
4646 * moving it. The page before the header page has the
4647 	 * flag bit '1' set if it is pointing to the page we want,
4648 	 * but if the writer is in the process of moving it
4649 	 * then it will be '2', or '0' if it has already moved.
4650 */
4651
4652 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4653
4654 /*
4655 * If we did not convert it, then we must try again.
4656 */
4657 if (!ret)
4658 goto spin;
4659
4660 /*
4661 * Yay! We succeeded in replacing the page.
4662 *
4663 * Now make the new head point back to the reader page.
4664 */
4665 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4666 rb_inc_page(&cpu_buffer->head_page);
4667
4668 local_inc(&cpu_buffer->pages_read);
4669
4670 /* Finally update the reader page to the new head */
4671 cpu_buffer->reader_page = reader;
4672 cpu_buffer->reader_page->read = 0;
4673
4674 if (overwrite != cpu_buffer->last_overrun) {
4675 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4676 cpu_buffer->last_overrun = overwrite;
4677 }
4678
4679 return reader;
4680 }
4681
4682 static struct buffer_page *
4683 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4684 {
4685 struct buffer_page *reader = NULL;
4686 unsigned long flags;
4687 int nr_loops = 0;
4688 unsigned int page_size;
4689
4690 local_irq_save(flags);
4691 arch_spin_lock(&cpu_buffer->lock);
4692
4693 again:
4694 /*
4695 * This should normally only loop twice. But because the
4696 * start of the reader inserts an empty page, it causes
4697 * a case where we will loop three times. There should be no
4698 * reason to loop four times (that I know of).
4699 */
4700 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4701 reader = NULL;
4702 goto out;
4703 }
4704
4705 reader = cpu_buffer->reader_page;
4706
4707 /* If there's more to read, return this page */
4708 if (cpu_buffer->reader_page->read < rb_page_size(reader))
4709 goto out;
4710
4711 page_size = rb_page_size(reader);
4712 /* Never should we have an index greater than the size */
4713 if (RB_WARN_ON(cpu_buffer,
4714 cpu_buffer->reader_page->read > page_size))
4715 goto out;
4716
4717 /* check if we caught up to the tail */
4718 reader = NULL;
4719 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4720 goto out;
4721
4722 /* Don't bother swapping if the ring buffer is empty */
4723 if (rb_num_of_entries(cpu_buffer) == 0)
4724 goto out;
4725
4726 if (rb_has_ext_writer(cpu_buffer))
4727 reader = rb_swap_reader_page_ext(cpu_buffer);
4728 else
4729 reader = rb_swap_reader_page(cpu_buffer);
4730
4731 if (reader)
4732 goto again;
4733
4734 out:
4735 /* Update the read_stamp on the first event */
4736 if (reader && reader->read == 0)
4737 cpu_buffer->read_stamp = reader->page->time_stamp;
4738
4739 arch_spin_unlock(&cpu_buffer->lock);
4740 local_irq_restore(flags);
4741
4742 /*
4743 	 * The writer has preemption disabled, so wait for it; but not
4744 	 * forever (although 1 second is pretty much "forever").
4745 */
4746 #define USECS_WAIT 1000000
4747 for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4748 /* If the write is past the end of page, a writer is still updating it */
4749 if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
4750 break;
4751
4752 udelay(1);
4753
4754 /* Get the latest version of the reader write value */
4755 smp_rmb();
4756 }
4757
4758 /* The writer is not moving forward? Something is wrong */
4759 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4760 reader = NULL;
4761
4762 /*
4763 * Make sure we see any padding after the write update
4764 * (see rb_reset_tail()).
4765 *
4766 * In addition, a writer may be writing on the reader page
4767 * if the page has not been fully filled, so the read barrier
4768 * is also needed to make sure we see the content of what is
4769 * committed by the writer (see rb_set_commit_to_write()).
4770 */
4771 smp_rmb();
4772
4773
4774 return reader;
4775 }
4776
4777 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4778 {
4779 struct ring_buffer_event *event;
4780 struct buffer_page *reader;
4781 unsigned length;
4782
4783 reader = rb_get_reader_page(cpu_buffer);
4784
4785 /* This function should not be called when buffer is empty */
4786 if (RB_WARN_ON(cpu_buffer, !reader))
4787 return;
4788
4789 event = rb_reader_event(cpu_buffer);
4790
4791 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4792 cpu_buffer->read++;
4793
4794 rb_update_read_stamp(cpu_buffer, event);
4795
4796 length = rb_event_length(event);
4797 cpu_buffer->reader_page->read += length;
4798 cpu_buffer->read_bytes += length;
4799 }
4800
4801 static void rb_advance_iter(struct ring_buffer_iter *iter)
4802 {
4803 struct ring_buffer_per_cpu *cpu_buffer;
4804
4805 cpu_buffer = iter->cpu_buffer;
4806
4807 /* If head == next_event then we need to jump to the next event */
4808 if (iter->head == iter->next_event) {
4809 /* If the event gets overwritten again, there's nothing to do */
4810 if (rb_iter_head_event(iter) == NULL)
4811 return;
4812 }
4813
4814 iter->head = iter->next_event;
4815
4816 /*
4817 * Check if we are at the end of the buffer.
4818 */
4819 if (iter->next_event >= rb_page_size(iter->head_page)) {
4820 /* discarded commits can make the page empty */
4821 if (iter->head_page == cpu_buffer->commit_page)
4822 return;
4823 rb_inc_iter(iter);
4824 return;
4825 }
4826
4827 rb_update_iter_read_stamp(iter, iter->event);
4828 }
4829
4830 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4831 {
4832 return cpu_buffer->lost_events;
4833 }
4834
4835 static struct ring_buffer_event *
4836 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4837 unsigned long *lost_events)
4838 {
4839 struct ring_buffer_event *event;
4840 struct buffer_page *reader;
4841 int nr_loops = 0;
4842
4843 if (ts)
4844 *ts = 0;
4845 again:
4846 /*
4847 * We repeat when a time extend is encountered.
4848 * Since the time extend is always attached to a data event,
4849 * we should never loop more than once.
4850 * (We never hit the following condition more than twice).
4851 */
4852 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4853 return NULL;
4854
4855 reader = rb_get_reader_page(cpu_buffer);
4856 if (!reader)
4857 return NULL;
4858
4859 event = rb_reader_event(cpu_buffer);
4860
4861 switch (event->type_len) {
4862 case RINGBUF_TYPE_PADDING:
4863 if (rb_null_event(event))
4864 RB_WARN_ON(cpu_buffer, 1);
4865 /*
4866 * Because the writer could be discarding every
4867 * event it creates (which would probably be bad)
4868 * if we were to go back to "again" then we may never
4869 * catch up, and will trigger the warn on, or lock
4870 * the box. Return the padding, and we will release
4871 * the current locks, and try again.
4872 */
4873 return event;
4874
4875 case RINGBUF_TYPE_TIME_EXTEND:
4876 /* Internal data, OK to advance */
4877 rb_advance_reader(cpu_buffer);
4878 goto again;
4879
4880 case RINGBUF_TYPE_TIME_STAMP:
4881 if (ts) {
4882 *ts = rb_event_time_stamp(event);
4883 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
4884 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4885 cpu_buffer->cpu, ts);
4886 }
4887 /* Internal data, OK to advance */
4888 rb_advance_reader(cpu_buffer);
4889 goto again;
4890
4891 case RINGBUF_TYPE_DATA:
4892 if (ts && !(*ts)) {
4893 *ts = cpu_buffer->read_stamp + event->time_delta;
4894 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4895 cpu_buffer->cpu, ts);
4896 }
4897 if (lost_events)
4898 *lost_events = rb_lost_events(cpu_buffer);
4899 return event;
4900
4901 default:
4902 RB_WARN_ON(cpu_buffer, 1);
4903 }
4904
4905 return NULL;
4906 }
4907 EXPORT_SYMBOL_GPL(ring_buffer_peek);
4908
4909 static struct ring_buffer_event *
4910 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4911 {
4912 struct trace_buffer *buffer;
4913 struct ring_buffer_per_cpu *cpu_buffer;
4914 struct ring_buffer_event *event;
4915 int nr_loops = 0;
4916
4917 if (ts)
4918 *ts = 0;
4919
4920 cpu_buffer = iter->cpu_buffer;
4921 buffer = cpu_buffer->buffer;
4922
4923 /*
4924 * Check if someone performed a consuming read to
4925 * the buffer. A consuming read invalidates the iterator
4926 * and we need to reset the iterator in this case.
4927 */
4928 if (unlikely(iter->cache_read != cpu_buffer->read ||
4929 iter->cache_reader_page != cpu_buffer->reader_page))
4930 rb_iter_reset(iter);
4931
4932 again:
4933 if (ring_buffer_iter_empty(iter))
4934 return NULL;
4935
4936 /*
4937 * As the writer can mess with what the iterator is trying
4938 * to read, just give up if we fail to get an event after
4939 * three tries. The iterator is not as reliable when reading
4940 * the ring buffer with an active write as the consumer is.
4941 	 * Do not warn if three failures are reached.
4942 */
4943 if (++nr_loops > 3)
4944 return NULL;
4945
4946 if (rb_per_cpu_empty(cpu_buffer))
4947 return NULL;
4948
4949 if (iter->head >= rb_page_size(iter->head_page)) {
4950 rb_inc_iter(iter);
4951 goto again;
4952 }
4953
4954 event = rb_iter_head_event(iter);
4955 if (!event)
4956 goto again;
4957
4958 switch (event->type_len) {
4959 case RINGBUF_TYPE_PADDING:
4960 if (rb_null_event(event)) {
4961 rb_inc_iter(iter);
4962 goto again;
4963 }
4964 rb_advance_iter(iter);
4965 return event;
4966
4967 case RINGBUF_TYPE_TIME_EXTEND:
4968 /* Internal data, OK to advance */
4969 rb_advance_iter(iter);
4970 goto again;
4971
4972 case RINGBUF_TYPE_TIME_STAMP:
4973 if (ts) {
4974 *ts = rb_event_time_stamp(event);
4975 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4976 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4977 cpu_buffer->cpu, ts);
4978 }
4979 /* Internal data, OK to advance */
4980 rb_advance_iter(iter);
4981 goto again;
4982
4983 case RINGBUF_TYPE_DATA:
4984 if (ts && !(*ts)) {
4985 *ts = iter->read_stamp + event->time_delta;
4986 ring_buffer_normalize_time_stamp(buffer,
4987 cpu_buffer->cpu, ts);
4988 }
4989 return event;
4990
4991 default:
4992 RB_WARN_ON(cpu_buffer, 1);
4993 }
4994
4995 return NULL;
4996 }
4997 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4998
4999 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
5000 {
5001 if (likely(!in_nmi())) {
5002 raw_spin_lock(&cpu_buffer->reader_lock);
5003 return true;
5004 }
5005
5006 /*
5007 * If an NMI die dumps out the content of the ring buffer
5008 * trylock must be used to prevent a deadlock if the NMI
5009 * preempted a task that holds the ring buffer locks. If
5010 * we get the lock then all is fine, if not, then continue
5011 * to do the read, but this can corrupt the ring buffer,
5012 * so it must be permanently disabled from future writes.
5013 	 * Reading from an NMI is a one-shot deal.
5014 */
5015 if (raw_spin_trylock(&cpu_buffer->reader_lock))
5016 return true;
5017
5018 /* Continue without locking, but disable the ring buffer */
5019 atomic_inc(&cpu_buffer->record_disabled);
5020 return false;
5021 }
5022
5023 static inline void
5024 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
5025 {
5026 if (likely(locked))
5027 raw_spin_unlock(&cpu_buffer->reader_lock);
5028 return;
5029 }
5030
5031 /**
5032 * ring_buffer_peek - peek at the next event to be read
5033 * @buffer: The ring buffer to read
5034  * @cpu: The cpu to peek at
5035 * @ts: The timestamp counter of this event.
5036 * @lost_events: a variable to store if events were lost (may be NULL)
5037 *
5038 * This will return the event that will be read next, but does
5039 * not consume the data.
5040 */
5041 struct ring_buffer_event *
5042 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
5043 unsigned long *lost_events)
5044 {
5045 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5046 struct ring_buffer_event *event;
5047 unsigned long flags;
5048 bool dolock;
5049
5050 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5051 return NULL;
5052
5053 again:
5054 local_irq_save(flags);
5055 dolock = rb_reader_lock(cpu_buffer);
5056 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5057 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5058 rb_advance_reader(cpu_buffer);
5059 rb_reader_unlock(cpu_buffer, dolock);
5060 local_irq_restore(flags);
5061
5062 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5063 goto again;
5064
5065 return event;
5066 }
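
/*
 * A peek does not consume: two back-to-back peeks with no intervening
 * consume return the same event (a sketch; assumes no writer overwrote
 * the reader page in between):
 *
 *	e1 = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	e2 = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	// e1 == e2
 */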
5067
5068 /**
 * ring_buffer_iter_dropped - report if there are dropped events
5069  * @iter: The ring buffer iterator
5070  *
5071  * Returns true if there were dropped events since the last peek.
5072 */
5073 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
5074 {
5075 bool ret = iter->missed_events != 0;
5076
5077 iter->missed_events = 0;
5078 return ret;
5079 }
5080 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
5081
5082 /**
5083 * ring_buffer_iter_peek - peek at the next event to be read
5084 * @iter: The ring buffer iterator
5085 * @ts: The timestamp counter of this event.
5086 *
5087 * This will return the event that will be read next, but does
5088 * not increment the iterator.
5089 */
5090 struct ring_buffer_event *
5091 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5092 {
5093 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5094 struct ring_buffer_event *event;
5095 unsigned long flags;
5096
5097 again:
5098 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5099 event = rb_iter_peek(iter, ts);
5100 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5101
5102 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5103 goto again;
5104
5105 return event;
5106 }
5107
5108 /**
5109 * ring_buffer_consume - return an event and consume it
5110 * @buffer: The ring buffer to get the next event from
5111 * @cpu: the cpu to read the buffer from
5112 * @ts: a variable to store the timestamp (may be NULL)
5113 * @lost_events: a variable to store if events were lost (may be NULL)
5114 *
5115 * Returns the next event in the ring buffer, and that event is consumed.
5116  * Meaning that sequential reads will keep returning a different event,
5117 * and eventually empty the ring buffer if the producer is slower.
5118 */
5119 struct ring_buffer_event *
5120 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5121 unsigned long *lost_events)
5122 {
5123 struct ring_buffer_per_cpu *cpu_buffer;
5124 struct ring_buffer_event *event = NULL;
5125 unsigned long flags;
5126 bool dolock;
5127
5128 again:
5129 /* might be called in atomic */
5130 preempt_disable();
5131
5132 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5133 goto out;
5134
5135 cpu_buffer = buffer->buffers[cpu];
5136 local_irq_save(flags);
5137 dolock = rb_reader_lock(cpu_buffer);
5138
5139 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5140 if (event) {
5141 cpu_buffer->lost_events = 0;
5142 rb_advance_reader(cpu_buffer);
5143 }
5144
5145 rb_reader_unlock(cpu_buffer, dolock);
5146 local_irq_restore(flags);
5147
5148 out:
5149 preempt_enable();
5150
5151 if (event && event->type_len == RINGBUF_TYPE_PADDING)
5152 goto again;
5153
5154 return event;
5155 }
5156 EXPORT_SYMBOL_GPL(ring_buffer_consume);
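
/*
 * Typical consumer loop (a sketch; process() is a hypothetical
 * callback):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_info("lost %lu events\n", lost);
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event));
 *	}
 *
 * Each iteration consumes one event, so the loop drains the per-cpu
 * buffer while writers keep producing.
 */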
5157
5158 static void ring_buffer_update_view(struct ring_buffer_per_cpu *cpu_buffer)
5159 {
5160 struct rb_ext_page_footer *footer;
5161 struct buffer_page *bpage;
5162
5163 if (!rb_has_ext_writer(cpu_buffer))
5164 return;
5165
5166 raw_spin_lock_irq(&cpu_buffer->reader_lock);
5167 arch_spin_lock(&cpu_buffer->lock);
5168
5169 cpu_buffer->buffer->ext_cb->update_footers(cpu_buffer->cpu);
5170
5171 bpage = cpu_buffer->reader_page;
5172 footer = rb_ext_page_get_footer(bpage->page);
5173
5174 local_set(&cpu_buffer->entries, footer->stats.entries);
5175 local_set(&cpu_buffer->pages_touched, footer->stats.pages_touched);
5176 local_set(&cpu_buffer->overrun, footer->stats.overrun);
5177
5178 /* Update the commit page */
5179 bpage = ring_buffer_search_footer(cpu_buffer->commit_page,
5180 RB_PAGE_FT_COMMIT);
5181 if (!bpage) {
5182 WARN_ON(1);
5183 goto unlock;
5184 }
5185 cpu_buffer->commit_page = bpage;
5186
5187 /* Update the head page */
5188 bpage = ring_buffer_search_footer(cpu_buffer->head_page,
5189 RB_PAGE_FT_HEAD);
5190 if (!bpage) {
5191 WARN_ON(1);
5192 goto unlock;
5193 }
5194
5195 /* Reset the previous RB_PAGE_HEAD flag */
5196 __set_head_page_flag(cpu_buffer->head_page, RB_PAGE_NORMAL);
5197
5198 /* Set RB_PAGE_HEAD flag pointing to the new head */
5199 __set_head_page_flag(bpage, RB_PAGE_HEAD);
5200
5201 cpu_buffer->reader_page->list.next = &cpu_buffer->head_page->list;
5202
5203 cpu_buffer->head_page = bpage;
5204
5205 unlock:
5206 arch_spin_unlock(&cpu_buffer->lock);
5207 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
5208 }
5209
5210 int ring_buffer_poke(struct trace_buffer *buffer, int cpu)
5211 {
5212 struct ring_buffer_per_cpu *cpu_buffer;
5213
5214 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5215 return -EINVAL;
5216
5217 cpu_buffer = buffer->buffers[cpu];
5218
5219 ring_buffer_update_view(cpu_buffer);
5220 rb_wakeups(buffer, cpu_buffer);
5221
5222 return 0;
5223 }
5224
5225 /**
5226 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5227 * @buffer: The ring buffer to read from
5228 * @cpu: The cpu buffer to iterate over
5229 * @flags: gfp flags to use for memory allocation
5230 *
5231 * This performs the initial preparations necessary to iterate
5232 * through the buffer. Memory is allocated, buffer recording
5233 * is disabled, and the iterator pointer is returned to the caller.
5234 *
5235 * Disabling buffer recording prevents the reading from being
5236 * corrupted. This is not a consuming read, so a producer is not
5237 * expected.
5238 *
5239 * After a sequence of ring_buffer_read_prepare calls, the user is
5240 * expected to make at least one call to ring_buffer_read_prepare_sync.
5241 * Afterwards, ring_buffer_read_start is invoked to get things going
5242 * for real.
5243 *
5244 * This overall must be paired with ring_buffer_read_finish.
5245 */
5246 struct ring_buffer_iter *
5247 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5248 {
5249 struct ring_buffer_per_cpu *cpu_buffer;
5250 struct ring_buffer_iter *iter;
5251
5252 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5253 return NULL;
5254
5255 iter = kzalloc(sizeof(*iter), flags);
5256 if (!iter)
5257 return NULL;
5258
5259 /* Holds the entire event: data and meta data */
5260 iter->event = kmalloc(BUF_PAGE_SIZE, flags);
5261 if (!iter->event) {
5262 kfree(iter);
5263 return NULL;
5264 }
5265
5266 cpu_buffer = buffer->buffers[cpu];
5267
5268 iter->cpu_buffer = cpu_buffer;
5269
5270 atomic_inc(&cpu_buffer->resize_disabled);
5271
5272 ring_buffer_update_view(cpu_buffer);
5273
5274 return iter;
5275 }
5276 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
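
/*
 * The full non-consuming read sequence, sketched (error handling
 * elided):
 *
 *	struct ring_buffer_iter *iter[NR_CPUS];
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		iter[cpu] = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	for_each_online_cpu(cpu)
 *		ring_buffer_read_start(iter[cpu]);
 *	// ... iterate with ring_buffer_iter_peek()/ring_buffer_iter_advance()
 *	for_each_online_cpu(cpu)
 *		ring_buffer_read_finish(iter[cpu]);
 */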
5277
5278 /**
5279 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5280 *
5281 * All previously invoked ring_buffer_read_prepare calls to prepare
5282 * iterators will be synchronized. Afterwards, ring_buffer_read_start
5283 * calls on those iterators are allowed.
5284 */
5285 void
5286 ring_buffer_read_prepare_sync(void)
5287 {
5288 synchronize_rcu();
5289 }
5290 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5291
5292 /**
5293 * ring_buffer_read_start - start a non consuming read of the buffer
5294 * @iter: The iterator returned by ring_buffer_read_prepare
5295 *
5296 * This finalizes the startup of an iteration through the buffer.
5297 * The iterator comes from a call to ring_buffer_read_prepare and
5298 * an intervening ring_buffer_read_prepare_sync must have been
5299 * performed.
5300 *
5301 * Must be paired with ring_buffer_read_finish.
5302 */
5303 void
5304 ring_buffer_read_start(struct ring_buffer_iter *iter)
5305 {
5306 struct ring_buffer_per_cpu *cpu_buffer;
5307 unsigned long flags;
5308
5309 if (!iter)
5310 return;
5311
5312 cpu_buffer = iter->cpu_buffer;
5313
5314 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5315 arch_spin_lock(&cpu_buffer->lock);
5316 rb_iter_reset(iter);
5317 arch_spin_unlock(&cpu_buffer->lock);
5318 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5319 }
5320 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5321
5322 /**
5323 * ring_buffer_read_finish - finish reading the iterator of the buffer
5324 * @iter: The iterator retrieved by ring_buffer_read_prepare
5325 *
5326 * This re-enables the recording to the buffer, and frees the
5327 * iterator.
5328 */
5329 void
5330 ring_buffer_read_finish(struct ring_buffer_iter *iter)
5331 {
5332 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5333 unsigned long flags;
5334
5335 /*
5336 * Ring buffer is disabled from recording, here's a good place
5337 * to check the integrity of the ring buffer.
5338 * Must prevent readers from trying to read, as the check
5339 * clears the HEAD page and readers require it.
5340 */
5341 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5342 rb_check_pages(cpu_buffer);
5343 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5344
5345 atomic_dec(&cpu_buffer->resize_disabled);
5346 kfree(iter->event);
5347 kfree(iter);
5348 }
5349 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
5350
5351 /**
5352 * ring_buffer_iter_advance - advance the iterator to the next location
5353 * @iter: The ring buffer iterator
5354 *
5355 * Move the location of the iterator such that the next read will
5356 * be the next location of the iterator.
5357 */
5358 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5359 {
5360 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5361 unsigned long flags;
5362
5363 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5364
5365 rb_advance_iter(iter);
5366
5367 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5368 }
5369 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
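
/*
 * A sketch of the full non-consuming read sequence (not taken from an
 * in-tree caller; process_event() is a hypothetical consumer, and
 * ring_buffer_iter_empty()/ring_buffer_iter_peek() are the iterator
 * helpers defined earlier in this file):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while (!ring_buffer_iter_empty(iter)) {
 *		event = ring_buffer_iter_peek(iter, &ts);
 *		if (!event)
 *			break;
 *		process_event(event, ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */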
5370
5371 /**
5372 * ring_buffer_size - return the size of the ring buffer (in bytes)
5373 * @buffer: The ring buffer.
5374 * @cpu: The CPU to get ring buffer size from.
5375 */
5376 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5377 {
5378 /*
5379 * Earlier, this method returned
5380 * BUF_PAGE_SIZE * buffer->nr_pages
5381 * Since the nr_pages field is now removed, we have converted this to
5382 * return the per cpu buffer value.
5383 */
5384 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5385 return 0;
5386
5387 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
5388 }
5389 EXPORT_SYMBOL_GPL(ring_buffer_size);
5390
5391 static void rb_clear_buffer_page(struct buffer_page *page)
5392 {
5393 local_set(&page->write, 0);
5394 local_set(&page->entries, 0);
5395 rb_init_page(page->page);
5396 page->read = 0;
5397 }
5398
5399 static void
5400 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5401 {
5402 struct buffer_page *page;
5403
5404 rb_head_page_deactivate(cpu_buffer);
5405
5406 cpu_buffer->head_page
5407 = list_entry(cpu_buffer->pages, struct buffer_page, list);
5408 rb_clear_buffer_page(cpu_buffer->head_page);
5409 list_for_each_entry(page, cpu_buffer->pages, list) {
5410 rb_clear_buffer_page(page);
5411 }
5412
5413 cpu_buffer->tail_page = cpu_buffer->head_page;
5414 cpu_buffer->commit_page = cpu_buffer->head_page;
5415
5416 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5417 INIT_LIST_HEAD(&cpu_buffer->new_pages);
5418 rb_clear_buffer_page(cpu_buffer->reader_page);
5419
5420 local_set(&cpu_buffer->entries_bytes, 0);
5421 local_set(&cpu_buffer->overrun, 0);
5422 local_set(&cpu_buffer->commit_overrun, 0);
5423 local_set(&cpu_buffer->dropped_events, 0);
5424 local_set(&cpu_buffer->entries, 0);
5425 local_set(&cpu_buffer->committing, 0);
5426 local_set(&cpu_buffer->commits, 0);
5427 local_set(&cpu_buffer->pages_touched, 0);
5428 local_set(&cpu_buffer->pages_lost, 0);
5429 local_set(&cpu_buffer->pages_read, 0);
5430 cpu_buffer->last_pages_touch = 0;
5431 cpu_buffer->shortest_full = 0;
5432 cpu_buffer->read = 0;
5433 cpu_buffer->read_bytes = 0;
5434
5435 rb_time_set(&cpu_buffer->write_stamp, 0);
5436 rb_time_set(&cpu_buffer->before_stamp, 0);
5437
5438 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5439
5440 cpu_buffer->lost_events = 0;
5441 cpu_buffer->last_overrun = 0;
5442
5443 rb_head_page_activate(cpu_buffer);
5444 }
5445
5446 /* Must have disabled the cpu buffer then done a synchronize_rcu */
5447 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5448 {
5449 unsigned long flags;
5450
5451 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5452
5453 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5454 goto out;
5455
5456 arch_spin_lock(&cpu_buffer->lock);
5457
5458 rb_reset_cpu(cpu_buffer);
5459
5460 arch_spin_unlock(&cpu_buffer->lock);
5461
5462 out:
5463 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5464 }
5465
5466 /**
5467 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5468 * @buffer: The ring buffer to reset a per cpu buffer of
5469 * @cpu: The CPU buffer to be reset
5470 */
5471 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
5472 {
5473 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5474
5475 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5476 return;
5477
5478 /* prevent another thread from changing buffer sizes */
5479 mutex_lock(&buffer->mutex);
5480
5481 atomic_inc(&cpu_buffer->resize_disabled);
5482 atomic_inc(&cpu_buffer->record_disabled);
5483
5484 /* Make sure all commits have finished */
5485 synchronize_rcu();
5486
5487 reset_disabled_cpu_buffer(cpu_buffer);
5488
5489 atomic_dec(&cpu_buffer->record_disabled);
5490 atomic_dec(&cpu_buffer->resize_disabled);
5491
5492 mutex_unlock(&buffer->mutex);
5493 }
5494 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5495
5496 /* Flag to ensure proper resetting of atomic variables */
5497 #define RESET_BIT (1 << 30)
5498
5499 /**
5500 * ring_buffer_reset_online_cpus - reset all online per-CPU buffers of a ring buffer
5501 * @buffer: The ring buffer whose online per-CPU buffers are to be reset
5503 */
5504 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5505 {
5506 struct ring_buffer_per_cpu *cpu_buffer;
5507 int cpu;
5508
5509 /* prevent another thread from changing buffer sizes */
5510 mutex_lock(&buffer->mutex);
5511
5512 for_each_online_buffer_cpu(buffer, cpu) {
5513 cpu_buffer = buffer->buffers[cpu];
5514
5515 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
5516 atomic_inc(&cpu_buffer->record_disabled);
5517 }
5518
5519 /* Make sure all commits have finished */
5520 synchronize_rcu();
5521
5522 for_each_buffer_cpu(buffer, cpu) {
5523 cpu_buffer = buffer->buffers[cpu];
5524
5525 /*
5526 * If a CPU came online during the synchronize_rcu(), then
5527 * ignore it.
5528 */
5529 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
5530 continue;
5531
5532 reset_disabled_cpu_buffer(cpu_buffer);
5533
5534 atomic_dec(&cpu_buffer->record_disabled);
5535 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
5536 }
5537
5538 mutex_unlock(&buffer->mutex);
5539 }
5540
5541 /**
5542 * ring_buffer_reset - reset a ring buffer
5543 * @buffer: The ring buffer to reset all cpu buffers
5544 */
5545 void ring_buffer_reset(struct trace_buffer *buffer)
5546 {
5547 struct ring_buffer_per_cpu *cpu_buffer;
5548 int cpu;
5549
5550 /* prevent another thread from changing buffer sizes */
5551 mutex_lock(&buffer->mutex);
5552
5553 for_each_buffer_cpu(buffer, cpu) {
5554 cpu_buffer = buffer->buffers[cpu];
5555
5556 atomic_inc(&cpu_buffer->resize_disabled);
5557 atomic_inc(&cpu_buffer->record_disabled);
5558 }
5559
5560 /* Make sure all commits have finished */
5561 synchronize_rcu();
5562
5563 for_each_buffer_cpu(buffer, cpu) {
5564 cpu_buffer = buffer->buffers[cpu];
5565
5566 reset_disabled_cpu_buffer(cpu_buffer);
5567
5568 atomic_dec(&cpu_buffer->record_disabled);
5569 atomic_dec(&cpu_buffer->resize_disabled);
5570 }
5571
5572 mutex_unlock(&buffer->mutex);
5573 }
5574 EXPORT_SYMBOL_GPL(ring_buffer_reset);
5575
5576 /**
5577 * ring_buffer_empty - is the ring buffer empty?
5578 * @buffer: The ring buffer to test
5579 */
5580 bool ring_buffer_empty(struct trace_buffer *buffer)
5581 {
5582 struct ring_buffer_per_cpu *cpu_buffer;
5583 unsigned long flags;
5584 bool dolock;
5585 int cpu;
5586 int ret;
5587
5588 /* yes this is racy, but if you don't like the race, lock the buffer */
5589 for_each_buffer_cpu(buffer, cpu) {
5590 cpu_buffer = buffer->buffers[cpu];
5591 local_irq_save(flags);
5592 dolock = rb_reader_lock(cpu_buffer);
5593 ret = rb_per_cpu_empty(cpu_buffer);
5594 rb_reader_unlock(cpu_buffer, dolock);
5595 local_irq_restore(flags);
5596
5597 if (!ret)
5598 return false;
5599 }
5600
5601 return true;
5602 }
5603 EXPORT_SYMBOL_GPL(ring_buffer_empty);
5604
5605 /**
5606 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5607 * @buffer: The ring buffer
5608 * @cpu: The CPU buffer to test
5609 */
5610 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
5611 {
5612 struct ring_buffer_per_cpu *cpu_buffer;
5613 unsigned long flags;
5614 bool dolock;
5615 int ret;
5616
5617 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5618 return true;
5619
5620 cpu_buffer = buffer->buffers[cpu];
5621 local_irq_save(flags);
5622 dolock = rb_reader_lock(cpu_buffer);
5623 ret = rb_per_cpu_empty(cpu_buffer);
5624 rb_reader_unlock(cpu_buffer, dolock);
5625 local_irq_restore(flags);
5626
5627 return ret;
5628 }
5629 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
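
/*
 * A sketch of a reader polling a single CPU buffer (assumes the
 * ring_buffer_wait() helper defined earlier in this file; a real
 * caller would also handle signals and error returns):
 *
 *	while (ring_buffer_empty_cpu(buffer, cpu))
 *		ring_buffer_wait(buffer, cpu, 0);
 */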
5630
5631 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
5632 /**
5633 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5634 * @buffer_a: One buffer to swap with
5635 * @buffer_b: The other buffer to swap with
5636 * @cpu: the CPU of the buffers to swap
5637 *
5638 * This function is useful for tracers that want to take a "snapshot"
5639 * of a CPU buffer and has another back up buffer lying around.
5640 * it is expected that the tracer handles the cpu buffer not being
5641 * used at the moment.
5642 */
5643 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5644 struct trace_buffer *buffer_b, int cpu)
5645 {
5646 struct ring_buffer_per_cpu *cpu_buffer_a;
5647 struct ring_buffer_per_cpu *cpu_buffer_b;
5648 int ret = -EINVAL;
5649
5650 if (unlikely(has_ext_writer(buffer_a) || has_ext_writer(buffer_b)))
5651 return -EINVAL;
5652
5653 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5654 !cpumask_test_cpu(cpu, buffer_b->cpumask))
5655 goto out;
5656
5657 cpu_buffer_a = buffer_a->buffers[cpu];
5658 cpu_buffer_b = buffer_b->buffers[cpu];
5659
5660 /* At least make sure the two buffers are somewhat the same */
5661 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5662 goto out;
5663
5664 ret = -EAGAIN;
5665
5666 if (atomic_read(&buffer_a->record_disabled))
5667 goto out;
5668
5669 if (atomic_read(&buffer_b->record_disabled))
5670 goto out;
5671
5672 if (atomic_read(&cpu_buffer_a->record_disabled))
5673 goto out;
5674
5675 if (atomic_read(&cpu_buffer_b->record_disabled))
5676 goto out;
5677
5678 /*
5679 * We can't do a synchronize_rcu here because this
5680 * function can be called in atomic context.
5681 * Normally this will be called from the same CPU as cpu.
5682 * If not it's up to the caller to protect this.
5683 */
5684 atomic_inc(&cpu_buffer_a->record_disabled);
5685 atomic_inc(&cpu_buffer_b->record_disabled);
5686
5687 ret = -EBUSY;
5688 if (local_read(&cpu_buffer_a->committing))
5689 goto out_dec;
5690 if (local_read(&cpu_buffer_b->committing))
5691 goto out_dec;
5692
5693 /*
5694 * When resize is in progress, we cannot swap it because
5695 * it will mess the state of the cpu buffer.
5696 */
5697 if (atomic_read(&buffer_a->resizing))
5698 goto out_dec;
5699 if (atomic_read(&buffer_b->resizing))
5700 goto out_dec;
5701
5702 buffer_a->buffers[cpu] = cpu_buffer_b;
5703 buffer_b->buffers[cpu] = cpu_buffer_a;
5704
5705 cpu_buffer_b->buffer = buffer_a;
5706 cpu_buffer_a->buffer = buffer_b;
5707
5708 ret = 0;
5709
5710 out_dec:
5711 atomic_dec(&cpu_buffer_a->record_disabled);
5712 atomic_dec(&cpu_buffer_b->record_disabled);
5713 out:
5714 return ret;
5715 }
5716 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
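
/*
 * A sketch of the snapshot swap described above (max_buffer is a
 * hypothetical spare buffer the tracer keeps around, sized like the
 * live one):
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(max_buffer, live_buffer, cpu);
 *	if (err == -EBUSY || err == -EAGAIN)
 *		return err;	// commit in flight or recording disabled
 */
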
5717 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
5718
5719 /**
5720 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5721 * @buffer: the buffer to allocate for.
5722 * @cpu: the cpu buffer to allocate.
5723 *
5724 * This function is used in conjunction with ring_buffer_read_page.
5725 * When reading a full page from the ring buffer, these functions
5726 * can be used to speed up the process. The calling function should
5727 * allocate a few pages first with this function. Then when it
5728 * needs to get pages from the ring buffer, it passes the result
5729 * of this function into ring_buffer_read_page, which will swap
5730 * the page that was allocated, with the read page of the buffer.
5731 *
5732 * Returns:
5733 * The page allocated, or ERR_PTR
5734 */
5735 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5736 {
5737 struct ring_buffer_per_cpu *cpu_buffer;
5738 struct buffer_data_page *bpage = NULL;
5739 unsigned long flags;
5740 struct page *page;
5741
5742 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5743 return ERR_PTR(-ENODEV);
5744
5745 cpu_buffer = buffer->buffers[cpu];
5746 local_irq_save(flags);
5747 arch_spin_lock(&cpu_buffer->lock);
5748
5749 if (cpu_buffer->free_page) {
5750 bpage = cpu_buffer->free_page;
5751 cpu_buffer->free_page = NULL;
5752 }
5753
5754 arch_spin_unlock(&cpu_buffer->lock);
5755 local_irq_restore(flags);
5756
5757 if (bpage)
5758 goto out;
5759
5760 page = alloc_pages_node(cpu_to_node(cpu),
5761 GFP_KERNEL | __GFP_NORETRY, 0);
5762 if (!page)
5763 return ERR_PTR(-ENOMEM);
5764
5765 bpage = page_address(page);
5766
5767 out:
5768 rb_init_page(bpage);
5769
5770 return bpage;
5771 }
5772 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5773
5774 /**
5775 * ring_buffer_free_read_page - free an allocated read page
5776 * @buffer: the buffer the page was allocated for
5777 * @cpu: the cpu buffer the page came from
5778 * @data: the page to free
5779 *
5780 * Free a page allocated from ring_buffer_alloc_read_page.
5781 */
5782 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
5783 {
5784 struct ring_buffer_per_cpu *cpu_buffer;
5785 struct buffer_data_page *bpage = data;
5786 struct page *page = virt_to_page(bpage);
5787 unsigned long flags;
5788
5789 if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
5790 return;
5791
5792 cpu_buffer = buffer->buffers[cpu];
5793
5794 /* If the page is still in use someplace else, we can't reuse it */
5795 if (page_ref_count(page) > 1)
5796 goto out;
5797
5798 local_irq_save(flags);
5799 arch_spin_lock(&cpu_buffer->lock);
5800
5801 if (!cpu_buffer->free_page) {
5802 cpu_buffer->free_page = bpage;
5803 bpage = NULL;
5804 }
5805
5806 arch_spin_unlock(&cpu_buffer->lock);
5807 local_irq_restore(flags);
5808
5809 out:
5810 free_page((unsigned long)bpage);
5811 }
5812 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5813
5814 /**
5815 * ring_buffer_read_page - extract a page from the ring buffer
5816 * @buffer: buffer to extract from
5817 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5818 * @len: amount to extract
5819 * @cpu: the cpu of the buffer to extract
5820 * @full: should the extraction only happen when the page is full.
5821 *
5822 * This function will pull out a page from the ring buffer and consume it.
5823 * @data_page must be the address of the variable that was returned
5824 * from ring_buffer_alloc_read_page. This is because the page might be used
5825 * to swap with a page in the ring buffer.
5826 *
5827 * for example:
5828 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
5829 * if (IS_ERR(rpage))
5830 * return PTR_ERR(rpage);
5831 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5832 * if (ret >= 0)
5833 * process_page(rpage, ret);
5834 *
5835 * When @full is set, the function will not succeed (return a
5836 * non-negative offset) unless the writer is off the reader page.
5837 *
5838 * Note: it is up to the calling functions to handle sleeps and wakeups.
5839 * The ring buffer can be used anywhere in the kernel and can not
5840 * blindly call wake_up. The layer that uses the ring buffer must be
5841 * responsible for that.
5842 *
5843 * Returns:
5844 * >=0 if data has been transferred, returns the offset of consumed data.
5845 * <0 if no data has been transferred.
5846 */
5847 int ring_buffer_read_page(struct trace_buffer *buffer,
5848 void **data_page, size_t len, int cpu, int full)
5849 {
5850 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5851 struct ring_buffer_event *event;
5852 struct buffer_data_page *bpage;
5853 struct buffer_page *reader;
5854 unsigned long missed_events;
5855 unsigned long flags;
5856 unsigned int commit;
5857 unsigned int read;
5858 u64 save_timestamp;
5859 int ret = -1;
5860
5861 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5862 goto out;
5863
5864 /*
5865 * If len is not big enough to hold the page header, then
5866 * we can not copy anything.
5867 */
5868 if (len <= BUF_PAGE_HDR_SIZE)
5869 goto out;
5870
5871 len -= BUF_PAGE_HDR_SIZE;
5872
5873 if (!data_page)
5874 goto out;
5875
5876 bpage = *data_page;
5877 if (!bpage)
5878 goto out;
5879
5880 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5881
5882 reader = rb_get_reader_page(cpu_buffer);
5883 if (!reader)
5884 goto out_unlock;
5885
5886 event = rb_reader_event(cpu_buffer);
5887
5888 read = reader->read;
5889 commit = rb_page_commit(reader);
5890
5891 /* Check if any events were dropped */
5892 missed_events = cpu_buffer->lost_events;
5893
5894 /*
5895 * If this page has been partially read or
5896 * if len is not big enough to read the rest of the page or
5897 * a writer is still on the page, then
5898 * we must copy the data from the page to the buffer.
5899 * Otherwise, we can simply swap the page with the one passed in.
5900 */
5901 if (read || (len < (commit - read)) ||
5902 cpu_buffer->reader_page == cpu_buffer->commit_page ||
5903 unlikely(has_ext_writer(buffer))) {
5904 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5905 unsigned int rpos = read;
5906 unsigned int pos = 0;
5907 unsigned int size;
5908
5909 /*
5910 * If a full page is expected, this can still be returned
5911 * if there's been a previous partial read and the
5912 * rest of the page can be read and the commit page is off
5913 * the reader page.
5914 */
5915 if (full &&
5916 (!read || (len < (commit - read)) ||
5917 cpu_buffer->reader_page == cpu_buffer->commit_page))
5918 goto out_unlock;
5919
5920 if (len > (commit - read))
5921 len = (commit - read);
5922
5923 /* Always keep the time extend and data together */
5924 size = rb_event_ts_length(event);
5925
5926 if (len < size)
5927 goto out_unlock;
5928
5929 /* save the current timestamp, since the user will need it */
5930 save_timestamp = cpu_buffer->read_stamp;
5931
5932 /* Need to copy one event at a time */
5933 do {
5934 /* We need the size of one event, because
5935 * rb_advance_reader only advances by one event,
5936 * whereas rb_event_ts_length may include the size of
5937 * one or two events.
5938 * We have already ensured there's enough space if this
5939 * is a time extend. */
5940 size = rb_event_length(event);
5941 memcpy(bpage->data + pos, rpage->data + rpos, size);
5942
5943 len -= size;
5944
5945 rb_advance_reader(cpu_buffer);
5946 rpos = reader->read;
5947 pos += size;
5948
5949 if (rpos >= commit)
5950 break;
5951
5952 event = rb_reader_event(cpu_buffer);
5953 /* Always keep the time extend and data together */
5954 size = rb_event_ts_length(event);
5955 } while (len >= size);
5956
5957 /* update bpage */
5958 local_set(&bpage->commit, pos);
5959 bpage->time_stamp = save_timestamp;
5960
5961 /* we copied everything to the beginning */
5962 read = 0;
5963 } else {
5964 /* update the entry counter */
5965 cpu_buffer->read += rb_page_entries(reader);
5966 cpu_buffer->read_bytes += rb_page_commit(reader);
5967
5968 /* swap the pages */
5969 rb_init_page(bpage);
5970 bpage = reader->page;
5971 reader->page = *data_page;
5972 local_set(&reader->write, 0);
5973 local_set(&reader->entries, 0);
5974 reader->read = 0;
5975 *data_page = bpage;
5976
5977 /*
5978 * Use the real_end for the data size,
5979 * This gives us a chance to store the lost events
5980 * on the page.
5981 */
5982 if (reader->real_end)
5983 local_set(&bpage->commit, reader->real_end);
5984 }
5985 ret = read;
5986
5987 cpu_buffer->lost_events = 0;
5988
5989 commit = local_read(&bpage->commit);
5990 /*
5991 * Set a flag in the commit field if we lost events
5992 */
5993 if (missed_events) {
5994 /* If there is room at the end of the page to save the
5995 * missed events, then record it there.
5996 */
5997 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
5998 memcpy(&bpage->data[commit], &missed_events,
5999 sizeof(missed_events));
6000 local_add(RB_MISSED_STORED, &bpage->commit);
6001 commit += sizeof(missed_events);
6002 }
6003 local_add(RB_MISSED_EVENTS, &bpage->commit);
6004 }
6005
6006 /*
6007 * This page may be off to user land. Zero it out here.
6008 */
6009 if (commit < BUF_PAGE_SIZE)
6010 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
6011
6012 out_unlock:
6013 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6014
6015 out:
6016 return ret;
6017 }
6018 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
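
/*
 * A sketch of the full alloc/read/free cycle for pulling pages out of
 * a CPU buffer (process_page() is a hypothetical consumer):
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	ret = ring_buffer_read_page(buffer, &rpage, BUF_PAGE_SIZE, cpu, 1);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, cpu, rpage);
 */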
6019
6020 /*
6021 * We only allocate new buffers, never free them if the CPU goes down.
6022 * If we were to free the buffer, then the user would lose any trace that was in
6023 * the buffer.
6024 */
6025 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
6026 {
6027 struct trace_buffer *buffer;
6028 long nr_pages_same;
6029 int cpu_i;
6030 unsigned long nr_pages;
6031
6032 buffer = container_of(node, struct trace_buffer, node);
6033 if (cpumask_test_cpu(cpu, buffer->cpumask))
6034 return 0;
6035
6036 nr_pages = 0;
6037 nr_pages_same = 1;
6038 /* check if all cpu sizes are same */
6039 for_each_buffer_cpu(buffer, cpu_i) {
6040 /* fill in the size from first enabled cpu */
6041 if (nr_pages == 0)
6042 nr_pages = buffer->buffers[cpu_i]->nr_pages;
6043 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
6044 nr_pages_same = 0;
6045 break;
6046 }
6047 }
6048 /* allocate minimum pages, user can later expand it */
6049 if (!nr_pages_same)
6050 nr_pages = 2;
6051 buffer->buffers[cpu] =
6052 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
6053 if (!buffer->buffers[cpu]) {
6054 WARN(1, "failed to allocate ring buffer on CPU %u\n",
6055 cpu);
6056 return -ENOMEM;
6057 }
6058 smp_wmb();
6059 cpumask_set_cpu(cpu, buffer->cpumask);
6060 return 0;
6061 }
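
/*
 * This callback is wired into the CPU hotplug state machine by the
 * trace subsystem; roughly (the registration lives outside this file):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 */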
6062
6063 #define TRACE_BUFFER_PACK_HDR_SIZE offsetof(struct trace_buffer_pack, __data)
6064 #define RING_BUFFER_PACK_HDR_SIZE offsetof(struct ring_buffer_pack, page_va)
6065
6066 size_t trace_buffer_pack_size(struct trace_buffer *trace_buffer)
6067 {
6068 size_t size = 0;
6069 int cpu;
6070
6071 for_each_buffer_cpu(trace_buffer, cpu) {
6072 struct ring_buffer_per_cpu *rb = trace_buffer->buffers[cpu];
6073 size += rb->nr_pages * sizeof(unsigned long);
6074 size += RING_BUFFER_PACK_HDR_SIZE;
6075 }
6076
6077 size += TRACE_BUFFER_PACK_HDR_SIZE;
6078
6079 return size;
6080 }
6081
6082 int trace_buffer_pack(struct trace_buffer *trace_buffer,
6083 struct trace_buffer_pack *pack)
6084 {
6085 struct ring_buffer_pack *cpu_pack;
6086 int cpu = -1, pack_cpu, j;
6087
6088 if (!has_ext_writer(trace_buffer))
6089 return -EINVAL;
6090
6091 pack->nr_cpus = cpumask_weight(trace_buffer->cpumask);
6092 pack->total_pages = 0;
6093
6094 for_each_ring_buffer_pack(cpu_pack, pack_cpu, pack) {
6095 struct ring_buffer_per_cpu *rb;
6096 unsigned long flags, nr_pages;
6097 struct buffer_page *bpage;
6098
6099 cpu = cpumask_next(cpu, trace_buffer->cpumask);
6100 if (cpu >= nr_cpu_ids) {
6101 WARN_ON(1);
6102 break;
6103 }
6104
6105 rb = trace_buffer->buffers[cpu];
6106
6107 local_irq_save(flags);
6108 arch_spin_lock(&rb->lock);
6109
6110 bpage = rb->head_page;
6111 nr_pages = rb->nr_pages;
6112
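/* account for this CPU's data pages plus its reader page */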
6113 pack->total_pages += nr_pages + 1;
6114
6115 cpu_pack->cpu = cpu;
6116 cpu_pack->reader_page_va = (unsigned long)rb->reader_page->page;
6117 cpu_pack->nr_pages = nr_pages;
6118
6119 for (j = 0; j < nr_pages; j++) {
6120 cpu_pack->page_va[j] = (unsigned long)bpage->page;
6121 rb_inc_page(&bpage);
6122 }
6123
6124 arch_spin_unlock(&rb->lock);
6125 local_irq_restore(flags);
6126 }
6127
6128 return 0;
6129 }
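
/*
 * A sketch of sizing and filling a pack descriptor for an external
 * writer (kzalloc()/kfree() and the error handling are illustrative):
 *
 *	struct trace_buffer_pack *pack;
 *
 *	pack = kzalloc(trace_buffer_pack_size(buffer), GFP_KERNEL);
 *	if (!pack)
 *		return -ENOMEM;
 *	if (trace_buffer_pack(buffer, pack)) {
 *		kfree(pack);
 *		return -EINVAL;	// buffer has no external writer
 *	}
 *	// hand @pack over to the external writer...
 *	kfree(pack);
 */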
6130
6131 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
6132 /*
6133 * This is a basic integrity check of the ring buffer.
6134 * Late in the boot cycle this test will run when configured in.
6135 * It will kick off a thread per CPU that will go into a loop
6136 * writing to the per cpu ring buffer various sizes of data.
6137 * Some of the data will be large items, some small.
6138 *
6139 * Another thread is created that goes into a spin, sending out
6140 * IPIs to the other CPUs to also write into the ring buffer.
6141 * This is to test the nesting ability of the buffer.
6142 *
6143 * Basic stats are recorded and reported. If something unexpected
6144 * happens in the ring buffer, a big warning is displayed and all
6145 * ring buffers are disabled.
6146 */
6147 static struct task_struct *rb_threads[NR_CPUS] __initdata;
6148
6149 struct rb_test_data {
6150 struct trace_buffer *buffer;
6151 unsigned long events;
6152 unsigned long bytes_written;
6153 unsigned long bytes_alloc;
6154 unsigned long bytes_dropped;
6155 unsigned long events_nested;
6156 unsigned long bytes_written_nested;
6157 unsigned long bytes_alloc_nested;
6158 unsigned long bytes_dropped_nested;
6159 int min_size_nested;
6160 int max_size_nested;
6161 int max_size;
6162 int min_size;
6163 int cpu;
6164 int cnt;
6165 };
6166
6167 static struct rb_test_data rb_data[NR_CPUS] __initdata;
6168
6169 /* 1 meg per cpu */
6170 #define RB_TEST_BUFFER_SIZE 1048576
6171
6172 static char rb_string[] __initdata =
6173 "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
6174 "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
6175 "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
6176
6177 static bool rb_test_started __initdata;
6178
6179 struct rb_item {
6180 int size;
6181 char str[];
6182 };
6183
6184 static __init int rb_write_something(struct rb_test_data *data, bool nested)
6185 {
6186 struct ring_buffer_event *event;
6187 struct rb_item *item;
6188 bool started;
6189 int event_len;
6190 int size;
6191 int len;
6192 int cnt;
6193
6194 /* Have nested writes different than what is written */
6195 cnt = data->cnt + (nested ? 27 : 0);
6196
6197 /* Multiply cnt by ~e, to make some unique increment */
6198 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
6199
6200 len = size + sizeof(struct rb_item);
6201
6202 started = rb_test_started;
6203 /* read rb_test_started before checking buffer enabled */
6204 smp_rmb();
6205
6206 event = ring_buffer_lock_reserve(data->buffer, len);
6207 if (!event) {
6208 /* Ignore dropped events before test starts. */
6209 if (started) {
6210 if (nested)
6211 data->bytes_dropped_nested += len;
6212 else
6213 data->bytes_dropped += len;
6214 }
6215 return len;
6216 }
6217
6218 event_len = ring_buffer_event_length(event);
6219
6220 if (RB_WARN_ON(data->buffer, event_len < len))
6221 goto out;
6222
6223 item = ring_buffer_event_data(event);
6224 item->size = size;
6225 memcpy(item->str, rb_string, size);
6226
6227 if (nested) {
6228 data->bytes_alloc_nested += event_len;
6229 data->bytes_written_nested += len;
6230 data->events_nested++;
6231 if (!data->min_size_nested || len < data->min_size_nested)
6232 data->min_size_nested = len;
6233 if (len > data->max_size_nested)
6234 data->max_size_nested = len;
6235 } else {
6236 data->bytes_alloc += event_len;
6237 data->bytes_written += len;
6238 data->events++;
6239 if (!data->min_size || len < data->min_size)
6240 data->min_size = len;
6241 if (len > data->max_size)
6242 data->max_size = len;
6243 }
6244
6245 out:
6246 ring_buffer_unlock_commit(data->buffer, event);
6247
6248 return 0;
6249 }
6250
6251 static __init int rb_test(void *arg)
6252 {
6253 struct rb_test_data *data = arg;
6254
6255 while (!kthread_should_stop()) {
6256 rb_write_something(data, false);
6257 data->cnt++;
6258
6259 set_current_state(TASK_INTERRUPTIBLE);
6260 /* Now sleep between a min of 100-300us and a max of 1ms */
6261 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
6262 }
6263
6264 return 0;
6265 }
6266
6267 static __init void rb_ipi(void *ignore)
6268 {
6269 struct rb_test_data *data;
6270 int cpu = smp_processor_id();
6271
6272 data = &rb_data[cpu];
6273 rb_write_something(data, true);
6274 }
6275
6276 static __init int rb_hammer_test(void *arg)
6277 {
6278 while (!kthread_should_stop()) {
6279
6280 /* Send an IPI to all cpus to write data! */
6281 smp_call_function(rb_ipi, NULL, 1);
6282 /* No sleep, but for non preempt, let others run */
6283 schedule();
6284 }
6285
6286 return 0;
6287 }
6288
6289 static __init int test_ringbuffer(void)
6290 {
6291 struct task_struct *rb_hammer;
6292 struct trace_buffer *buffer;
6293 int cpu;
6294 int ret = 0;
6295
6296 if (security_locked_down(LOCKDOWN_TRACEFS)) {
6297 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
6298 return 0;
6299 }
6300
6301 pr_info("Running ring buffer tests...\n");
6302
6303 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
6304 if (WARN_ON(!buffer))
6305 return 0;
6306
6307 /* Disable buffer so that threads can't write to it yet */
6308 ring_buffer_record_off(buffer);
6309
6310 for_each_online_cpu(cpu) {
6311 rb_data[cpu].buffer = buffer;
6312 rb_data[cpu].cpu = cpu;
6313 rb_data[cpu].cnt = cpu;
6314 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
6315 cpu, "rbtester/%u");
6316 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6317 pr_cont("FAILED\n");
6318 ret = PTR_ERR(rb_threads[cpu]);
6319 goto out_free;
6320 }
6321 }
6322
6323 /* Now create the rb hammer! */
6324 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
6325 if (WARN_ON(IS_ERR(rb_hammer))) {
6326 pr_cont("FAILED\n");
6327 ret = PTR_ERR(rb_hammer);
6328 goto out_free;
6329 }
6330
6331 ring_buffer_record_on(buffer);
6332 /*
6333 * Show buffer is enabled before setting rb_test_started.
6334 * Yes there's a small race window where events could be
6335 * dropped and the thread won't catch it. But when a ring
6336 * buffer gets enabled, there will always be some kind of
6337 * delay before other CPUs see it. Thus, we don't care about
6338 * those dropped events. We care about events dropped after
6339 * the threads see that the buffer is active.
6340 */
6341 smp_wmb();
6342 rb_test_started = true;
6343
6344 set_current_state(TASK_INTERRUPTIBLE);
6345 /* Just run for 10 seconds */
6346 schedule_timeout(10 * HZ);
6347
6348 kthread_stop(rb_hammer);
6349
6350 out_free:
6351 for_each_online_cpu(cpu) {
6352 if (!rb_threads[cpu])
6353 break;
6354 kthread_stop(rb_threads[cpu]);
6355 }
6356 if (ret) {
6357 ring_buffer_free(buffer);
6358 return ret;
6359 }
6360
6361 /* Report! */
6362 pr_info("finished\n");
6363 for_each_online_cpu(cpu) {
6364 struct ring_buffer_event *event;
6365 struct rb_test_data *data = &rb_data[cpu];
6366 struct rb_item *item;
6367 unsigned long total_events;
6368 unsigned long total_dropped;
6369 unsigned long total_written;
6370 unsigned long total_alloc;
6371 unsigned long total_read = 0;
6372 unsigned long total_size = 0;
6373 unsigned long total_len = 0;
6374 unsigned long total_lost = 0;
6375 unsigned long lost;
6376 int big_event_size;
6377 int small_event_size;
6378
6379 ret = -1;
6380
6381 total_events = data->events + data->events_nested;
6382 total_written = data->bytes_written + data->bytes_written_nested;
6383 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
6384 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
6385
6386 big_event_size = data->max_size + data->max_size_nested;
6387 small_event_size = data->min_size + data->min_size_nested;
6388
6389 pr_info("CPU %d:\n", cpu);
6390 pr_info(" events: %ld\n", total_events);
6391 pr_info(" dropped bytes: %ld\n", total_dropped);
6392 pr_info(" alloced bytes: %ld\n", total_alloc);
6393 pr_info(" written bytes: %ld\n", total_written);
6394 pr_info(" biggest event: %d\n", big_event_size);
6395 pr_info(" smallest event: %d\n", small_event_size);
6396
6397 if (RB_WARN_ON(buffer, total_dropped))
6398 break;
6399
6400 ret = 0;
6401
6402 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
6403 total_lost += lost;
6404 item = ring_buffer_event_data(event);
6405 total_len += ring_buffer_event_length(event);
6406 total_size += item->size + sizeof(struct rb_item);
6407 if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6408 pr_info("FAILED!\n");
6409 pr_info("buffer had: %.*s\n", item->size, item->str);
6410 pr_info("expected: %.*s\n", item->size, rb_string);
6411 RB_WARN_ON(buffer, 1);
6412 ret = -1;
6413 break;
6414 }
6415 total_read++;
6416 }
6417 if (ret)
6418 break;
6419
6420 ret = -1;
6421
6422 pr_info(" read events: %ld\n", total_read);
6423 pr_info(" lost events: %ld\n", total_lost);
6424 pr_info(" total events: %ld\n", total_lost + total_read);
6425 pr_info(" recorded len bytes: %ld\n", total_len);
6426 pr_info(" recorded size bytes: %ld\n", total_size);
6427 if (total_lost) {
6428 pr_info(" With dropped events, record len and size may not match\n"
6429 " alloced and written from above\n");
6430 } else {
6431 if (RB_WARN_ON(buffer, total_len != total_alloc ||
6432 total_size != total_written))
6433 break;
6434 }
6435 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6436 break;
6437
6438 ret = 0;
6439 }
6440 if (!ret)
6441 pr_info("Ring buffer PASSED!\n");
6442
6443 ring_buffer_free(buffer);
6444 return 0;
6445 }
6446
6447 late_initcall(test_ringbuffer);
6448 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
6449