// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "kbuffer.h"

#define MISSING_EVENTS (1UL << 31)
#define MISSING_STORED (1UL << 30)

#define COMMIT_MASK ((1 << 27) - 1)

enum {
	KBUFFER_FL_HOST_BIG_ENDIAN = (1<<0),
	KBUFFER_FL_BIG_ENDIAN = (1<<1),
	KBUFFER_FL_LONG_8 = (1<<2),
	KBUFFER_FL_OLD_FORMAT = (1<<3),
};

#define ENDIAN_MASK (KBUFFER_FL_HOST_BIG_ENDIAN | KBUFFER_FL_BIG_ENDIAN)
/** kbuffer
 * @timestamp - timestamp of current event
 * @lost_events - # of lost events between this subbuffer and previous
 * @flags - special flags of the kbuffer
 * @subbuffer - pointer to the sub-buffer page
 * @data - pointer to the start of data on the sub-buffer page
 * @index - index from @data to the @curr event data
 * @curr - offset from @data to the start of current event
 *         (includes metadata)
 * @next - offset from @data to the start of next event
 * @size - The size of data on @data
 * @start - The offset from @subbuffer where @data lives
 *
 * @read_4 - Function to read 4 raw bytes (may swap)
 * @read_8 - Function to read 8 raw bytes (may swap)
 * @read_long - Function to read a long word (4 or 8 bytes with needed swap)
 */
struct kbuffer {
	unsigned long long	timestamp;
	long long		lost_events;
	unsigned long		flags;
	void			*subbuffer;
	void			*data;
	unsigned int		index;
	unsigned int		curr;
	unsigned int		next;
	unsigned int		size;
	unsigned int		start;

	unsigned int (*read_4)(void *ptr);
	unsigned long long (*read_8)(void *ptr);
	unsigned long long (*read_long)(struct kbuffer *kbuf, void *ptr);
	int (*next_event)(struct kbuffer *kbuf);
};

static void *zmalloc(size_t size)
{
	return calloc(1, size);
}

static int host_is_bigendian(void)
{
	unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
	unsigned int *ptr;

	ptr = (unsigned int *)str;
	return *ptr == 0x01020304;
}

static int do_swap(struct kbuffer *kbuf)
{
	return ((kbuf->flags & KBUFFER_FL_HOST_BIG_ENDIAN) + kbuf->flags) &
		ENDIAN_MASK;
}

static unsigned long long __read_8(void *ptr)
{
	unsigned long long data = *(unsigned long long *)ptr;

	return data;
}

static unsigned long long __read_8_sw(void *ptr)
{
	unsigned long long data = *(unsigned long long *)ptr;
	unsigned long long swap;

	swap = ((data & 0xffULL) << 56) |
		((data & (0xffULL << 8)) << 40) |
		((data & (0xffULL << 16)) << 24) |
		((data & (0xffULL << 24)) << 8) |
		((data & (0xffULL << 32)) >> 8) |
		((data & (0xffULL << 40)) >> 24) |
		((data & (0xffULL << 48)) >> 40) |
		((data & (0xffULL << 56)) >> 56);

	return swap;
}

static unsigned int __read_4(void *ptr)
{
	unsigned int data = *(unsigned int *)ptr;

	return data;
}

static unsigned int __read_4_sw(void *ptr)
{
	unsigned int data = *(unsigned int *)ptr;
	unsigned int swap;

	swap = ((data & 0xffULL) << 24) |
		((data & (0xffULL << 8)) << 8) |
		((data & (0xffULL << 16)) >> 8) |
		((data & (0xffULL << 24)) >> 24);

	return swap;
}

static unsigned long long read_8(struct kbuffer *kbuf, void *ptr)
{
	return kbuf->read_8(ptr);
}

static unsigned int read_4(struct kbuffer *kbuf, void *ptr)
{
	return kbuf->read_4(ptr);
}

static unsigned long long __read_long_8(struct kbuffer *kbuf, void *ptr)
{
	return kbuf->read_8(ptr);
}

static unsigned long long __read_long_4(struct kbuffer *kbuf, void *ptr)
{
	return kbuf->read_4(ptr);
}

static unsigned long long read_long(struct kbuffer *kbuf, void *ptr)
{
	return kbuf->read_long(kbuf, ptr);
}

static int calc_index(struct kbuffer *kbuf, void *ptr)
{
	return (unsigned long)ptr - (unsigned long)kbuf->data;
}

static int __next_event(struct kbuffer *kbuf);

/**
 * kbuffer_alloc - allocate a new kbuffer
 * @size: enum to denote size of word
 * @endian: enum to denote endianness
 *
 * Allocates and returns a new kbuffer.
 */
struct kbuffer *
kbuffer_alloc(enum kbuffer_long_size size, enum kbuffer_endian endian)
{
	struct kbuffer *kbuf;
	int flags = 0;

	switch (size) {
	case KBUFFER_LSIZE_4:
		break;
	case KBUFFER_LSIZE_8:
		flags |= KBUFFER_FL_LONG_8;
		break;
	default:
		return NULL;
	}

	switch (endian) {
	case KBUFFER_ENDIAN_LITTLE:
		break;
	case KBUFFER_ENDIAN_BIG:
		flags |= KBUFFER_FL_BIG_ENDIAN;
		break;
	default:
		return NULL;
	}

	kbuf = zmalloc(sizeof(*kbuf));
	if (!kbuf)
		return NULL;

	kbuf->flags = flags;

	if (host_is_bigendian())
		kbuf->flags |= KBUFFER_FL_HOST_BIG_ENDIAN;

	if (do_swap(kbuf)) {
		kbuf->read_8 = __read_8_sw;
		kbuf->read_4 = __read_4_sw;
	} else {
		kbuf->read_8 = __read_8;
		kbuf->read_4 = __read_4;
	}

	if (kbuf->flags & KBUFFER_FL_LONG_8)
		kbuf->read_long = __read_long_8;
	else
		kbuf->read_long = __read_long_4;

	/* May be changed by kbuffer_set_old_format() */
	kbuf->next_event = __next_event;

	return kbuf;
}
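
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * typically allocates one handle that matches the word size and endianness
 * of the machine that produced the trace, and frees it when done:
 *
 *	struct kbuffer *kbuf;
 *
 *	kbuf = kbuffer_alloc(KBUFFER_LSIZE_8, KBUFFER_ENDIAN_LITTLE);
 *	if (!kbuf)
 *		return -1;
 *	...
 *	kbuffer_free(kbuf);
 */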

/** kbuffer_free - free an allocated kbuffer
 * @kbuf: The kbuffer to free
 *
 * Can take NULL as a parameter.
 */
void kbuffer_free(struct kbuffer *kbuf)
{
	free(kbuf);
}

static unsigned int type4host(struct kbuffer *kbuf,
			      unsigned int type_len_ts)
{
	if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
		return (type_len_ts >> 29) & 3;
	else
		return type_len_ts & 3;
}

static unsigned int len4host(struct kbuffer *kbuf,
			     unsigned int type_len_ts)
{
	if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
		return (type_len_ts >> 27) & 7;
	else
		return (type_len_ts >> 2) & 7;
}

static unsigned int type_len4host(struct kbuffer *kbuf,
				  unsigned int type_len_ts)
{
	if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
		return (type_len_ts >> 27) & ((1 << 5) - 1);
	else
		return type_len_ts & ((1 << 5) - 1);
}

static unsigned int ts4host(struct kbuffer *kbuf,
			    unsigned int type_len_ts)
{
	if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
		return type_len_ts & ((1 << 27) - 1);
	else
		return type_len_ts >> 5;
}

/*
 * Linux 2.6.30 and earlier (not much earlier) had a different
 * ring buffer format. It should be obsolete, but we handle it anyway.
 */
enum old_ring_buffer_type {
	OLD_RINGBUF_TYPE_PADDING,
	OLD_RINGBUF_TYPE_TIME_EXTEND,
	OLD_RINGBUF_TYPE_TIME_STAMP,
	OLD_RINGBUF_TYPE_DATA,
};

static unsigned int old_update_pointers(struct kbuffer *kbuf)
{
	unsigned long long extend;
	unsigned int type_len_ts;
	unsigned int type;
	unsigned int len;
	unsigned int delta;
	unsigned int length;
	void *ptr = kbuf->data + kbuf->curr;

	type_len_ts = read_4(kbuf, ptr);
	ptr += 4;

	type = type4host(kbuf, type_len_ts);
	len = len4host(kbuf, type_len_ts);
	delta = ts4host(kbuf, type_len_ts);

	switch (type) {
	case OLD_RINGBUF_TYPE_PADDING:
		kbuf->next = kbuf->size;
		return 0;

	case OLD_RINGBUF_TYPE_TIME_EXTEND:
		extend = read_4(kbuf, ptr);
		extend <<= TS_SHIFT;
		extend += delta;
		delta = extend;
		ptr += 4;
		length = 0;
		break;

	case OLD_RINGBUF_TYPE_TIME_STAMP:
		/* should never happen! */
		kbuf->curr = kbuf->size;
		kbuf->next = kbuf->size;
		kbuf->index = kbuf->size;
		return -1;
	default:
		if (len)
			length = len * 4;
		else {
			length = read_4(kbuf, ptr);
			length -= 4;
			ptr += 4;
		}
		break;
	}

	kbuf->timestamp += delta;
	kbuf->index = calc_index(kbuf, ptr);
	kbuf->next = kbuf->index + length;

	return type;
}

static int __old_next_event(struct kbuffer *kbuf)
{
	int type;

	do {
		kbuf->curr = kbuf->next;
		if (kbuf->next >= kbuf->size)
			return -1;
		type = old_update_pointers(kbuf);
	} while (type == OLD_RINGBUF_TYPE_TIME_EXTEND || type == OLD_RINGBUF_TYPE_PADDING);

	return 0;
}

static unsigned int
translate_data(struct kbuffer *kbuf, void *data, void **rptr,
	       unsigned long long *delta, int *length)
{
	unsigned long long extend;
	unsigned int type_len_ts;
	unsigned int type_len;

	type_len_ts = read_4(kbuf, data);
	data += 4;

	type_len = type_len4host(kbuf, type_len_ts);
	*delta = ts4host(kbuf, type_len_ts);

	switch (type_len) {
	case KBUFFER_TYPE_PADDING:
		*length = read_4(kbuf, data);
		break;

	case KBUFFER_TYPE_TIME_EXTEND:
		extend = read_4(kbuf, data);
		data += 4;
		extend <<= TS_SHIFT;
		extend += *delta;
		*delta = extend;
		*length = 0;
		break;

	case KBUFFER_TYPE_TIME_STAMP:
		data += 12;
		*length = 0;
		break;
	case 0:
		*length = read_4(kbuf, data) - 4;
		*length = (*length + 3) & ~3;
		data += 4;
		break;
	default:
		*length = type_len * 4;
		break;
	}

	*rptr = data;

	return type_len;
}

static unsigned int update_pointers(struct kbuffer *kbuf)
{
	unsigned long long delta;
	unsigned int type_len;
	int length;
	void *ptr = kbuf->data + kbuf->curr;

	type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);

	kbuf->timestamp += delta;
	kbuf->index = calc_index(kbuf, ptr);
	kbuf->next = kbuf->index + length;

	return type_len;
}

/**
 * kbuffer_translate_data - read raw data to get a record
 * @swap: Set to 1 if bytes in words need to be swapped when read
 * @data: The raw data to read
 * @size: Address to store the size of the event data.
 *
 * Returns a pointer to the event data. To determine the size of the
 * entire record (record metadata + data), add the difference between
 * @data and the returned pointer to @size.
 */
void *kbuffer_translate_data(int swap, void *data, unsigned int *size)
{
	unsigned long long delta;
	struct kbuffer kbuf;
	int type_len;
	int length;
	void *ptr;

	if (swap) {
		kbuf.read_8 = __read_8_sw;
		kbuf.read_4 = __read_4_sw;
		kbuf.flags = host_is_bigendian() ? 0 : KBUFFER_FL_BIG_ENDIAN;
	} else {
		kbuf.read_8 = __read_8;
		kbuf.read_4 = __read_4;
		kbuf.flags = host_is_bigendian() ? KBUFFER_FL_BIG_ENDIAN : 0;
	}

	type_len = translate_data(&kbuf, data, &ptr, &delta, &length);
	switch (type_len) {
	case KBUFFER_TYPE_PADDING:
	case KBUFFER_TYPE_TIME_EXTEND:
	case KBUFFER_TYPE_TIME_STAMP:
		return NULL;
	}

	*size = length;

	return ptr;
}
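
/*
 * Example (illustrative sketch, not from the original file): given a pointer
 * to the start of a raw record, the full record size can be recovered from
 * the returned data pointer and the reported payload size. The "raw"
 * pointer is an assumption for illustration:
 *
 *	unsigned int size, record_size;
 *	void *payload;
 *
 *	payload = kbuffer_translate_data(0, raw, &size);
 *	if (payload)
 *		record_size = (payload - raw) + size;
 */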

static int __next_event(struct kbuffer *kbuf)
{
	int type;

	do {
		kbuf->curr = kbuf->next;
		if (kbuf->next >= kbuf->size)
			return -1;
		type = update_pointers(kbuf);
	} while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING);

	return 0;
}

static int next_event(struct kbuffer *kbuf)
{
	return kbuf->next_event(kbuf);
}

/**
 * kbuffer_next_event - increment the current pointer
 * @kbuf: The kbuffer to read
 * @ts: Address to store the next record's timestamp (may be NULL to ignore)
 *
 * Increments the pointers into the subbuffer of the kbuffer to point to the
 * next event so that the next kbuffer_read_event() will return a
 * new event.
 *
 * Returns the data of the next event if a new event exists on the subbuffer,
 * NULL otherwise.
 */
void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts)
{
	int ret;

	if (!kbuf || !kbuf->subbuffer)
		return NULL;

	ret = next_event(kbuf);
	if (ret < 0)
		return NULL;

	if (ts)
		*ts = kbuf->timestamp;

	return kbuf->data + kbuf->index;
}

/**
 * kbuffer_load_subbuffer - load a new subbuffer into the kbuffer
 * @kbuf: The kbuffer to load
 * @subbuffer: The subbuffer to load into @kbuf.
 *
 * Load a new subbuffer (page) into @kbuf. This will reset all
 * the pointers and update the @kbuf timestamp. The next read will
 * return the first event on @subbuffer.
 *
 * Returns 0 on success, -1 otherwise.
 */
int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer)
{
	unsigned long long flags;
	void *ptr = subbuffer;

	if (!kbuf || !subbuffer)
		return -1;

	kbuf->subbuffer = subbuffer;

	kbuf->timestamp = read_8(kbuf, ptr);
	ptr += 8;

	kbuf->curr = 0;

	if (kbuf->flags & KBUFFER_FL_LONG_8)
		kbuf->start = 16;
	else
		kbuf->start = 12;

	kbuf->data = subbuffer + kbuf->start;

	flags = read_long(kbuf, ptr);
	kbuf->size = (unsigned int)flags & COMMIT_MASK;

	if (flags & MISSING_EVENTS) {
		if (flags & MISSING_STORED) {
			ptr = kbuf->data + kbuf->size;
			kbuf->lost_events = read_long(kbuf, ptr);
		} else
			kbuf->lost_events = -1;
	} else
		kbuf->lost_events = 0;

	kbuf->index = 0;
	kbuf->next = 0;

	next_event(kbuf);

	return 0;
}
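
/*
 * Example (illustrative sketch, not from the original file): the usual read
 * loop loads a mapped sub-buffer page and walks every event on it. The
 * "page" pointer and the printing are assumptions for illustration:
 *
 *	unsigned long long ts;
 *	void *event;
 *
 *	if (kbuffer_load_subbuffer(kbuf, page) < 0)
 *		return -1;
 *
 *	if (kbuffer_missed_events(kbuf))
 *		printf("lost events before this page\n");
 *
 *	for (event = kbuffer_read_event(kbuf, &ts); event;
 *	     event = kbuffer_next_event(kbuf, &ts))
 *		printf("event at offset %d, %d bytes, ts %llu\n",
 *		       kbuffer_curr_offset(kbuf),
 *		       kbuffer_event_size(kbuf), ts);
 */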

/**
 * kbuffer_read_event - read the next event in the kbuffer subbuffer
 * @kbuf: The kbuffer to read from
 * @ts: The address to store the timestamp of the event (may be NULL to ignore)
 *
 * Returns a pointer to the data part of the current event.
 * NULL if no event is left on the subbuffer.
 */
void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts)
{
	if (!kbuf || !kbuf->subbuffer)
		return NULL;

	if (kbuf->curr >= kbuf->size)
		return NULL;

	if (ts)
		*ts = kbuf->timestamp;
	return kbuf->data + kbuf->index;
}

/**
 * kbuffer_timestamp - Return the timestamp of the current event
 * @kbuf: The kbuffer to read from
 *
 * Returns the timestamp of the current (next) event.
 */
unsigned long long kbuffer_timestamp(struct kbuffer *kbuf)
{
	return kbuf->timestamp;
}

/**
 * kbuffer_read_at_offset - read the event that is at offset
 * @kbuf: The kbuffer to read from
 * @offset: The offset into the subbuffer
 * @ts: The address to store the timestamp of the event (may be NULL to ignore)
 *
 * The @offset must be an index from the @kbuf subbuffer beginning.
 * If @offset is bigger than the stored subbuffer, NULL will be returned.
 *
 * Returns the data of the record that is at @offset. Note, @offset does
 * not need to be the start of the record; it only needs to fall within
 * the record (or at its beginning).
 *
 * Note, the kbuf timestamp and pointers are updated to the
 * returned record. That is, kbuffer_read_event() will return the same
 * data and timestamp, and kbuffer_next_event() will increment from
 * this record.
 */
void *kbuffer_read_at_offset(struct kbuffer *kbuf, int offset,
			     unsigned long long *ts)
{
	void *data;

	if (offset < kbuf->start)
		offset = 0;
	else
		offset -= kbuf->start;

	/* Reset the buffer */
	kbuffer_load_subbuffer(kbuf, kbuf->subbuffer);
	data = kbuffer_read_event(kbuf, ts);

	while (kbuf->curr < offset) {
		data = kbuffer_next_event(kbuf, ts);
		if (!data)
			break;
	}

	return data;
}
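
/*
 * Example (illustrative sketch, not from the original file): an offset
 * saved earlier from kbuffer_curr_offset() can be used to seek back to
 * that record on the same loaded sub-buffer:
 *
 *	unsigned long long ts;
 *	int offset;
 *	void *event;
 *
 *	offset = kbuffer_curr_offset(kbuf);
 *	...
 *	event = kbuffer_read_at_offset(kbuf, offset, &ts);
 */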

/**
 * kbuffer_subbuffer_size - the size of the loaded subbuffer
 * @kbuf: The kbuffer to read from
 *
 * Returns the size of the subbuffer. Note, this size is
 * where the last event resides. The stored subbuffer may actually be
 * bigger due to padding and such.
 */
int kbuffer_subbuffer_size(struct kbuffer *kbuf)
{
	return kbuf->size;
}

/**
 * kbuffer_curr_index - Return the index of the record
 * @kbuf: The kbuffer to read from
 *
 * Returns the index from the start of the data part of
 * the subbuffer to the current location. Note this is not
 * from the start of the subbuffer. An index of zero will
 * point to the first record. Use kbuffer_curr_offset() for
 * the actual offset (which can be used by kbuffer_read_at_offset()).
 */
int kbuffer_curr_index(struct kbuffer *kbuf)
{
	return kbuf->curr;
}

/**
 * kbuffer_curr_offset - Return the offset of the record
 * @kbuf: The kbuffer to read from
 *
 * Returns the offset from the start of the subbuffer to the
 * current location.
 */
int kbuffer_curr_offset(struct kbuffer *kbuf)
{
	return kbuf->curr + kbuf->start;
}

/**
 * kbuffer_event_size - return the size of the event data
 * @kbuf: The kbuffer to read
 *
 * Returns the size of the event data (the payload not counting
 * the meta data of the record) of the current event.
 */
int kbuffer_event_size(struct kbuffer *kbuf)
{
	return kbuf->next - kbuf->index;
}

/**
 * kbuffer_curr_size - return the size of the entire record
 * @kbuf: The kbuffer to read
 *
 * Returns the size of the entire record (meta data and payload)
 * of the current event.
 */
int kbuffer_curr_size(struct kbuffer *kbuf)
{
	return kbuf->next - kbuf->curr;
}

/**
 * kbuffer_missed_events - return the # of missed events from last event.
 * @kbuf: The kbuffer to read from
 *
 * Returns the # of missed events (if recorded) before the current
 * event. Note, only the event at the beginning of a subbuffer can
 * have missed events; for all other events this returns zero.
 */
int kbuffer_missed_events(struct kbuffer *kbuf)
{
	/* Only the first event can have missed events */
	if (kbuf->curr)
		return 0;

	return kbuf->lost_events;
}

/**
 * kbuffer_set_old_format - set the kbuffer to use the old format parsing
 * @kbuf: The kbuffer to set
 *
 * This is obsolete (or should be). The first kernels to use the
 * new ring buffer had a slightly different ring buffer format
 * (2.6.30 and earlier). It is still somewhat supported by kbuffer,
 * but should not be counted on in the future.
 */
void kbuffer_set_old_format(struct kbuffer *kbuf)
{
	kbuf->flags |= KBUFFER_FL_OLD_FORMAT;

	kbuf->next_event = __old_next_event;
}

/**
 * kbuffer_start_of_data - return offset of where data starts on subbuffer
 * @kbuf: The kbuffer
 *
 * Returns the location on the subbuffer where the data starts.
 */
int kbuffer_start_of_data(struct kbuffer *kbuf)
{
	return kbuf->start;
}

/**
 * kbuffer_raw_get - get raw buffer info
 * @kbuf: The kbuffer
 * @subbuf: Start of mapped subbuffer
 * @info: Info descriptor to fill in
 *
 * For debugging. This can return internals of the ring buffer.
 * Expects to have info->next set to what it will read.
 * The type, length and timestamp delta will be filled in, and
 * @info->next will be updated to the next element.
 * The @subbuf is used to know if the info is past the end of
 * data and NULL will be returned if it is.
 */
struct kbuffer_raw_info *
kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf, struct kbuffer_raw_info *info)
{
	unsigned long long flags;
	unsigned long long delta;
	unsigned int type_len;
	unsigned int size;
	int start;
	int length;
	void *ptr = info->next;

	if (!kbuf || !subbuf)
		return NULL;

	if (kbuf->flags & KBUFFER_FL_LONG_8)
		start = 16;
	else
		start = 12;

	flags = read_long(kbuf, subbuf + 8);
	size = (unsigned int)flags & COMMIT_MASK;

	if (ptr < subbuf || ptr >= subbuf + start + size)
		return NULL;

	type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);

	info->next = ptr + length;

	info->type = type_len;
	info->delta = delta;
	info->length = length;

	return info;
}
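
/*
 * Example (illustrative sketch, not from the original file): to dump the raw
 * elements of a sub-buffer, point info.next just past the sub-buffer header
 * and keep calling kbuffer_raw_get() until it returns NULL. This assumes
 * @kbuf already had a sub-buffer loaded so kbuffer_start_of_data() reflects
 * the header size:
 *
 *	struct kbuffer_raw_info info;
 *
 *	info.next = subbuf + kbuffer_start_of_data(kbuf);
 *	while (kbuffer_raw_get(kbuf, subbuf, &info))
 *		printf("type %d, length %d, delta %llu\n",
 *		       info.type, info.length, info.delta);
 */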