// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright (C) 2023 Google Inc, Steven Rostedt <rostedt@goodmis.org>
 */
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <asm/types.h>
#include "tracefs-local.h"

12 /**
13  * struct trace_buffer_meta - Ring-buffer Meta-page description
14  * @meta_page_size:	Size of this meta-page.
15  * @meta_struct_len:	Size of this structure.
16  * @subbuf_size:	Size of each sub-buffer.
17  * @nr_subbufs:		Number of subbfs in the ring-buffer, including the reader.
18  * @reader.lost_events:	Number of events lost at the time of the reader swap.
19  * @reader.id:		subbuf ID of the current reader. ID range [0 : @nr_subbufs - 1]
20  * @reader.read:	Number of bytes read on the reader subbuf.
21  * @flags:		Placeholder for now, 0 until new features are supported.
22  * @entries:		Number of entries in the ring-buffer.
23  * @overrun:		Number of entries lost in the ring-buffer.
24  * @read:		Number of entries that have been read.
25  * @Reserved1:		Internal use only.
26  * @Reserved2:		Internal use only.
27  */
28 struct trace_buffer_meta {
29 	__u32		meta_page_size;
30 	__u32		meta_struct_len;
31 
32 	__u32		subbuf_size;
33 	__u32		nr_subbufs;
34 
35 	struct {
36 		__u64	lost_events;
37 		__u32	id;
38 		__u32	read;
39 	} reader;
40 
41 	__u64	flags;
42 
43 	__u64	entries;
44 	__u64	overrun;
45 	__u64	read;
46 
47 	__u64	Reserved1;
48 	__u64	Reserved2;
49 };
50 
/* ioctl on trace_pipe_raw: ask the kernel to swap in the next reader sub-buffer */
#define TRACE_MMAP_IOCTL_GET_READER		_IO('R', 0x20)

/*
 * Descriptor for one memory-mapped ring-buffer instance, returned (as an
 * opaque pointer) by trace_mmap() and released by trace_unmap().
 */
struct trace_mmap {
	struct trace_buffer_meta	*map;		/* mapped meta-page */
	struct kbuffer			*kbuf;		/* dup'ed kbuffer used to parse sub-buffers */
	void				*data;		/* mapped data area (all sub-buffers) */
	int				*data_pages;	/* into the meta mapping, just past the meta header */
	int				fd;		/* fd of the trace_pipe_raw file */
	int				last_idx;	/* reader sub-buffer index at map time */
	int				last_read;	/* NOTE(review): not used in this file — verify other users */
	int				meta_len;	/* length of the meta-page mapping */
	int				data_len;	/* length of the data mapping */
};
64 
65 /**
66  * trace_mmap - try to mmap the ring buffer
67  * @fd: The file descriptor to the trace_pipe_raw file
68  * @kbuf: The kbuffer to load the subbuffer to
69  *
70  * Will try to mmap the ring buffer if it is supported, and
71  * if not, will return NULL, otherwise it returns a descriptor
72  * to handle the mapping.
73  */
trace_mmap(int fd,struct kbuffer * kbuf)74 __hidden void *trace_mmap(int fd, struct kbuffer *kbuf)
75 {
76 	struct trace_mmap *tmap;
77 	int page_size;
78 	void *meta;
79 	void *data;
80 
81 	page_size = getpagesize();
82 	meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
83 	if (meta == MAP_FAILED)
84 		return NULL;
85 
86 	tmap = calloc(1, sizeof(*tmap));
87 	if (!tmap) {
88 		munmap(meta, page_size);
89 		return NULL;
90 	}
91 
92 	tmap->kbuf = kbuffer_dup(kbuf);
93 	if (!tmap->kbuf) {
94 		munmap(meta, page_size);
95 		free(tmap);
96 	}
97 	kbuf = tmap->kbuf;
98 
99 	tmap->fd = fd;
100 
101 	tmap->map = meta;
102 	tmap->meta_len = tmap->map->meta_page_size;
103 
104 	if (tmap->meta_len > page_size) {
105 		munmap(meta, page_size);
106 		meta = mmap(NULL, tmap->meta_len, PROT_READ, MAP_SHARED, fd, 0);
107 		if (meta == MAP_FAILED) {
108 			kbuffer_free(kbuf);
109 			free(tmap);
110 			return NULL;
111 		}
112 		tmap->map = meta;
113 	}
114 
115 	tmap->data_pages = meta + tmap->meta_len;
116 
117 	tmap->data_len = tmap->map->subbuf_size * tmap->map->nr_subbufs;
118 
119 	tmap->data = mmap(NULL, tmap->data_len, PROT_READ, MAP_SHARED,
120 			  fd, tmap->meta_len);
121 	if (tmap->data == MAP_FAILED) {
122 		munmap(meta, tmap->meta_len);
123 		kbuffer_free(kbuf);
124 		free(tmap);
125 		return NULL;
126 	}
127 
128 	tmap->last_idx = tmap->map->reader.id;
129 
130 	data = tmap->data + tmap->map->subbuf_size * tmap->last_idx;
131 	kbuffer_load_subbuffer(kbuf, data);
132 
133 	/*
134 	 * The page could have left over data on it that was already
135 	 * consumed. Move the "read" forward in that case.
136 	 */
137 	if (tmap->map->reader.read) {
138 		int size = kbuffer_start_of_data(kbuf) + tmap->map->reader.read;
139 		char tmpbuf[size];
140 		kbuffer_read_buffer(kbuf, tmpbuf, size);
141 	}
142 
143 	return tmap;
144 }
145 
trace_unmap(void * mapping)146 __hidden void trace_unmap(void *mapping)
147 {
148 	struct trace_mmap *tmap = mapping;
149 
150 	if (!tmap)
151 		return;
152 
153 	munmap(tmap->data, tmap->data_len);
154 	munmap(tmap->map, tmap->meta_len);
155 	kbuffer_free(tmap->kbuf);
156 	free(tmap);
157 }
158 
get_reader(struct trace_mmap * tmap)159 static int get_reader(struct trace_mmap *tmap)
160 {
161 	return ioctl(tmap->fd, TRACE_MMAP_IOCTL_GET_READER);
162 }
163 
trace_mmap_load_subbuf(void * mapping,struct kbuffer * kbuf)164 __hidden int trace_mmap_load_subbuf(void *mapping, struct kbuffer *kbuf)
165 {
166 	struct trace_mmap *tmap = mapping;
167 	void *data;
168 	int id;
169 
170 	if (!tmap)
171 		return -1;
172 
173 	id = tmap->map->reader.id;
174 	data = tmap->data + tmap->map->subbuf_size * id;
175 
176 	/*
177 	 * If kbuf doesn't point to the current sub-buffer
178 	 * just load it and return.
179 	 */
180 	if (data != kbuffer_subbuffer(kbuf)) {
181 		kbuffer_load_subbuffer(kbuf, data);
182 		/* Move the read pointer forward if need be */
183 		if (kbuffer_curr_index(tmap->kbuf)) {
184 			int size = kbuffer_curr_offset(tmap->kbuf);
185 			char tmpbuf[size];
186 			kbuffer_read_buffer(kbuf, tmpbuf, size);
187 		}
188 		return 1;
189 	}
190 
191 	/*
192 	 * Perhaps the reader page had a write that added
193 	 * more data.
194 	 */
195 	kbuffer_refresh(kbuf);
196 
197 	/* Are there still events to read? */
198 	if (kbuffer_curr_size(kbuf)) {
199 		/* If current is greater than what was read, refresh */
200 		if (kbuffer_curr_offset(kbuf) + kbuffer_curr_size(kbuf) >
201 		    tmap->map->reader.read) {
202 			if (get_reader(tmap) < 0)
203 				return -1;
204 		}
205 		return 1;
206 	}
207 
208 	/* See if a new page is ready? */
209 	if (get_reader(tmap) < 0)
210 		return -1;
211 	id = tmap->map->reader.id;
212 	data = tmap->data + tmap->map->subbuf_size * id;
213 
214 	/*
215 	 * If the sub-buffer hasn't changed, then there's no more
216 	 * events to read.
217 	 */
218 	if (data == kbuffer_subbuffer(kbuf))
219 		return 0;
220 
221 	kbuffer_load_subbuffer(kbuf, data);
222 	return 1;
223 }
224 
trace_mmap_read(void * mapping,void * buffer)225 __hidden int trace_mmap_read(void *mapping, void *buffer)
226 {
227 	struct trace_mmap *tmap = mapping;
228 	struct kbuffer *kbuf;
229 	int ret;
230 
231 	if (!tmap)
232 		return -1;
233 
234 	kbuf = tmap->kbuf;
235 
236 	ret = trace_mmap_load_subbuf(mapping, kbuf);
237 	/* Return for error or no more events */
238 	if (ret <= 0)
239 		return ret;
240 
241 	/* Update the buffer */
242 	ret = kbuffer_read_buffer(kbuf, buffer, tmap->map->subbuf_size);
243 	if (ret <= 0)
244 		return ret;
245 
246 	/* This needs to include the size of the meta data too */
247 	return ret + kbuffer_start_of_data(kbuf);
248 }
249