#ifndef FIO_IOLOG_H
#define FIO_IOLOG_H

#include "lib/rbtree.h"
#include "lib/ieee754.h"
#include "flist.h"
#include "ioengines.h"

/*
 * Use for maintaining statistics
 */
struct io_stat {
	uint64_t max_val;
	uint64_t min_val;
	uint64_t samples;

	fio_fp64_t mean;
	fio_fp64_t S;
};
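
/*
 * The mean/S pair above is typically maintained with a Welford-style
 * online update. Illustrative sketch only, not the exact implementation:
 *
 *	void stat_add_sample(struct io_stat *is, uint64_t val)
 *	{
 *		double delta;
 *
 *		if (val > is->max_val)
 *			is->max_val = val;
 *		if (val < is->min_val)
 *			is->min_val = val;
 *		is->samples++;
 *
 *		delta = val - is->mean.u.f;
 *		is->mean.u.f += delta / is->samples;
 *		is->S.u.f += delta * (val - is->mean.u.f);
 *	}
 *
 * The sample variance then follows as S / (samples - 1).
 */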

struct io_hist {
	uint64_t samples;
	unsigned long hist_last;
	struct flist_head list;
};


union io_sample_data {
	uint64_t val;
	struct io_u_plat_entry *plat_entry;
};

#define sample_val(value)	((union io_sample_data) { .val = value })
#define sample_plat(plat)	((union io_sample_data) { .plat_entry = plat })
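
/*
 * Illustrative use of the two payload variants: a plain value sample versus
 * a histogram entry sample (plat_entry would be a struct io_u_plat_entry *
 * owned by the caller):
 *
 *	union io_sample_data d1 = sample_val(123);
 *	union io_sample_data d2 = sample_plat(plat_entry);
 */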

/*
 * A single data sample
 */
struct io_sample {
	uint64_t time;
	union io_sample_data data;
	uint32_t __ddir;
	uint32_t bs;
};

struct io_sample_offset {
	struct io_sample s;
	uint64_t offset;
};

enum {
	IO_LOG_TYPE_LAT = 1,
	IO_LOG_TYPE_CLAT,
	IO_LOG_TYPE_SLAT,
	IO_LOG_TYPE_BW,
	IO_LOG_TYPE_IOPS,
	IO_LOG_TYPE_HIST,
};

#define DEF_LOG_ENTRIES		1024
#define MAX_LOG_ENTRIES		(1024 * DEF_LOG_ENTRIES)

struct io_logs {
	struct flist_head list;
	uint64_t nr_samples;
	uint64_t max_samples;
	void *log;
};

/*
 * Dynamically growing data sample log
 */
struct io_log {
	/*
	 * Entries already logged
	 */
	struct flist_head io_logs;
	uint32_t cur_log_max;

	/*
	 * When the current log runs out of space, store events here until
	 * we have a chance to regrow
	 */
	struct io_logs *pending;

	unsigned int log_ddir_mask;

	char *filename;

	struct thread_data *td;

	unsigned int log_type;

	/*
	 * If we fail to extend the log, stop collecting more entries.
	 */
	bool disabled;

	/*
	 * Log offsets
	 */
	unsigned int log_offset;

	/*
	 * Max size of log entries before a chunk is compressed
	 */
	unsigned int log_gz;

	/*
	 * Don't deflate for storing, just store the compressed bits
	 */
	unsigned int log_gz_store;

	/*
	 * Windowed average, for logging single entries averaged over some
	 * period of time.
	 */
	struct io_stat avg_window[DDIR_RWDIR_CNT];
	unsigned long avg_msec;
	unsigned long avg_last;

	/*
	 * Windowed latency histograms, for keeping track of when we need to
	 * save a copy of the histogram approximately every hist_msec
	 * milliseconds.
	 */
	struct io_hist hist_window[DDIR_RWDIR_CNT];
	unsigned long hist_msec;
	unsigned int hist_coarseness;

	pthread_mutex_t chunk_lock;
	unsigned int chunk_seq;
	struct flist_head chunk_list;
};
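
/*
 * The log proper is a list of io_logs chunks hanging off ->io_logs; new
 * samples go into the current (last) chunk, and when it fills up they are
 * parked in ->pending until regrow_logs() gets a chance to attach more
 * space.  A reader can walk all stored samples roughly like this
 * (illustrative sketch only):
 *
 *	struct flist_head *entry;
 *	struct io_logs *cur_log;
 *	uint64_t i;
 *
 *	flist_for_each(entry, &log->io_logs) {
 *		cur_log = flist_entry(entry, struct io_logs, list);
 *		for (i = 0; i < cur_log->nr_samples; i++) {
 *			struct io_sample *s = get_sample(log, cur_log, i);
 *			consume(s->time, s->data, io_sample_ddir(s), s->bs);
 *		}
 *	}
 */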

/*
 * If the upper bit is set, then we have the offset as well
 */
#define LOG_OFFSET_SAMPLE_BIT	0x80000000U
#define io_sample_ddir(io)	((io)->__ddir & ~LOG_OFFSET_SAMPLE_BIT)

static inline void io_sample_set_ddir(struct io_log *log,
				      struct io_sample *io,
				      enum fio_ddir ddir)
{
	io->__ddir = ddir | log->log_ddir_mask;
}
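
/*
 * Sketch of how the direction bits round-trip, assuming log_ddir_mask is
 * either 0 or LOG_OFFSET_SAMPLE_BIT as set up when the log is created
 * (illustrative only):
 *
 *	io_sample_set_ddir(log, io, DDIR_WRITE);
 *	// io->__ddir now carries DDIR_WRITE, plus the offset marker bit
 *	// if this log stores per-sample offsets
 *	assert(io_sample_ddir(io) == DDIR_WRITE);
 */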

static inline size_t __log_entry_sz(int log_offset)
{
	if (log_offset)
		return sizeof(struct io_sample_offset);
	else
		return sizeof(struct io_sample);
}

static inline size_t log_entry_sz(struct io_log *log)
{
	return __log_entry_sz(log->log_offset);
}

static inline size_t log_sample_sz(struct io_log *log, struct io_logs *cur_log)
{
	return cur_log->nr_samples * log_entry_sz(log);
}

static inline struct io_sample *__get_sample(void *samples, int log_offset,
					     uint64_t sample)
{
	uint64_t sample_offset = sample * __log_entry_sz(log_offset);

	return (struct io_sample *) ((char *) samples + sample_offset);
}

struct io_logs *iolog_cur_log(struct io_log *);
uint64_t iolog_nr_samples(struct io_log *);
void regrow_logs(struct thread_data *);

static inline struct io_sample *get_sample(struct io_log *iolog,
					   struct io_logs *cur_log,
					   uint64_t sample)
{
	return __get_sample(cur_log->log, iolog->log_offset, sample);
}
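
/*
 * Samples are stored as raw structs, either struct io_sample or the larger
 * struct io_sample_offset, so indexing must scale by the per-entry size.
 * For instance (illustrative only), copying the current chunk out to a
 * flat buffer could look like:
 *
 *	struct io_logs *cur_log = iolog_cur_log(log);
 *	size_t bytes = log_sample_sz(log, cur_log);
 *	void *copy = malloc(bytes);
 *
 *	if (copy)
 *		memcpy(copy, cur_log->log, bytes);
 */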

enum {
	IP_F_ONRB	= 1,
	IP_F_ONLIST	= 2,
	IP_F_TRIMMED	= 4,
	IP_F_IN_FLIGHT	= 8,
};

/*
 * When logging io actions, this matches a single sent io_u
 */
struct io_piece {
	union {
		struct rb_node rb_node;
		struct flist_head list;
	};
	struct flist_head trim_list;
	union {
		int fileno;
		struct fio_file *file;
	};
	unsigned long long offset;
	unsigned short numberio;
	unsigned long len;
	unsigned int flags;
	enum fio_ddir ddir;
	union {
		unsigned long delay;
		unsigned int file_action;
	};
};
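
/*
 * An io_piece lives on either the rb-tree or the flat list (hence the
 * anonymous union above), with IP_F_ONRB / IP_F_ONLIST recording which one
 * it is currently on.  A removal helper therefore checks the flag before
 * unlinking, roughly like below; the tree root name (io_hist_tree) is an
 * assumption about the surrounding thread_data (illustrative sketch only):
 *
 *	if (ipo->flags & IP_F_ONRB)
 *		rb_erase(&ipo->rb_node, &td->io_hist_tree);
 *	else if (ipo->flags & IP_F_ONLIST)
 *		flist_del(&ipo->list);
 */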

/*
 * Log exports
 */
enum file_log_act {
	FIO_LOG_ADD_FILE,
	FIO_LOG_OPEN_FILE,
	FIO_LOG_CLOSE_FILE,
	FIO_LOG_UNLINK_FILE,
};

struct io_u;
extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
extern void log_io_u(const struct thread_data *, const struct io_u *);
extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
extern int __must_check init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
extern void unlog_io_piece(struct thread_data *, struct io_u *);
extern void trim_io_piece(struct thread_data *, const struct io_u *);
extern void queue_io_piece(struct thread_data *, struct io_piece *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);
extern int iolog_compress_init(struct thread_data *, struct sk_out *);
extern void iolog_compress_exit(struct thread_data *);
extern size_t log_chunk_sizes(struct io_log *);

#ifdef CONFIG_ZLIB
extern int iolog_file_inflate(const char *);
#endif

/*
 * Logging
 */
struct log_params {
	struct thread_data *td;
	unsigned long avg_msec;
	unsigned long hist_msec;
	int hist_coarseness;
	int log_type;
	int log_offset;
	int log_gz;
	int log_gz_store;
	int log_compress;
};
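
/*
 * A log is typically created by filling in a log_params and handing it to
 * setup_log() together with the output file name.  Illustrative sketch
 * only; the option fields referenced here (td->o.*) are assumptions about
 * the surrounding thread_options:
 *
 *	struct log_params p = {
 *		.td		= td,
 *		.avg_msec	= td->o.log_avg_msec,
 *		.log_type	= IO_LOG_TYPE_BW,
 *		.log_offset	= td->o.log_offset,
 *	};
 *	struct io_log *log;
 *
 *	setup_log(&log, &p, "bw.log");
 */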

static inline bool per_unit_log(struct io_log *log)
{
	return log && !log->avg_msec;
}

static inline bool inline_log(struct io_log *log)
{
	return log->log_type == IO_LOG_TYPE_LAT ||
		log->log_type == IO_LOG_TYPE_CLAT ||
		log->log_type == IO_LOG_TYPE_SLAT;
}
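
/*
 * per_unit_log() is true when no averaging window is configured, i.e. each
 * completed unit gets its own sample; inline_log() is true for the latency
 * log types that are sampled inline at completion time.  For example
 * (illustrative only; td->bw_log is an assumption about thread_data):
 *
 *	if (per_unit_log(td->bw_log))
 *		log_one_sample_per_io_u();
 *	else
 *		log_windowed_average();
 */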

static inline void ipo_bytes_align(unsigned int replay_align, struct io_piece *ipo)
{
	if (!replay_align)
		return;

	ipo->offset &= ~(replay_align - (uint64_t)1);
}
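
/*
 * The mask above rounds the offset down to a multiple of replay_align and
 * relies on replay_align being a power of two, e.g.:
 *
 *	ipo->offset = 4100; replay_align = 4096;
 *	ipo->offset &= ~(4096ULL - 1);	// offset becomes 4096
 */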

extern void finalize_logs(struct thread_data *td, bool);
extern void setup_log(struct io_log **, struct log_params *, const char *);
extern void flush_log(struct io_log *, bool);
extern void flush_samples(FILE *, void *, uint64_t);
extern unsigned long hist_sum(int, int, unsigned int *, unsigned int *);
extern void free_log(struct io_log *);
extern void fio_writeout_logs(bool);
extern void td_writeout_logs(struct thread_data *, bool);
extern int iolog_cur_flush(struct io_log *, struct io_logs *);

static inline void init_ipo(struct io_piece *ipo)
{
	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->trim_list);
}
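
/*
 * Callers are expected to init_ipo() a freshly allocated piece before
 * filling it in, e.g. (illustrative sketch only, off/len are hypothetical
 * caller variables):
 *
 *	struct io_piece *ipo = malloc(sizeof(*ipo));
 *
 *	if (ipo) {
 *		init_ipo(ipo);
 *		ipo->ddir = DDIR_READ;
 *		ipo->offset = off;
 *		ipo->len = len;
 *	}
 */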

struct iolog_compress {
	struct flist_head list;
	void *buf;
	size_t len;
	unsigned int seq;
};

#endif