#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <libgen.h>
#include <math.h>

#include "fio.h"
#include "diskutil.h"
#include "lib/ieee754.h"
#include "json.h"
#include "lib/getrusage.h"
#include "idletime.h"
#include "lib/pow2.h"
#include "lib/output_buffer.h"
#include "helper_thread.h"
#include "smalloc.h"

#define LOG_MSEC_SLACK	10

struct fio_mutex *stat_mutex;

void clear_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	fio_getrusage(&td->ru_start);
	ts->usr_time = ts->sys_time = 0;
	ts->ctx = 0;
	ts->minf = ts->majf = 0;
}

void update_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	fio_getrusage(&td->ru_end);
	ts->usr_time += mtime_since(&td->ru_start.ru_utime,
					&td->ru_end.ru_utime);
	ts->sys_time += mtime_since(&td->ru_start.ru_stime,
					&td->ru_end.ru_stime);
	ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
			- (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
	ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
	ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
}

/*
 * Given a latency, return the index of the corresponding bucket in
 * the structure tracking percentiles.
 *
 * (1) find the group (and error bits) that the value (latency)
 * belongs to by looking at its MSB. (2) find the bucket number in the
 * group by looking at the index bits.
 *
 */
static unsigned int plat_val_to_idx(unsigned int val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Find MSB starting from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val)*8) - __builtin_clz(val) - 1;

	/*
	 * MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index
	 */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* Compute the number of error bits to discard*/
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* Compute the number of buckets before the group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/*
	 * Discard the error bits and apply the mask to find the
	 * index for the buckets in the group
	 */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* Make sure the index does not exceed (array size - 1) */
	idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
		(base + offset) : (FIO_IO_U_PLAT_NR - 1);

	return idx;
}

/*
 * Convert the given index of the bucket array to the value
 * represented by the bucket
 */
static unsigned long long plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits, k, base;

	assert(idx < FIO_IO_U_PLAT_NR);

	/* MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index */
	if (idx < (FIO_IO_U_PLAT_VAL << 1))
		return idx;

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
	base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);

	/* Find its bucket number of the group */
	k = idx % FIO_IO_U_PLAT_VAL;

	/* Return the mean of the range of the bucket */
	return base + ((k + 0.5) * (1 << error_bits));
}

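/*
 * Worked example of the two conversions above (illustrative only; it
 * assumes FIO_IO_U_PLAT_BITS == 6 and hence FIO_IO_U_PLAT_VAL == 64 --
 * check stat.h for the values actually in use):
 *
 *   plat_val_to_idx(1000): msb = 9, error_bits = 9 - 6 = 3,
 *   base = (3 + 1) << 6 = 256, offset = 63 & (1000 >> 3) = 61,
 *   so idx = 256 + 61 = 317.
 *
 *   plat_idx_to_val(317): error_bits = (317 >> 6) - 1 = 3,
 *   base = 1 << (3 + 6) = 512, k = 317 % 64 = 61,
 *   so the returned value is 512 + 61.5 * 8 = 1004, the midpoint of the
 *   8-usec-wide bucket [1000, 1008) that the sample landed in.
 */
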
static int double_cmp(const void *a, const void *b)
{
	const fio_fp64_t fa = *(const fio_fp64_t *) a;
	const fio_fp64_t fb = *(const fio_fp64_t *) b;
	int cmp = 0;

	if (fa.u.f > fb.u.f)
		cmp = 1;
	else if (fa.u.f < fb.u.f)
		cmp = -1;

	return cmp;
}

unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
				   fio_fp64_t *plist, unsigned int **output,
				   unsigned int *maxv, unsigned int *minv)
{
	unsigned long sum = 0;
	unsigned int len, i, j = 0;
	unsigned int oval_len = 0;
	unsigned int *ovals = NULL;
	int is_last;

	*minv = -1U;
	*maxv = 0;

	len = 0;
	while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
		len++;

	if (!len)
		return 0;

	/*
	 * Sort the percentile list. Note that it may already be sorted if
	 * we are using the default values, but since it's a short list this
	 * isn't a worry. Also note that this does not work for NaN values.
	 */
	if (len > 1)
		qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

	/*
	 * Calculate bucket values, note down max and min values
	 */
	is_last = 0;
	for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
		sum += io_u_plat[i];
		while (sum >= (plist[j].u.f / 100.0 * nr)) {
			assert(plist[j].u.f <= 100.0);

			if (j == oval_len) {
				oval_len += 100;
				ovals = realloc(ovals, oval_len * sizeof(unsigned int));
			}

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
				*minv = ovals[j];
			if (ovals[j] > *maxv)
				*maxv = ovals[j];

			is_last = (j == len - 1);
			if (is_last)
				break;

			j++;
		}
	}

	*output = ovals;
	return len;
}

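/*
 * Illustrative walk-through of calc_clat_percentiles() (hypothetical
 * numbers, not taken from a real run): with nr = 1000 samples and a
 * plist of { 50.0, 99.0 }, the loop above accumulates bucket counts
 * until the running sum first reaches 500 and then 990, and records
 * plat_idx_to_val() of the bucket index reached at each point as the
 * 50th and 99th percentile completion latencies.
 */
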
/*
 * Find and display the p-th percentile of clat
 */
static void show_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
				  fio_fp64_t *plist, unsigned int precision,
				  struct buf_output *out)
{
	unsigned int len, j = 0, minv, maxv;
	unsigned int *ovals;
	int is_last, per_line, scale_down;
	char fmt[32];

	len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
	if (!len)
		goto out;

	/*
	 * We default to usecs, but if the value range is such that we
	 * should scale down to msecs, do that.
	 */
	if (minv > 2000 && maxv > 99999) {
		scale_down = 1;
		log_buf(out, "    clat percentiles (msec):\n     |");
	} else {
		scale_down = 0;
		log_buf(out, "    clat percentiles (usec):\n     |");
	}

	snprintf(fmt, sizeof(fmt), "%%1.%uf", precision);
	per_line = (80 - 7) / (precision + 14);

	for (j = 0; j < len; j++) {
		char fbuf[16], *ptr = fbuf;

		/* for formatting */
		if (j != 0 && (j % per_line) == 0)
			log_buf(out, "     |");

		/* end of the list */
		is_last = (j == len - 1);

		if (plist[j].u.f < 10.0)
			ptr += sprintf(fbuf, " ");

		snprintf(ptr, sizeof(fbuf), fmt, plist[j].u.f);

		if (scale_down)
			ovals[j] = (ovals[j] + 999) / 1000;

		log_buf(out, " %sth=[%5u]%c", fbuf, ovals[j], is_last ? '\n' : ',');

		if (is_last)
			break;

		if ((j % per_line) == per_line - 1)	/* for formatting */
			log_buf(out, "\n");
	}

out:
	if (ovals)
		free(ovals);
}

bool calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
	      double *mean, double *dev)
{
	double n = (double) is->samples;

	if (n == 0)
		return false;

	*min = is->min_val;
	*max = is->max_val;
	*mean = is->mean.u.f;

	if (n > 1.0)
		*dev = sqrt(is->S.u.f / (n - 1.0));
	else
		*dev = 0;

	return true;
}

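/*
 * Note on the math above: is->S holds the running sum of squared
 * deviations from the mean (Welford-style accumulation, updated where
 * samples are added elsewhere in the stats code), so the reported
 * deviation is the Bessel-corrected sample standard deviation
 * sqrt(S / (n - 1)).
 */
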
void show_group_stats(struct group_run_stats *rs, struct buf_output *out)
{
	char *io, *agg, *min, *max;
	char *ioalt, *aggalt, *minalt, *maxalt;
	const char *str[] = { "   READ", "  WRITE" , "   TRIM"};
	int i;

	log_buf(out, "\nRun status group %d (all jobs):\n", rs->groupid);

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		const int i2p = is_power_of_2(rs->kb_base);

		if (!rs->max_run[i])
			continue;

		io = num2str(rs->iobytes[i], 4, 1, i2p, N2S_BYTE);
		ioalt = num2str(rs->iobytes[i], 4, 1, !i2p, N2S_BYTE);
		agg = num2str(rs->agg[i], 4, 1, i2p, rs->unit_base);
		aggalt = num2str(rs->agg[i], 4, 1, !i2p, rs->unit_base);
		min = num2str(rs->min_bw[i], 4, 1, i2p, rs->unit_base);
		minalt = num2str(rs->min_bw[i], 4, 1, !i2p, rs->unit_base);
		max = num2str(rs->max_bw[i], 4, 1, i2p, rs->unit_base);
		maxalt = num2str(rs->max_bw[i], 4, 1, !i2p, rs->unit_base);
		log_buf(out, "%s: bw=%s (%s), %s-%s (%s-%s), io=%s (%s), run=%llu-%llumsec\n",
				rs->unified_rw_rep ? "  MIXED" : str[i],
				agg, aggalt, min, max, minalt, maxalt, io, ioalt,
				(unsigned long long) rs->min_run[i],
				(unsigned long long) rs->max_run[i]);

		free(io);
		free(agg);
		free(min);
		free(max);
		free(ioalt);
		free(aggalt);
		free(minalt);
		free(maxalt);
	}
}

void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist)
{
	int i;

	/*
	 * Do depth distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		if (total) {
			io_u_dist[i] = (double) map[i] / (double) total;
			io_u_dist[i] *= 100.0;
			if (io_u_dist[i] < 0.1 && map[i])
				io_u_dist[i] = 0.1;
		} else
			io_u_dist[i] = 0.0;
	}
}

static void stat_calc_lat(struct thread_stat *ts, double *dst,
			  unsigned int *src, int nr)
{
	unsigned long total = ddir_rw_sum(ts->total_io_u);
	int i;

	/*
	 * Do latency distribution calculations
	 */
	for (i = 0; i < nr; i++) {
		if (total) {
			dst[i] = (double) src[i] / (double) total;
			dst[i] *= 100.0;
			if (dst[i] < 0.01 && src[i])
				dst[i] = 0.01;
		} else
			dst[i] = 0.0;
	}
}

void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
}

void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
}

static void display_lat(const char *name, unsigned long min, unsigned long max,
			double mean, double dev, struct buf_output *out)
{
	const char *base = "(usec)";
	char *minp, *maxp;

	if (usec_to_msec(&min, &max, &mean, &dev))
		base = "(msec)";

	minp = num2str(min, 6, 1, 0, N2S_NONE);
	maxp = num2str(max, 6, 1, 0, N2S_NONE);

	log_buf(out, "    %s %s: min=%s, max=%s, avg=%5.02f,"
		 " stdev=%5.02f\n", name, base, minp, maxp, mean, dev);

	free(minp);
	free(maxp);
}

static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
			     int ddir, struct buf_output *out)
{
	const char *str[] = { " read", "write", " trim" };
	unsigned long min, max, runt;
	unsigned long long bw, iops;
	double mean, dev;
	char *io_p, *bw_p, *bw_p_alt, *iops_p;
	int i2p;

	assert(ddir_rw(ddir));

	if (!ts->runtime[ddir])
		return;

	i2p = is_power_of_2(rs->kb_base);
	runt = ts->runtime[ddir];

	bw = (1000 * ts->io_bytes[ddir]) / runt;
	io_p = num2str(ts->io_bytes[ddir], 4, 1, i2p, N2S_BYTE);
	bw_p = num2str(bw, 4, 1, i2p, ts->unit_base);
	bw_p_alt = num2str(bw, 4, 1, !i2p, ts->unit_base);

	iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
	iops_p = num2str(iops, 4, 1, 0, N2S_NONE);

	log_buf(out, "  %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)\n",
			rs->unified_rw_rep ? "mixed" : str[ddir],
			iops_p, bw_p, bw_p_alt, io_p,
			(unsigned long long) ts->runtime[ddir]);

	free(io_p);
	free(bw_p);
	free(bw_p_alt);
	free(iops_p);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		display_lat("slat", min, max, mean, dev, out);
	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		display_lat("clat", min, max, mean, dev, out);
	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
		display_lat(" lat", min, max, mean, dev, out);

	if (ts->clat_percentiles) {
		show_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list,
					ts->percentile_precision, out);
	}
	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0, fkb_base = (double)rs->kb_base;
		const char *bw_str;

		if ((rs->unit_base == 1) && i2p)
			bw_str = "Kibit";
		else if (rs->unit_base == 1)
			bw_str = "kbit";
		else if (i2p)
			bw_str = "KiB";
		else
			bw_str = "kB";

		if (rs->unit_base == 1) {
			min *= 8.0;
			max *= 8.0;
			mean *= 8.0;
			dev *= 8.0;
		}

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		if (mean > fkb_base * fkb_base) {
			min /= fkb_base;
			max /= fkb_base;
			mean /= fkb_base;
			dev /= fkb_base;
			bw_str = (rs->unit_base == 1 ? "Mibit" : "MiB");
		}

		log_buf(out, "   bw (%5s/s): min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, stdev=%5.02f\n",
			bw_str, min, max, p_of_agg, mean, dev);
	}
}

static int show_lat(double *io_u_lat, int nr, const char **ranges,
		    const char *msg, struct buf_output *out)
{
	int new_line = 1, i, line = 0, shown = 0;

	for (i = 0; i < nr; i++) {
		if (io_u_lat[i] <= 0.0)
			continue;
		shown = 1;
		if (new_line) {
			if (line)
				log_buf(out, "\n");
			log_buf(out, "    lat (%s) : ", msg);
			new_line = 0;
			line = 0;
		}
		if (line)
			log_buf(out, ", ");
		log_buf(out, "%s%3.2f%%", ranges[i], io_u_lat[i]);
		line++;
		if (line == 5)
			new_line = 1;
	}

	if (shown)
		log_buf(out, "\n");

	return shown;
}

static void show_lat_u(double *io_u_lat_u, struct buf_output *out)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", };

	show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec", out);
}

static void show_lat_m(double *io_u_lat_m, struct buf_output *out)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", "2000=",
				 ">=2000=", };

	show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec", out);
}

static void show_latencies(struct thread_stat *ts, struct buf_output *out)
{
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];

	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	show_lat_u(io_u_lat_u, out);
	show_lat_m(io_u_lat_m, out);
}

static int block_state_category(int block_state)
{
	switch (block_state) {
	case BLOCK_STATE_UNINIT:
		return 0;
	case BLOCK_STATE_TRIMMED:
	case BLOCK_STATE_WRITTEN:
		return 1;
	case BLOCK_STATE_WRITE_FAILURE:
	case BLOCK_STATE_TRIM_FAILURE:
		return 2;
	default:
		/* Silence compile warning on some BSDs and have a return */
		assert(0);
		return -1;
	}
}

static int compare_block_infos(const void *bs1, const void *bs2)
{
	uint32_t block1 = *(uint32_t *)bs1;
	uint32_t block2 = *(uint32_t *)bs2;
	int state1 = BLOCK_INFO_STATE(block1);
	int state2 = BLOCK_INFO_STATE(block2);
	int bscat1 = block_state_category(state1);
	int bscat2 = block_state_category(state2);
	int cycles1 = BLOCK_INFO_TRIMS(block1);
	int cycles2 = BLOCK_INFO_TRIMS(block2);

	if (bscat1 < bscat2)
		return -1;
	if (bscat1 > bscat2)
		return 1;

	if (cycles1 < cycles2)
		return -1;
	if (cycles1 > cycles2)
		return 1;

	if (state1 < state2)
		return -1;
	if (state1 > state2)
		return 1;

	assert(block1 == block2);
	return 0;
}

static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos,
				  fio_fp64_t *plist, unsigned int **percentiles,
				  unsigned int *types)
{
	int len = 0;
	int i, nr_uninit;

	qsort(block_infos, nr_block_infos, sizeof(uint32_t), compare_block_infos);

	while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
		len++;

	if (!len)
		return 0;

	/*
	 * Sort the percentile list. Note that it may already be sorted if
	 * we are using the default values, but since it's a short list this
	 * isn't a worry. Also note that this does not work for NaN values.
	 */
	if (len > 1)
		qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

	nr_uninit = 0;
	/* Start only after the uninit entries end */
	for (nr_uninit = 0;
	     nr_uninit < nr_block_infos
		&& BLOCK_INFO_STATE(block_infos[nr_uninit]) == BLOCK_STATE_UNINIT;
	     nr_uninit ++)
		;

	if (nr_uninit == nr_block_infos)
		return 0;

	*percentiles = calloc(len, sizeof(**percentiles));

	for (i = 0; i < len; i++) {
		int idx = (plist[i].u.f * (nr_block_infos - nr_uninit) / 100)
				+ nr_uninit;
		(*percentiles)[i] = BLOCK_INFO_TRIMS(block_infos[idx]);
	}

	memset(types, 0, sizeof(*types) * BLOCK_STATE_COUNT);
	for (i = 0; i < nr_block_infos; i++)
		types[BLOCK_INFO_STATE(block_infos[i])]++;

	return len;
}

static const char *block_state_names[] = {
	[BLOCK_STATE_UNINIT] = "unwritten",
	[BLOCK_STATE_TRIMMED] = "trimmed",
	[BLOCK_STATE_WRITTEN] = "written",
	[BLOCK_STATE_TRIM_FAILURE] = "trim failure",
	[BLOCK_STATE_WRITE_FAILURE] = "write failure",
};

static void show_block_infos(int nr_block_infos, uint32_t *block_infos,
			     fio_fp64_t *plist, struct buf_output *out)
{
	int len, pos, i;
	unsigned int *percentiles = NULL;
	unsigned int block_state_counts[BLOCK_STATE_COUNT];

	len = calc_block_percentiles(nr_block_infos, block_infos, plist,
				     &percentiles, block_state_counts);

	log_buf(out, "  block lifetime percentiles :\n   |");
	pos = 0;
	for (i = 0; i < len; i++) {
		uint32_t block_info = percentiles[i];
#define LINE_LENGTH	75
		char str[LINE_LENGTH];
		int strln = snprintf(str, LINE_LENGTH, " %3.2fth=%u%c",
				     plist[i].u.f, block_info,
				     i == len - 1 ? '\n' : ',');
		assert(strln < LINE_LENGTH);
		if (pos + strln > LINE_LENGTH) {
			pos = 0;
			log_buf(out, "\n   |");
		}
		log_buf(out, "%s", str);
		pos += strln;
#undef LINE_LENGTH
	}
	if (percentiles)
		free(percentiles);

	log_buf(out, "        states               :");
	for (i = 0; i < BLOCK_STATE_COUNT; i++)
		log_buf(out, " %s=%u%c",
			 block_state_names[i], block_state_counts[i],
			 i == BLOCK_STATE_COUNT - 1 ? '\n' : ',');
}

static void show_ss_normal(struct thread_stat *ts, struct buf_output *out)
{
	char *p1, *p1alt, *p2;
	unsigned long long bw_mean, iops_mean;
	const int i2p = is_power_of_2(ts->kb_base);

	if (!ts->ss_dur)
		return;

	bw_mean = steadystate_bw_mean(ts);
	iops_mean = steadystate_iops_mean(ts);

	p1 = num2str(bw_mean / ts->kb_base, 4, ts->kb_base, i2p, ts->unit_base);
	p1alt = num2str(bw_mean / ts->kb_base, 4, ts->kb_base, !i2p, ts->unit_base);
	p2 = num2str(iops_mean, 4, 1, 0, N2S_NONE);

	log_buf(out, "  steadystate  : attained=%s, bw=%s (%s), iops=%s, %s%s=%.3f%s\n",
		ts->ss_state & __FIO_SS_ATTAINED ? "yes" : "no",
		p1, p1alt, p2,
		ts->ss_state & __FIO_SS_IOPS ? "iops" : "bw",
		ts->ss_state & __FIO_SS_SLOPE ? " slope": " mean dev",
		ts->ss_criterion.u.f,
		ts->ss_state & __FIO_SS_PCT ? "%" : "");

	free(p1);
	free(p1alt);
	free(p2);
}

static void show_thread_status_normal(struct thread_stat *ts,
				      struct group_run_stats *rs,
				      struct buf_output *out)
{
	double usr_cpu, sys_cpu;
	unsigned long runtime;
	double io_u_dist[FIO_IO_U_MAP_NR];
	time_t time_p;
	char time_buf[32];

	if (!ddir_rw_sum(ts->io_bytes) && !ddir_rw_sum(ts->total_io_u))
		return;

	memset(time_buf, 0, sizeof(time_buf));

	time(&time_p);
	os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf));

	if (!ts->error) {
		log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d: %s",
					ts->name, ts->groupid, ts->members,
					ts->error, (int) ts->pid, time_buf);
	} else {
		log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d: %s",
					ts->name, ts->groupid, ts->members,
					ts->error, ts->verror, (int) ts->pid,
					time_buf);
	}

	if (strlen(ts->description))
		log_buf(out, "  Description  : [%s]\n", ts->description);

	if (ts->io_bytes[DDIR_READ])
		show_ddir_status(rs, ts, DDIR_READ, out);
	if (ts->io_bytes[DDIR_WRITE])
		show_ddir_status(rs, ts, DDIR_WRITE, out);
	if (ts->io_bytes[DDIR_TRIM])
		show_ddir_status(rs, ts, DDIR_TRIM, out);

	show_latencies(ts, out);

	runtime = ts->total_run_time;
	if (runtime) {
		double runt = (double) runtime;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_buf(out, "  cpu          : usr=%3.2f%%, sys=%3.2f%%, ctx=%llu,"
		 " majf=%llu, minf=%llu\n", usr_cpu, sys_cpu,
			(unsigned long long) ts->ctx,
			(unsigned long long) ts->majf,
			(unsigned long long) ts->minf);

	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	log_buf(out, "  IO depths    : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
		 " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
					io_u_dist[1], io_u_dist[2],
					io_u_dist[3], io_u_dist[4],
					io_u_dist[5], io_u_dist[6]);

	stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
	log_buf(out, "     submit    : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
					io_u_dist[1], io_u_dist[2],
					io_u_dist[3], io_u_dist[4],
					io_u_dist[5], io_u_dist[6]);
	stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
	log_buf(out, "     complete  : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
					io_u_dist[1], io_u_dist[2],
					io_u_dist[3], io_u_dist[4],
					io_u_dist[5], io_u_dist[6]);
	log_buf(out, "     issued rwt: total=%llu,%llu,%llu,"
				 " short=%llu,%llu,%llu,"
				 " dropped=%llu,%llu,%llu\n",
					(unsigned long long) ts->total_io_u[0],
					(unsigned long long) ts->total_io_u[1],
					(unsigned long long) ts->total_io_u[2],
					(unsigned long long) ts->short_io_u[0],
					(unsigned long long) ts->short_io_u[1],
					(unsigned long long) ts->short_io_u[2],
					(unsigned long long) ts->drop_io_u[0],
					(unsigned long long) ts->drop_io_u[1],
					(unsigned long long) ts->drop_io_u[2]);
	if (ts->continue_on_error) {
		log_buf(out, "     errors    : total=%llu, first_error=%d/<%s>\n",
					(unsigned long long)ts->total_err_count,
					ts->first_error,
					strerror(ts->first_error));
	}
	if (ts->latency_depth) {
		log_buf(out, "     latency   : target=%llu, window=%llu, percentile=%.2f%%, depth=%u\n",
					(unsigned long long)ts->latency_target,
					(unsigned long long)ts->latency_window,
					ts->latency_percentile.u.f,
					ts->latency_depth);
	}

	if (ts->nr_block_infos)
		show_block_infos(ts->nr_block_infos, ts->block_infos,
				  ts->percentile_list, out);

	if (ts->ss_dur)
		show_ss_normal(ts, out);
}

static void show_ddir_status_terse(struct thread_stat *ts,
				   struct group_run_stats *rs, int ddir,
				   struct buf_output *out)
{
	unsigned long min, max;
	unsigned long long bw, iops;
	unsigned int *ovals = NULL;
	double mean, dev;
	unsigned int len, minv, maxv;
	int i;

	assert(ddir_rw(ddir));

	iops = bw = 0;
	if (ts->runtime[ddir]) {
		uint64_t runt = ts->runtime[ddir];

		bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */
		iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
	}

	log_buf(out, ";%llu;%llu;%llu;%llu",
		(unsigned long long) ts->io_bytes[ddir] >> 10, bw, iops,
					(unsigned long long) ts->runtime[ddir]);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		log_buf(out, ";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_buf(out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		log_buf(out, ";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_buf(out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (ts->clat_percentiles) {
		len = calc_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list, &ovals, &maxv,
					&minv);
	} else
		len = 0;

	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
		if (i >= len) {
			log_buf(out, ";0%%=0");
			continue;
		}
		log_buf(out, ";%f%%=%u", ts->percentile_list[i].u.f, ovals[i]);
	}

	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
		log_buf(out, ";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_buf(out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (ovals)
		free(ovals);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0;

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		log_buf(out, ";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
	} else
		log_buf(out, ";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}

static void add_ddir_status_json(struct thread_stat *ts,
		struct group_run_stats *rs, int ddir, struct json_object *parent)
{
	unsigned long min, max;
	unsigned long long bw;
	unsigned int *ovals = NULL;
	double mean, dev, iops;
	unsigned int len, minv, maxv;
	int i;
	const char *ddirname[] = {"read", "write", "trim"};
	struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object;
	char buf[120];
	double p_of_agg = 100.0;

	assert(ddir_rw(ddir));

	if (ts->unified_rw_rep && ddir != DDIR_READ)
		return;

	dir_object = json_create_object();
	json_object_add_value_object(parent,
		ts->unified_rw_rep ? "mixed" : ddirname[ddir], dir_object);

	bw = 0;
	iops = 0.0;
	if (ts->runtime[ddir]) {
		uint64_t runt = ts->runtime[ddir];

		bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */
		iops = (1000.0 * (uint64_t) ts->total_io_u[ddir]) / runt;
	}

	json_object_add_value_int(dir_object, "io_bytes", ts->io_bytes[ddir] >> 10);
	json_object_add_value_int(dir_object, "bw", bw);
	json_object_add_value_float(dir_object, "iops", iops);
	json_object_add_value_int(dir_object, "runtime", ts->runtime[ddir]);
	json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[ddir]);
	json_object_add_value_int(dir_object, "short_ios", ts->short_io_u[ddir]);
	json_object_add_value_int(dir_object, "drop_ios", ts->drop_io_u[ddir]);

	if (!calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "slat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);

	if (!calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "clat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);

	if (ts->clat_percentiles) {
		len = calc_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list, &ovals, &maxv,
					&minv);
	} else
		len = 0;

	percentile_object = json_create_object();
	json_object_add_value_object(tmp_object, "percentile", percentile_object);
	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
		if (i >= len) {
			json_object_add_value_int(percentile_object, "0.00", 0);
			continue;
		}
		snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
		json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]);
	}

	if (output_format & FIO_OUTPUT_JSON_PLUS) {
		clat_bins_object = json_create_object();
		json_object_add_value_object(tmp_object, "bins", clat_bins_object);
		for(i = 0; i < FIO_IO_U_PLAT_NR; i++) {
			if (ts->io_u_plat[ddir][i]) {
				snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
				json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_plat[ddir][i]);
			}
		}
	}

	if (!calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "lat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);
	if (ovals)
		free(ovals);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}
	} else {
		min = max = 0;
		p_of_agg = mean = dev = 0.0;
	}
	json_object_add_value_int(dir_object, "bw_min", min);
	json_object_add_value_int(dir_object, "bw_max", max);
	json_object_add_value_float(dir_object, "bw_agg", p_of_agg);
	json_object_add_value_float(dir_object, "bw_mean", mean);
	json_object_add_value_float(dir_object, "bw_dev", dev);
}

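/*
 * Rough shape of the per-direction object built above (field values are
 * placeholders, not real output):
 *
 *   "read" : {
 *     "io_bytes" : ..., "bw" : ..., "iops" : ..., "runtime" : ...,
 *     "total_ios" : ..., "short_ios" : ..., "drop_ios" : ...,
 *     "slat" : { "min" : ..., "max" : ..., "mean" : ..., "stddev" : ... },
 *     "clat" : { "min" : ..., "max" : ..., "mean" : ..., "stddev" : ...,
 *                "percentile" : { "99.000000" : ... },
 *                "bins" : { ... }      <- only with FIO_OUTPUT_JSON_PLUS
 *     },
 *     "lat"  : { "min" : ..., "max" : ..., "mean" : ..., "stddev" : ... },
 *     "bw_min" : ..., "bw_max" : ..., "bw_agg" : ...,
 *     "bw_mean" : ..., "bw_dev" : ...
 *   }
 */
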
static void show_thread_status_terse_v2(struct thread_stat *ts,
					struct group_run_stats *rs,
					struct buf_output *out)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	/* General Info */
	log_buf(out, "2;%s;%d;%d", ts->name, ts->groupid, ts->error);
	/* Log Read Status */
	show_ddir_status_terse(ts, rs, DDIR_READ, out);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, DDIR_WRITE, out);
	/* Log Trim Status */
	show_ddir_status_terse(ts, rs, DDIR_TRIM, out);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_buf(out, ";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
						(unsigned long long) ts->ctx,
						(unsigned long long) ts->majf,
						(unsigned long long) ts->minf);

	/* Calc % distribution of IO depths, usecond, msecond latency */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show fixed 7 I/O depth levels*/
	log_buf(out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
			io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
			io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_buf(out, ";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_buf(out, ";%3.2f%%", io_u_lat_m[i]);
	/* Additional output if continue_on_error set - default off*/
	if (ts->continue_on_error)
		log_buf(out, ";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);
	log_buf(out, "\n");

	/* Additional output if description is set */
	if (strlen(ts->description))
		log_buf(out, ";%s", ts->description);

	log_buf(out, "\n");
}

static void show_thread_status_terse_v3_v4(struct thread_stat *ts,
					   struct group_run_stats *rs, int ver,
					   struct buf_output *out)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	/* General Info */
	log_buf(out, "%d;%s;%s;%d;%d", ver, fio_version_string,
					ts->name, ts->groupid, ts->error);
	/* Log Read Status */
	show_ddir_status_terse(ts, rs, DDIR_READ, out);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, DDIR_WRITE, out);
	/* Log Trim Status */
	if (ver == 4)
		show_ddir_status_terse(ts, rs, DDIR_TRIM, out);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_buf(out, ";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
						(unsigned long long) ts->ctx,
						(unsigned long long) ts->majf,
						(unsigned long long) ts->minf);

	/* Calc % distribution of IO depths, usecond, msecond latency */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show fixed 7 I/O depth levels*/
	log_buf(out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
			io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
			io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_buf(out, ";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_buf(out, ";%3.2f%%", io_u_lat_m[i]);

	/* disk util stats, if any */
	show_disk_util(1, NULL, out);

	/* Additional output if continue_on_error set - default off*/
	if (ts->continue_on_error)
		log_buf(out, ";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);

	/* Additional output if description is set */
	if (strlen(ts->description))
		log_buf(out, ";%s", ts->description);

	log_buf(out, "\n");
}

static void json_add_job_opts(struct json_object *root, const char *name,
			      struct flist_head *opt_list, bool num_jobs)
{
	struct json_object *dir_object;
	struct flist_head *entry;
	struct print_option *p;

	if (flist_empty(opt_list))
		return;

	dir_object = json_create_object();
	json_object_add_value_object(root, name, dir_object);

	flist_for_each(entry, opt_list) {
		const char *pos = "";

		p = flist_entry(entry, struct print_option, list);
		if (!num_jobs && !strcmp(p->name, "numjobs"))
			continue;
		if (p->value)
			pos = p->value;
		json_object_add_value_string(dir_object, p->name, pos);
	}
}

static struct json_object *show_thread_status_json(struct thread_stat *ts,
						   struct group_run_stats *rs,
						   struct flist_head *opt_list)
{
	struct json_object *root, *tmp;
	struct jobs_eta *je;
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;
	size_t size;

	root = json_create_object();
	json_object_add_value_string(root, "jobname", ts->name);
	json_object_add_value_int(root, "groupid", ts->groupid);
	json_object_add_value_int(root, "error", ts->error);

	/* ETA Info */
	je = get_jobs_eta(true, &size);
	if (je) {
		json_object_add_value_int(root, "eta", je->eta_sec);
		json_object_add_value_int(root, "elapsed", je->elapsed_sec);
	}

	if (opt_list)
		json_add_job_opts(root, "job options", opt_list, true);

	add_ddir_status_json(ts, rs, DDIR_READ, root);
	add_ddir_status_json(ts, rs, DDIR_WRITE, root);
	add_ddir_status_json(ts, rs, DDIR_TRIM, root);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}
	json_object_add_value_float(root, "usr_cpu", usr_cpu);
	json_object_add_value_float(root, "sys_cpu", sys_cpu);
	json_object_add_value_int(root, "ctx", ts->ctx);
	json_object_add_value_int(root, "majf", ts->majf);
	json_object_add_value_int(root, "minf", ts->minf);


	/* Calc % distribution of IO depths, usecond, msecond latency */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	tmp = json_create_object();
	json_object_add_value_object(root, "iodepth_level", tmp);
	/* Only show fixed 7 I/O depth levels*/
	for (i = 0; i < 7; i++) {
		char name[20];
		if (i < 6)
			snprintf(name, 20, "%d", 1 << i);
		else
			snprintf(name, 20, ">=%d", 1 << i);
		json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
	}

	tmp = json_create_object();
	json_object_add_value_object(root, "latency_us", tmp);
	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
				 "250", "500", "750", "1000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
	}
	/* Millisecond latency */
	tmp = json_create_object();
	json_object_add_value_object(root, "latency_ms", tmp);
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
				 "250", "500", "750", "1000", "2000",
				 ">=2000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
	}

	/* Additional output if continue_on_error set - default off*/
	if (ts->continue_on_error) {
		json_object_add_value_int(root, "total_err", ts->total_err_count);
		json_object_add_value_int(root, "first_error", ts->first_error);
	}

	if (ts->latency_depth) {
		json_object_add_value_int(root, "latency_depth", ts->latency_depth);
		json_object_add_value_int(root, "latency_target", ts->latency_target);
		json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
		json_object_add_value_int(root, "latency_window", ts->latency_window);
	}

	/* Additional output if description is set */
	if (strlen(ts->description))
		json_object_add_value_string(root, "desc", ts->description);

	if (ts->nr_block_infos) {
		/* Block error histogram and types */
		int len;
		unsigned int *percentiles = NULL;
		unsigned int block_state_counts[BLOCK_STATE_COUNT];

		len = calc_block_percentiles(ts->nr_block_infos, ts->block_infos,
					     ts->percentile_list,
					     &percentiles, block_state_counts);

		if (len) {
			struct json_object *block, *percentile_object, *states;
			int state;
			block = json_create_object();
			json_object_add_value_object(root, "block", block);

			percentile_object = json_create_object();
			json_object_add_value_object(block, "percentiles",
						     percentile_object);
			for (i = 0; i < len; i++) {
				char buf[20];
				snprintf(buf, sizeof(buf), "%f",
					 ts->percentile_list[i].u.f);
				json_object_add_value_int(percentile_object,
							  (const char *)buf,
							  percentiles[i]);
			}

			states = json_create_object();
			json_object_add_value_object(block, "states", states);
			for (state = 0; state < BLOCK_STATE_COUNT; state++) {
				json_object_add_value_int(states,
					block_state_names[state],
					block_state_counts[state]);
			}
			free(percentiles);
		}
	}

	if (ts->ss_dur) {
		struct json_object *data;
		struct json_array *iops, *bw;
		int i, j, k;
		char ss_buf[64];

		snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
			ts->ss_state & __FIO_SS_IOPS ? "iops" : "bw",
			ts->ss_state & __FIO_SS_SLOPE ? "_slope" : "",
			(float) ts->ss_limit.u.f,
			ts->ss_state & __FIO_SS_PCT ? "%" : "");

		tmp = json_create_object();
		json_object_add_value_object(root, "steadystate", tmp);
		json_object_add_value_string(tmp, "ss", ss_buf);
		json_object_add_value_int(tmp, "duration", (int)ts->ss_dur);
		json_object_add_value_int(tmp, "attained", (ts->ss_state & __FIO_SS_ATTAINED) > 0);

		snprintf(ss_buf, sizeof(ss_buf), "%f%s", (float) ts->ss_criterion.u.f,
			ts->ss_state & __FIO_SS_PCT ? "%" : "");
		json_object_add_value_string(tmp, "criterion", ss_buf);
		json_object_add_value_float(tmp, "max_deviation", ts->ss_deviation.u.f);
		json_object_add_value_float(tmp, "slope", ts->ss_slope.u.f);

		data = json_create_object();
		json_object_add_value_object(tmp, "data", data);
		bw = json_create_array();
		iops = json_create_array();

		/*
		** if ss was attained or the buffer is not full,
		** ss->head points to the first element in the list.
		** otherwise it actually points to the second element
		** in the list
		*/
		if ((ts->ss_state & __FIO_SS_ATTAINED) || !(ts->ss_state & __FIO_SS_BUFFER_FULL))
			j = ts->ss_head;
		else
			j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
		for (i = 0; i < ts->ss_dur; i++) {
			k = (j + i) % ts->ss_dur;
			json_array_add_value_int(bw, ts->ss_bw_data[k]);
			json_array_add_value_int(iops, ts->ss_iops_data[k]);
		}
		json_object_add_value_int(data, "bw_mean", steadystate_bw_mean(ts));
		json_object_add_value_int(data, "iops_mean", steadystate_iops_mean(ts));
		json_object_add_value_array(data, "iops", iops);
		json_object_add_value_array(data, "bw", bw);
	}

	return root;
}

static void show_thread_status_terse(struct thread_stat *ts,
				     struct group_run_stats *rs,
				     struct buf_output *out)
{
	if (terse_version == 2)
		show_thread_status_terse_v2(ts, rs, out);
	else if (terse_version == 3 || terse_version == 4)
		show_thread_status_terse_v3_v4(ts, rs, terse_version, out);
	else
		log_err("fio: bad terse version!? %d\n", terse_version);
}

struct json_object *show_thread_status(struct thread_stat *ts,
				       struct group_run_stats *rs,
				       struct flist_head *opt_list,
				       struct buf_output *out)
{
	struct json_object *ret = NULL;

	if (output_format & FIO_OUTPUT_TERSE)
		show_thread_status_terse(ts, rs,  out);
	if (output_format & FIO_OUTPUT_JSON)
		ret = show_thread_status_json(ts, rs, opt_list);
	if (output_format & FIO_OUTPUT_NORMAL)
		show_thread_status_normal(ts, rs,  out);

	return ret;
}

static void sum_stat(struct io_stat *dst, struct io_stat *src, bool first)
{
	double mean, S;

	if (src->samples == 0)
		return;

	dst->min_val = min(dst->min_val, src->min_val);
	dst->max_val = max(dst->max_val, src->max_val);

	/*
	 * Compute new mean and S after the merge
	 * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
	 *  #Parallel_algorithm>
	 */
	if (first) {
		mean = src->mean.u.f;
		S = src->S.u.f;
	} else {
		double delta = src->mean.u.f - dst->mean.u.f;

		mean = ((src->mean.u.f * src->samples) +
			(dst->mean.u.f * dst->samples)) /
			(dst->samples + src->samples);

		S =  src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
			(dst->samples * src->samples) /
			(dst->samples + src->samples);
	}

	dst->samples += src->samples;
	dst->mean.u.f = mean;
	dst->S.u.f = S;
}

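/*
 * The merge above is the standard parallel combination of two
 * (count, mean, S) triples: with n_a and n_b samples and
 * delta = mean_b - mean_a,
 *
 *   mean = (n_a * mean_a + n_b * mean_b) / (n_a + n_b)
 *   S    = S_a + S_b + delta^2 * n_a * n_b / (n_a + n_b)
 *
 * so stats summed across jobs still yield a usable stddev via calc_lat().
 */
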
void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (dst->max_run[i] < src->max_run[i])
			dst->max_run[i] = src->max_run[i];
		if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
			dst->min_run[i] = src->min_run[i];
		if (dst->max_bw[i] < src->max_bw[i])
			dst->max_bw[i] = src->max_bw[i];
		if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
			dst->min_bw[i] = src->min_bw[i];

		dst->iobytes[i] += src->iobytes[i];
		dst->agg[i] += src->agg[i];
	}

	if (!dst->kb_base)
		dst->kb_base = src->kb_base;
	if (!dst->unit_base)
		dst->unit_base = src->unit_base;
}

void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src,
		      bool first)
{
	int l, k;

	for (l = 0; l < DDIR_RWDIR_CNT; l++) {
		if (!dst->unified_rw_rep) {
			sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first);
			sum_stat(&dst->slat_stat[l], &src->slat_stat[l], first);
			sum_stat(&dst->lat_stat[l], &src->lat_stat[l], first);
			sum_stat(&dst->bw_stat[l], &src->bw_stat[l], first);

			dst->io_bytes[l] += src->io_bytes[l];

			if (dst->runtime[l] < src->runtime[l])
				dst->runtime[l] = src->runtime[l];
		} else {
			sum_stat(&dst->clat_stat[0], &src->clat_stat[l], first);
			sum_stat(&dst->slat_stat[0], &src->slat_stat[l], first);
			sum_stat(&dst->lat_stat[0], &src->lat_stat[l], first);
			sum_stat(&dst->bw_stat[0], &src->bw_stat[l], first);

			dst->io_bytes[0] += src->io_bytes[l];

			if (dst->runtime[0] < src->runtime[l])
				dst->runtime[0] = src->runtime[l];

			/*
			 * We're summing to the same destination, so override
			 * 'first' after the first iteration of the loop
			 */
			first = false;
		}
	}

	dst->usr_time += src->usr_time;
	dst->sys_time += src->sys_time;
	dst->ctx += src->ctx;
	dst->majf += src->majf;
	dst->minf += src->minf;

	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_map[k] += src->io_u_map[k];
	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_submit[k] += src->io_u_submit[k];
	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_complete[k] += src->io_u_complete[k];
	for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
		dst->io_u_lat_u[k] += src->io_u_lat_u[k];
	for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
		dst->io_u_lat_m[k] += src->io_u_lat_m[k];

	for (k = 0; k < DDIR_RWDIR_CNT; k++) {
		if (!dst->unified_rw_rep) {
			dst->total_io_u[k] += src->total_io_u[k];
			dst->short_io_u[k] += src->short_io_u[k];
			dst->drop_io_u[k] += src->drop_io_u[k];
		} else {
			dst->total_io_u[0] += src->total_io_u[k];
			dst->short_io_u[0] += src->short_io_u[k];
			dst->drop_io_u[0] += src->drop_io_u[k];
		}
	}

	for (k = 0; k < DDIR_RWDIR_CNT; k++) {
		int m;

		for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
			if (!dst->unified_rw_rep)
				dst->io_u_plat[k][m] += src->io_u_plat[k][m];
			else
				dst->io_u_plat[0][m] += src->io_u_plat[k][m];
		}
	}

	dst->total_run_time += src->total_run_time;
	dst->total_submit += src->total_submit;
	dst->total_complete += src->total_complete;
}

void init_group_run_stat(struct group_run_stats *gs)
{
	int i;
	memset(gs, 0, sizeof(*gs));

	for (i = 0; i < DDIR_RWDIR_CNT; i++)
		gs->min_bw[i] = gs->min_run[i] = ~0UL;
}

void init_thread_stat(struct thread_stat *ts)
{
	int j;

	memset(ts, 0, sizeof(*ts));

	for (j = 0; j < DDIR_RWDIR_CNT; j++) {
		ts->lat_stat[j].min_val = -1UL;
		ts->clat_stat[j].min_val = -1UL;
		ts->slat_stat[j].min_val = -1UL;
		ts->bw_stat[j].min_val = -1UL;
	}
	ts->groupid = -1;
}

void __show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	struct thread_stat *threadstats, *ts;
	int i, j, k, nr_ts, last_ts, idx;
	int kb_base_warned = 0;
	int unit_base_warned = 0;
	struct json_object *root = NULL;
	struct json_array *array = NULL;
	struct buf_output output[FIO_OUTPUT_NR];
	struct flist_head **opt_lists;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++)
		init_group_run_stat(&runstats[i]);

	/*
	 * find out how many threads stats we need. if group reporting isn't
	 * enabled, it's one-per-td.
	 */
	nr_ts = 0;
	last_ts = -1;
	for_each_td(td, i) {
		if (!td->o.group_reporting) {
			nr_ts++;
			continue;
		}
		if (last_ts == td->groupid)
			continue;
		if (!td->o.stats)
			continue;

		last_ts = td->groupid;
		nr_ts++;
	}

	threadstats = malloc(nr_ts * sizeof(struct thread_stat));
	opt_lists = malloc(nr_ts * sizeof(struct flist_head *));

	for (i = 0; i < nr_ts; i++) {
		init_thread_stat(&threadstats[i]);
		opt_lists[i] = NULL;
	}

	j = 0;
	last_ts = -1;
	idx = 0;
	for_each_td(td, i) {
		if (!td->o.stats)
			continue;
		if (idx && (!td->o.group_reporting ||
		    (td->o.group_reporting && last_ts != td->groupid))) {
			idx = 0;
			j++;
		}

		last_ts = td->groupid;

		ts = &threadstats[j];

		ts->clat_percentiles = td->o.clat_percentiles;
		ts->percentile_precision = td->o.percentile_precision;
		memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));
		opt_lists[j] = &td->opt_list;

		idx++;
		ts->members++;

		if (ts->groupid == -1) {
			/*
			 * These are per-group shared already
			 */
			strncpy(ts->name, td->o.name, FIO_JOBNAME_SIZE - 1);
			if (td->o.description)
				strncpy(ts->description, td->o.description,
						FIO_JOBDESC_SIZE - 1);
			else
				memset(ts->description, 0, FIO_JOBDESC_SIZE);

			/*
			 * If multiple entries in this group, this is
			 * the first member.
			 */
			ts->thread_number = td->thread_number;
			ts->groupid = td->groupid;

			/*
			 * first pid in group, not very useful...
			 */
			ts->pid = td->pid;

			ts->kb_base = td->o.kb_base;
			ts->unit_base = td->o.unit_base;
			ts->unified_rw_rep = td->o.unified_rw_rep;
		} else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
			log_info("fio: kb_base differs for jobs in group, using"
				 " %u as the base\n", ts->kb_base);
			kb_base_warned = 1;
		} else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
			log_info("fio: unit_base differs for jobs in group, using"
				 " %u as the base\n", ts->unit_base);
			unit_base_warned = 1;
		}

1659 		ts->continue_on_error = td->o.continue_on_error;
1660 		ts->total_err_count += td->total_err_count;
1661 		ts->first_error = td->first_error;
1662 		if (!ts->error) {
1663 			if (!td->error && td->o.continue_on_error &&
1664 			    td->first_error) {
1665 				ts->error = td->first_error;
1666 				ts->verror[sizeof(ts->verror) - 1] = '\0';
1667 				strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
1668 			} else  if (td->error) {
1669 				ts->error = td->error;
1670 				ts->verror[sizeof(ts->verror) - 1] = '\0';
1671 				strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
1672 			}
1673 		}
1674 
1675 		ts->latency_depth = td->latency_qd;
1676 		ts->latency_target = td->o.latency_target;
1677 		ts->latency_percentile = td->o.latency_percentile;
1678 		ts->latency_window = td->o.latency_window;
1679 
1680 		ts->nr_block_infos = td->ts.nr_block_infos;
1681 		for (k = 0; k < ts->nr_block_infos; k++)
1682 			ts->block_infos[k] = td->ts.block_infos[k];
1683 
1684 		sum_thread_stats(ts, &td->ts, idx == 1);
1685 
1686 		if (td->o.ss_dur) {
1687 			ts->ss_state = td->ss.state;
1688 			ts->ss_dur = td->ss.dur;
1689 			ts->ss_head = td->ss.head;
1690 			ts->ss_bw_data = td->ss.bw_data;
1691 			ts->ss_iops_data = td->ss.iops_data;
1692 			ts->ss_limit.u.f = td->ss.limit;
1693 			ts->ss_slope.u.f = td->ss.slope;
1694 			ts->ss_deviation.u.f = td->ss.deviation;
1695 			ts->ss_criterion.u.f = td->ss.criterion;
1696 		}
1697 		else
1698 			ts->ss_dur = ts->ss_state = 0;
1699 	}
1700 
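	/*
	 * Fold per-thread stats into their group's run stats. runtime[] is
	 * in msec, so io_bytes * 1000 / runtime yields bandwidth in bytes/sec.
	 */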
1701 	for (i = 0; i < nr_ts; i++) {
1702 		unsigned long long bw;
1703 
1704 		ts = &threadstats[i];
1705 		if (ts->groupid == -1)
1706 			continue;
1707 		rs = &runstats[ts->groupid];
1708 		rs->kb_base = ts->kb_base;
1709 		rs->unit_base = ts->unit_base;
1710 		rs->unified_rw_rep += ts->unified_rw_rep;
1711 
1712 		for (j = 0; j < DDIR_RWDIR_CNT; j++) {
1713 			if (!ts->runtime[j])
1714 				continue;
1715 			if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
1716 				rs->min_run[j] = ts->runtime[j];
1717 			if (ts->runtime[j] > rs->max_run[j])
1718 				rs->max_run[j] = ts->runtime[j];
1719 
1720 			bw = 0;
1721 			if (ts->runtime[j])
1722 				bw = ts->io_bytes[j] * 1000 / ts->runtime[j];
1723 			if (bw < rs->min_bw[j])
1724 				rs->min_bw[j] = bw;
1725 			if (bw > rs->max_bw[j])
1726 				rs->max_bw[j] = bw;
1727 
1728 			rs->iobytes[j] += ts->io_bytes[j];
1729 		}
1730 	}
1731 
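	/*
	 * Aggregate group bandwidth: total bytes moved by the group divided
	 * by the longest runtime of any member, again in bytes/sec.
	 */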
1732 	for (i = 0; i < groupid + 1; i++) {
1733 		int ddir;
1734 
1735 		rs = &runstats[i];
1736 
1737 		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
1738 			if (rs->max_run[ddir])
1739 				rs->agg[ddir] = (rs->iobytes[ddir] * 1000) /
1740 						rs->max_run[ddir];
1741 		}
1742 	}
1743 
1744 	for (i = 0; i < FIO_OUTPUT_NR; i++)
1745 		buf_output_init(&output[i]);
1746 
1747 	/*
1748 	 * don't overwrite last signal output
1749 	 */
1750 	if (output_format & FIO_OUTPUT_NORMAL)
1751 		log_buf(&output[__FIO_OUTPUT_NORMAL], "\n");
1752 	if (output_format & FIO_OUTPUT_JSON) {
1753 		struct thread_data *global;
1754 		char time_buf[32];
1755 		struct timeval now;
1756 		unsigned long long ms_since_epoch;
1757 
1758 		gettimeofday(&now, NULL);
1759 		ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
1760 		                 (unsigned long long)(now.tv_usec) / 1000;
1761 
1762 		os_ctime_r((const time_t *) &now.tv_sec, time_buf,
1763 				sizeof(time_buf));
1764 		if (time_buf[strlen(time_buf) - 1] == '\n')
1765 			time_buf[strlen(time_buf) - 1] = '\0';
1766 
1767 		root = json_create_object();
1768 		json_object_add_value_string(root, "fio version", fio_version_string);
1769 		json_object_add_value_int(root, "timestamp", now.tv_sec);
1770 		json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
1771 		json_object_add_value_string(root, "time", time_buf);
1772 		global = get_global_options();
1773 		json_add_job_opts(root, "global options", &global->opt_list, false);
1774 		array = json_create_array();
1775 		json_object_add_value_array(root, "jobs", array);
1776 	}
1777 
1778 	if (is_backend)
1779 		fio_server_send_job_options(&get_global_options()->opt_list, -1U);
1780 
1781 	for (i = 0; i < nr_ts; i++) {
1782 		ts = &threadstats[i];
1783 		rs = &runstats[ts->groupid];
1784 
1785 		if (is_backend) {
1786 			fio_server_send_job_options(opt_lists[i], i);
1787 			fio_server_send_ts(ts, rs);
1788 		} else {
1789 			if (output_format & FIO_OUTPUT_TERSE)
1790 				show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
1791 			if (output_format & FIO_OUTPUT_JSON) {
1792 				struct json_object *tmp = show_thread_status_json(ts, rs, opt_lists[i]);
1793 				json_array_add_value_object(array, tmp);
1794 			}
1795 			if (output_format & FIO_OUTPUT_NORMAL)
1796 				show_thread_status_normal(ts, rs, &output[__FIO_OUTPUT_NORMAL]);
1797 		}
1798 	}
1799 	if (!is_backend && (output_format & FIO_OUTPUT_JSON)) {
1800 		/* disk util stats, if any */
1801 		show_disk_util(1, root, &output[__FIO_OUTPUT_JSON]);
1802 
1803 		show_idle_prof_stats(FIO_OUTPUT_JSON, root, &output[__FIO_OUTPUT_JSON]);
1804 
1805 		json_print_object(root, &output[__FIO_OUTPUT_JSON]);
1806 		log_buf(&output[__FIO_OUTPUT_JSON], "\n");
1807 		json_free_object(root);
1808 	}
1809 
1810 	for (i = 0; i < groupid + 1; i++) {
1811 		rs = &runstats[i];
1812 
1813 		rs->groupid = i;
1814 		if (is_backend)
1815 			fio_server_send_gs(rs);
1816 		else if (output_format & FIO_OUTPUT_NORMAL)
1817 			show_group_stats(rs, &output[__FIO_OUTPUT_NORMAL]);
1818 	}
1819 
1820 	if (is_backend)
1821 		fio_server_send_du();
1822 	else if (output_format & FIO_OUTPUT_NORMAL) {
1823 		show_disk_util(0, NULL, &output[__FIO_OUTPUT_NORMAL]);
1824 		show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, &output[__FIO_OUTPUT_NORMAL]);
1825 	}
1826 
1827 	for (i = 0; i < FIO_OUTPUT_NR; i++) {
1828 		buf_output_flush(&output[i]);
1829 		buf_output_free(&output[i]);
1830 	}
1831 
1832 	log_info_flush();
1833 	free(runstats);
1834 	free(threadstats);
1835 	free(opt_lists);
1836 }
1837 
show_run_stats(void)1838 void show_run_stats(void)
1839 {
1840 	fio_mutex_down(stat_mutex);
1841 	__show_run_stats();
1842 	fio_mutex_up(stat_mutex);
1843 }
1844 
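/*
 * Show stats for a run that is still in progress. The time spent in the
 * current interval is temporarily folded into the per-ddir runtimes (and
 * rusage is refreshed for running threads) before calling __show_run_stats(),
 * then subtracted again so the final accounting is unaffected.
 */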
__show_running_run_stats(void)1845 void __show_running_run_stats(void)
1846 {
1847 	struct thread_data *td;
1848 	unsigned long long *rt;
1849 	struct timeval tv;
1850 	int i;
1851 
1852 	fio_mutex_down(stat_mutex);
1853 
1854 	rt = malloc(thread_number * sizeof(unsigned long long));
1855 	fio_gettime(&tv, NULL);
1856 
1857 	for_each_td(td, i) {
1858 		td->update_rusage = 1;
1859 		td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
1860 		td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
1861 		td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
1862 		td->ts.total_run_time = mtime_since(&td->epoch, &tv);
1863 
1864 		rt[i] = mtime_since(&td->start, &tv);
1865 		if (td_read(td) && td->ts.io_bytes[DDIR_READ])
1866 			td->ts.runtime[DDIR_READ] += rt[i];
1867 		if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
1868 			td->ts.runtime[DDIR_WRITE] += rt[i];
1869 		if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
1870 			td->ts.runtime[DDIR_TRIM] += rt[i];
1871 	}
1872 
1873 	for_each_td(td, i) {
1874 		if (td->runstate >= TD_EXITED)
1875 			continue;
1876 		if (td->rusage_sem) {
1877 			td->update_rusage = 1;
1878 			fio_mutex_down(td->rusage_sem);
1879 		}
1880 		td->update_rusage = 0;
1881 	}
1882 
1883 	__show_run_stats();
1884 
1885 	for_each_td(td, i) {
1886 		if (td_read(td) && td->ts.io_bytes[DDIR_READ])
1887 			td->ts.runtime[DDIR_READ] -= rt[i];
1888 		if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
1889 			td->ts.runtime[DDIR_WRITE] -= rt[i];
1890 		if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
1891 			td->ts.runtime[DDIR_TRIM] -= rt[i];
1892 	}
1893 
1894 	free(rt);
1895 	fio_mutex_up(stat_mutex);
1896 }
1897 
1898 static int status_interval_init;
1899 static struct timeval status_time;
1900 static int status_file_disabled;
1901 
1902 #define FIO_STATUS_FILE		"fio-dump-status"
1903 
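/*
 * Check whether the user asked for an interim stats dump by creating the
 * trigger file $TMPDIR/fio-dump-status (falling back to $TEMP, then /tmp).
 * Returns 1 if the file exists; it is unlinked so it only fires once.
 */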
check_status_file(void)1904 static int check_status_file(void)
1905 {
1906 	struct stat sb;
1907 	const char *temp_dir;
1908 	char fio_status_file_path[PATH_MAX];
1909 
1910 	if (status_file_disabled)
1911 		return 0;
1912 
1913 	temp_dir = getenv("TMPDIR");
1914 	if (temp_dir == NULL) {
1915 		temp_dir = getenv("TEMP");
1916 		if (temp_dir && strlen(temp_dir) >= PATH_MAX)
1917 			temp_dir = NULL;
1918 	}
1919 	if (temp_dir == NULL)
1920 		temp_dir = "/tmp";
1921 
1922 	snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s", temp_dir, FIO_STATUS_FILE);
1923 
1924 	if (stat(fio_status_file_path, &sb))
1925 		return 0;
1926 
1927 	if (unlink(fio_status_file_path) < 0) {
1928 		log_err("fio: failed to unlink %s: %s\n", fio_status_file_path,
1929 							strerror(errno));
1930 		log_err("fio: disabling status file updates\n");
1931 		status_file_disabled = 1;
1932 	}
1933 
1934 	return 1;
1935 }
1936 
check_for_running_stats(void)1937 void check_for_running_stats(void)
1938 {
1939 	if (status_interval) {
1940 		if (!status_interval_init) {
1941 			fio_gettime(&status_time, NULL);
1942 			status_interval_init = 1;
1943 		} else if (mtime_since_now(&status_time) >= status_interval) {
1944 			show_running_run_stats();
1945 			fio_gettime(&status_time, NULL);
1946 			return;
1947 		}
1948 	}
1949 	if (check_status_file()) {
1950 		show_running_run_stats();
1951 		return;
1952 	}
1953 }
1954 
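/*
 * Track min/max and a running mean/variance over a stream of samples using
 * Welford's online algorithm:
 *
 *	mean_n = mean_{n-1} + (x - mean_{n-1}) / n
 *	S_n    = S_{n-1} + (x - mean_{n-1}) * (x - mean_n)
 *
 * so the sample variance is S_n / (n - 1) once at least two samples exist.
 */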
add_stat_sample(struct io_stat * is,unsigned long data)1955 static inline void add_stat_sample(struct io_stat *is, unsigned long data)
1956 {
1957 	double val = data;
1958 	double delta;
1959 
1960 	if (data > is->max_val)
1961 		is->max_val = data;
1962 	if (data < is->min_val)
1963 		is->min_val = data;
1964 
1965 	delta = val - is->mean.u.f;
1966 	if (delta) {
1967 		is->mean.u.f += delta / (is->samples + 1.0);
1968 		is->S.u.f += delta * (val - is->mean.u.f);
1969 	}
1970 
1971 	is->samples++;
1972 }
1973 
1974 /*
 1975  * Allocate a new struct io_logs, add it to the tail of the log list
 1976  * for 'iolog' and return it. Returns NULL if allocation fails.
1977  */
get_new_log(struct io_log * iolog)1978 static struct io_logs *get_new_log(struct io_log *iolog)
1979 {
1980 	size_t new_size, new_samples;
1981 	struct io_logs *cur_log;
1982 
1983 	/*
1984 	 * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling
1985 	 * forever
1986 	 */
1987 	if (!iolog->cur_log_max)
1988 		new_samples = DEF_LOG_ENTRIES;
1989 	else {
1990 		new_samples = iolog->cur_log_max * 2;
1991 		if (new_samples > MAX_LOG_ENTRIES)
1992 			new_samples = MAX_LOG_ENTRIES;
1993 	}
1994 
1995 	new_size = new_samples * log_entry_sz(iolog);
1996 
1997 	cur_log = smalloc(sizeof(*cur_log));
1998 	if (cur_log) {
1999 		INIT_FLIST_HEAD(&cur_log->list);
2000 		cur_log->log = malloc(new_size);
2001 		if (cur_log->log) {
2002 			cur_log->nr_samples = 0;
2003 			cur_log->max_samples = new_samples;
2004 			flist_add_tail(&cur_log->list, &iolog->io_logs);
2005 			iolog->cur_log_max = new_samples;
2006 			return cur_log;
2007 		}
2008 		sfree(cur_log);
2009 	}
2010 
2011 	return NULL;
2012 }
2013 
2014 /*
2015  * Add and return a new log chunk, or return current log if big enough
2016  */
regrow_log(struct io_log * iolog)2017 static struct io_logs *regrow_log(struct io_log *iolog)
2018 {
2019 	struct io_logs *cur_log;
2020 	int i;
2021 
2022 	if (!iolog || iolog->disabled)
2023 		goto disable;
2024 
2025 	cur_log = iolog_cur_log(iolog);
2026 	if (!cur_log) {
2027 		cur_log = get_new_log(iolog);
2028 		if (!cur_log)
2029 			return NULL;
2030 	}
2031 
2032 	if (cur_log->nr_samples < cur_log->max_samples)
2033 		return cur_log;
2034 
2035 	/*
2036 	 * No room for a new sample. If we're compressing on the fly, flush
2037 	 * out the current chunk
2038 	 */
2039 	if (iolog->log_gz) {
2040 		if (iolog_cur_flush(iolog, cur_log)) {
2041 			log_err("fio: failed flushing iolog! Will stop logging.\n");
2042 			return NULL;
2043 		}
2044 	}
2045 
2046 	/*
2047 	 * Get a new log array, and add to our list
2048 	 */
2049 	cur_log = get_new_log(iolog);
2050 	if (!cur_log) {
2051 		log_err("fio: failed extending iolog! Will stop logging.\n");
2052 		return NULL;
2053 	}
2054 
2055 	if (!iolog->pending || !iolog->pending->nr_samples)
2056 		return cur_log;
2057 
2058 	/*
2059 	 * Flush pending items to new log
2060 	 */
2061 	for (i = 0; i < iolog->pending->nr_samples; i++) {
2062 		struct io_sample *src, *dst;
2063 
2064 		src = get_sample(iolog, iolog->pending, i);
2065 		dst = get_sample(iolog, cur_log, i);
2066 		memcpy(dst, src, log_entry_sz(iolog));
2067 	}
2068 	cur_log->nr_samples = iolog->pending->nr_samples;
2069 
2070 	iolog->pending->nr_samples = 0;
2071 	return cur_log;
2072 disable:
2073 	if (iolog)
2074 		iolog->disabled = true;
2075 	return NULL;
2076 }
2077 
regrow_logs(struct thread_data * td)2078 void regrow_logs(struct thread_data *td)
2079 {
2080 	regrow_log(td->slat_log);
2081 	regrow_log(td->clat_log);
2082 	regrow_log(td->clat_hist_log);
2083 	regrow_log(td->lat_log);
2084 	regrow_log(td->bw_log);
2085 	regrow_log(td->iops_log);
2086 	td->flags &= ~TD_F_REGROW_LOGS;
2087 }
2088 
get_cur_log(struct io_log * iolog)2089 static struct io_logs *get_cur_log(struct io_log *iolog)
2090 {
2091 	struct io_logs *cur_log;
2092 
2093 	cur_log = iolog_cur_log(iolog);
2094 	if (!cur_log) {
2095 		cur_log = get_new_log(iolog);
2096 		if (!cur_log)
2097 			return NULL;
2098 	}
2099 
2100 	if (cur_log->nr_samples < cur_log->max_samples)
2101 		return cur_log;
2102 
2103 	/*
2104 	 * Out of space. If we're in IO offload mode, or we're not doing
2105 	 * per unit logging (hence logging happens outside of the IO thread
2106 	 * as well), add a new log chunk inline. If we're doing inline
2107 	 * submissions, flag 'td' as needing a log regrow and we'll take
2108 	 * care of it on the submission side.
2109 	 */
2110 	if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
2111 	    !per_unit_log(iolog))
2112 		return regrow_log(iolog);
2113 
2114 	iolog->td->flags |= TD_F_REGROW_LOGS;
2115 	assert(iolog->pending->nr_samples < iolog->pending->max_samples);
2116 	return iolog->pending;
2117 }
2118 
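/*
 * Append one sample to the current chunk of 'iolog'. get_cur_log() grows the
 * log as needed, or hands back the pending area when the grow has to be
 * deferred to the submission path; if no space can be found at all, logging
 * is disabled for this log.
 */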
__add_log_sample(struct io_log * iolog,union io_sample_data data,enum fio_ddir ddir,unsigned int bs,unsigned long t,uint64_t offset)2119 static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
2120 			     enum fio_ddir ddir, unsigned int bs,
2121 			     unsigned long t, uint64_t offset)
2122 {
2123 	struct io_logs *cur_log;
2124 
2125 	if (iolog->disabled)
2126 		return;
2127 	if (flist_empty(&iolog->io_logs))
2128 		iolog->avg_last = t;
2129 
2130 	cur_log = get_cur_log(iolog);
2131 	if (cur_log) {
2132 		struct io_sample *s;
2133 
2134 		s = get_sample(iolog, cur_log, cur_log->nr_samples);
2135 
2136 		s->data = data;
2137 		s->time = t + (iolog->td ? iolog->td->unix_epoch : 0);
2138 		io_sample_set_ddir(iolog, s, ddir);
2139 		s->bs = bs;
2140 
2141 		if (iolog->log_offset) {
2142 			struct io_sample_offset *so = (void *) s;
2143 
2144 			so->offset = offset;
2145 		}
2146 
2147 		cur_log->nr_samples++;
2148 		return;
2149 	}
2150 
2151 	iolog->disabled = true;
2152 }
2153 
reset_io_stat(struct io_stat * ios)2154 static inline void reset_io_stat(struct io_stat *ios)
2155 {
2156 	ios->max_val = ios->min_val = ios->samples = 0;
2157 	ios->mean.u.f = ios->S.u.f = 0;
2158 }
2159 
reset_io_stats(struct thread_data * td)2160 void reset_io_stats(struct thread_data *td)
2161 {
2162 	struct thread_stat *ts = &td->ts;
2163 	int i, j;
2164 
2165 	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2166 		reset_io_stat(&ts->clat_stat[i]);
2167 		reset_io_stat(&ts->slat_stat[i]);
2168 		reset_io_stat(&ts->lat_stat[i]);
2169 		reset_io_stat(&ts->bw_stat[i]);
2170 		reset_io_stat(&ts->iops_stat[i]);
2171 
2172 		ts->io_bytes[i] = 0;
2173 		ts->runtime[i] = 0;
2174 		ts->total_io_u[i] = 0;
2175 		ts->short_io_u[i] = 0;
2176 		ts->drop_io_u[i] = 0;
2177 
2178 		for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
2179 			ts->io_u_plat[i][j] = 0;
2180 	}
2181 
2182 	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
2183 		ts->io_u_map[i] = 0;
2184 		ts->io_u_submit[i] = 0;
2185 		ts->io_u_complete[i] = 0;
2186 	}
2187 
2188 	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
2189 		ts->io_u_lat_u[i] = 0;
2190 	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
2191 		ts->io_u_lat_m[i] = 0;
2192 
2193 	ts->total_submit = 0;
2194 	ts->total_complete = 0;
2195 }
2196 
__add_stat_to_log(struct io_log * iolog,enum fio_ddir ddir,unsigned long elapsed,bool log_max)2197 static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
2198 			      unsigned long elapsed, bool log_max)
2199 {
2200 	/*
2201 	 * Record an entry in the log. Use the mean of the samples collected
2202 	 * in this window, rounded to the nearest integer. Only write a log
2203 	 * entry if we actually collected any samples.
2204 	 */
2205 	if (iolog->avg_window[ddir].samples) {
2206 		union io_sample_data data;
2207 
2208 		if (log_max)
2209 			data.val = iolog->avg_window[ddir].max_val;
2210 		else
2211 			data.val = iolog->avg_window[ddir].mean.u.f + 0.50;
2212 
2213 		__add_log_sample(iolog, data, ddir, 0, elapsed, 0);
2214 	}
2215 
2216 	reset_io_stat(&iolog->avg_window[ddir]);
2217 }
2218 
_add_stat_to_log(struct io_log * iolog,unsigned long elapsed,bool log_max)2219 static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
2220 			     bool log_max)
2221 {
2222 	int ddir;
2223 
2224 	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
2225 		__add_stat_to_log(iolog, ddir, elapsed, log_max);
2226 }
2227 
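/*
 * Add a sample to 'iolog', honoring any time averaging that was requested.
 * Returns 0 when the sample was logged directly (no averaging), otherwise
 * the number of msec until the current averaging window next needs flushing.
 */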
add_log_sample(struct thread_data * td,struct io_log * iolog,union io_sample_data data,enum fio_ddir ddir,unsigned int bs,uint64_t offset)2228 static long add_log_sample(struct thread_data *td, struct io_log *iolog,
2229 			   union io_sample_data data, enum fio_ddir ddir,
2230 			   unsigned int bs, uint64_t offset)
2231 {
2232 	unsigned long elapsed, this_window;
2233 
2234 	if (!ddir_rw(ddir))
2235 		return 0;
2236 
2237 	elapsed = mtime_since_now(&td->epoch);
2238 
2239 	/*
2240 	 * If no time averaging, just add the log sample.
2241 	 */
2242 	if (!iolog->avg_msec) {
2243 		__add_log_sample(iolog, data, ddir, bs, elapsed, offset);
2244 		return 0;
2245 	}
2246 
2247 	/*
2248 	 * Add the sample. If the time period has passed, then
2249 	 * add that entry to the log and clear.
2250 	 */
2251 	add_stat_sample(&iolog->avg_window[ddir], data.val);
2252 
2253 	/*
2254 	 * If period hasn't passed, adding the above sample is all we
2255 	 * need to do.
2256 	 */
2257 	this_window = elapsed - iolog->avg_last;
2258 	if (elapsed < iolog->avg_last)
2259 		return iolog->avg_last - elapsed;
2260 	else if (this_window < iolog->avg_msec) {
2261 		int diff = iolog->avg_msec - this_window;
2262 
2263 		if (inline_log(iolog) || diff > LOG_MSEC_SLACK)
2264 			return diff;
2265 	}
2266 
2267 	_add_stat_to_log(iolog, elapsed, td->o.log_max != 0);
2268 
2269 	iolog->avg_last = elapsed - (this_window - iolog->avg_msec);
2270 	return iolog->avg_msec;
2271 }
2272 
finalize_logs(struct thread_data * td,bool unit_logs)2273 void finalize_logs(struct thread_data *td, bool unit_logs)
2274 {
2275 	unsigned long elapsed;
2276 
2277 	elapsed = mtime_since_now(&td->epoch);
2278 
2279 	if (td->clat_log && unit_logs)
2280 		_add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
2281 	if (td->slat_log && unit_logs)
2282 		_add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
2283 	if (td->lat_log && unit_logs)
2284 		_add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
2285 	if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
2286 		_add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
2287 	if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
2288 		_add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
2289 }
2290 
add_agg_sample(union io_sample_data data,enum fio_ddir ddir,unsigned int bs)2291 void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned int bs)
2292 {
2293 	struct io_log *iolog;
2294 
2295 	if (!ddir_rw(ddir))
2296 		return;
2297 
2298 	iolog = agg_io_log[ddir];
2299 	__add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0);
2300 }
2301 
add_clat_percentile_sample(struct thread_stat * ts,unsigned long usec,enum fio_ddir ddir)2302 static void add_clat_percentile_sample(struct thread_stat *ts,
2303 				unsigned long usec, enum fio_ddir ddir)
2304 {
2305 	unsigned int idx = plat_val_to_idx(usec);
2306 	assert(idx < FIO_IO_U_PLAT_NR);
2307 
2308 	ts->io_u_plat[ddir][idx]++;
2309 }
2310 
add_clat_sample(struct thread_data * td,enum fio_ddir ddir,unsigned long usec,unsigned int bs,uint64_t offset)2311 void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
2312 		     unsigned long usec, unsigned int bs, uint64_t offset)
2313 {
2314 	unsigned long elapsed, this_window;
2315 	struct thread_stat *ts = &td->ts;
2316 	struct io_log *iolog = td->clat_hist_log;
2317 
2318 	td_io_u_lock(td);
2319 
2320 	add_stat_sample(&ts->clat_stat[ddir], usec);
2321 
2322 	if (td->clat_log)
2323 		add_log_sample(td, td->clat_log, sample_val(usec), ddir, bs,
2324 			       offset);
2325 
2326 	if (ts->clat_percentiles)
2327 		add_clat_percentile_sample(ts, usec, ddir);
2328 
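	/*
	 * If a completion latency histogram log is enabled, count this sample
	 * against the current window and dump a full copy of io_u_plat[ddir]
	 * to the log every 'hist_msec' msec.
	 */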
2329 	if (iolog && iolog->hist_msec) {
2330 		struct io_hist *hw = &iolog->hist_window[ddir];
2331 
2332 		hw->samples++;
2333 		elapsed = mtime_since_now(&td->epoch);
2334 		if (!hw->hist_last)
2335 			hw->hist_last = elapsed;
2336 		this_window = elapsed - hw->hist_last;
2337 
2338 		if (this_window >= iolog->hist_msec) {
2339 			unsigned int *io_u_plat;
2340 			struct io_u_plat_entry *dst;
2341 
2342 			/*
2343 			 * Make a byte-for-byte copy of the latency histogram
2344 			 * stored in td->ts.io_u_plat[ddir], recording it in a
2345 			 * log sample. Note that the matching call to free() is
2346 			 * located in iolog.c after printing this sample to the
2347 			 * log file.
2348 			 */
2349 			io_u_plat = (unsigned int *) td->ts.io_u_plat[ddir];
2350 			dst = malloc(sizeof(struct io_u_plat_entry));
2351 			memcpy(&(dst->io_u_plat), io_u_plat,
2352 				FIO_IO_U_PLAT_NR * sizeof(unsigned int));
2353 			flist_add(&dst->list, &hw->list);
2354 			__add_log_sample(iolog, sample_plat(dst), ddir, bs,
2355 						elapsed, offset);
2356 
2357 			/*
2358 			 * Update the last time we recorded as being now, minus
2359 			 * any drift in time we encountered before actually
2360 			 * making the record.
2361 			 */
2362 			hw->hist_last = elapsed - (this_window - iolog->hist_msec);
2363 			hw->samples = 0;
2364 		}
2365 	}
2366 
2367 	td_io_u_unlock(td);
2368 }
2369 
add_slat_sample(struct thread_data * td,enum fio_ddir ddir,unsigned long usec,unsigned int bs,uint64_t offset)2370 void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
2371 		     unsigned long usec, unsigned int bs, uint64_t offset)
2372 {
2373 	struct thread_stat *ts = &td->ts;
2374 
2375 	if (!ddir_rw(ddir))
2376 		return;
2377 
2378 	td_io_u_lock(td);
2379 
2380 	add_stat_sample(&ts->slat_stat[ddir], usec);
2381 
2382 	if (td->slat_log)
2383 		add_log_sample(td, td->slat_log, sample_val(usec), ddir, bs, offset);
2384 
2385 	td_io_u_unlock(td);
2386 }
2387 
add_lat_sample(struct thread_data * td,enum fio_ddir ddir,unsigned long usec,unsigned int bs,uint64_t offset)2388 void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
2389 		    unsigned long usec, unsigned int bs, uint64_t offset)
2390 {
2391 	struct thread_stat *ts = &td->ts;
2392 
2393 	if (!ddir_rw(ddir))
2394 		return;
2395 
2396 	td_io_u_lock(td);
2397 
2398 	add_stat_sample(&ts->lat_stat[ddir], usec);
2399 
2400 	if (td->lat_log)
2401 		add_log_sample(td, td->lat_log, sample_val(usec), ddir, bs,
2402 			       offset);
2403 
2404 	td_io_u_unlock(td);
2405 }
2406 
add_bw_sample(struct thread_data * td,struct io_u * io_u,unsigned int bytes,unsigned long spent)2407 void add_bw_sample(struct thread_data *td, struct io_u *io_u,
2408 		   unsigned int bytes, unsigned long spent)
2409 {
2410 	struct thread_stat *ts = &td->ts;
2411 	unsigned long rate;
2412 
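	/* 'spent' is in msec, so the computed rate is in bytes/sec */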
2413 	if (spent)
2414 		rate = bytes * 1000 / spent;
2415 	else
2416 		rate = 0;
2417 
2418 	td_io_u_lock(td);
2419 
2420 	add_stat_sample(&ts->bw_stat[io_u->ddir], rate);
2421 
2422 	if (td->bw_log)
2423 		add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir,
2424 			       bytes, io_u->offset);
2425 
2426 	td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
2427 	td_io_u_unlock(td);
2428 }
2429 
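/*
 * Turn the change in the running byte (or block) counters since the last
 * sample into a rate sample per data direction. With 'is_kb' set the rate is
 * logged in KiB/sec (bandwidth logs), otherwise as a plain per-second delta
 * (IOPS logs). Returns the number of msec until the next sample is due.
 */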
__add_samples(struct thread_data * td,struct timeval * parent_tv,struct timeval * t,unsigned int avg_time,uint64_t * this_io_bytes,uint64_t * stat_io_bytes,struct io_stat * stat,struct io_log * log,bool is_kb)2430 static int __add_samples(struct thread_data *td, struct timeval *parent_tv,
2431 			 struct timeval *t, unsigned int avg_time,
2432 			 uint64_t *this_io_bytes, uint64_t *stat_io_bytes,
2433 			 struct io_stat *stat, struct io_log *log,
2434 			 bool is_kb)
2435 {
2436 	unsigned long spent, rate;
2437 	enum fio_ddir ddir;
2438 	unsigned int next, next_log;
2439 
2440 	next_log = avg_time;
2441 
2442 	spent = mtime_since(parent_tv, t);
2443 	if (spent < avg_time && avg_time - spent >= LOG_MSEC_SLACK)
2444 		return avg_time - spent;
2445 
2446 	td_io_u_lock(td);
2447 
2448 	/*
2449 	 * Compute both read and write rates for the interval.
2450 	 */
2451 	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
2452 		uint64_t delta;
2453 
2454 		delta = this_io_bytes[ddir] - stat_io_bytes[ddir];
2455 		if (!delta)
2456 			continue; /* No entries for interval */
2457 
2458 		if (spent) {
2459 			if (is_kb)
2460 				rate = delta * 1000 / spent / 1024; /* KiB/s */
2461 			else
2462 				rate = (delta * 1000) / spent;
2463 		} else
2464 			rate = 0;
2465 
2466 		add_stat_sample(&stat[ddir], rate);
2467 
2468 		if (log) {
2469 			unsigned int bs = 0;
2470 
2471 			if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
2472 				bs = td->o.min_bs[ddir];
2473 
2474 			next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0);
2475 			next_log = min(next_log, next);
2476 		}
2477 
2478 		stat_io_bytes[ddir] = this_io_bytes[ddir];
2479 	}
2480 
2481 	timeval_add_msec(parent_tv, avg_time);
2482 
2483 	td_io_u_unlock(td);
2484 
2485 	if (spent <= avg_time)
2486 		next = avg_time;
2487 	else
2488 		next = avg_time - (1 + spent - avg_time);
2489 
2490 	return min(next, next_log);
2491 }
2492 
add_bw_samples(struct thread_data * td,struct timeval * t)2493 static int add_bw_samples(struct thread_data *td, struct timeval *t)
2494 {
2495 	return __add_samples(td, &td->bw_sample_time, t, td->o.bw_avg_time,
2496 				td->this_io_bytes, td->stat_io_bytes,
2497 				td->ts.bw_stat, td->bw_log, true);
2498 }
2499 
add_iops_sample(struct thread_data * td,struct io_u * io_u,unsigned int bytes)2500 void add_iops_sample(struct thread_data *td, struct io_u *io_u,
2501 		     unsigned int bytes)
2502 {
2503 	struct thread_stat *ts = &td->ts;
2504 
2505 	td_io_u_lock(td);
2506 
2507 	add_stat_sample(&ts->iops_stat[io_u->ddir], 1);
2508 
2509 	if (td->iops_log)
2510 		add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir,
2511 			       bytes, io_u->offset);
2512 
2513 	td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];
2514 	td_io_u_unlock(td);
2515 }
2516 
add_iops_samples(struct thread_data * td,struct timeval * t)2517 static int add_iops_samples(struct thread_data *td, struct timeval *t)
2518 {
2519 	return __add_samples(td, &td->iops_sample_time, t, td->o.iops_avg_time,
2520 				td->this_io_blocks, td->stat_io_blocks,
2521 				td->ts.iops_stat, td->iops_log, false);
2522 }
2523 
2524 /*
2525  * Returns msecs to next event
2526  */
calc_log_samples(void)2527 int calc_log_samples(void)
2528 {
2529 	struct thread_data *td;
2530 	unsigned int next = ~0U, tmp;
2531 	struct timeval now;
2532 	int i;
2533 
2534 	fio_gettime(&now, NULL);
2535 
2536 	for_each_td(td, i) {
2537 		if (!td->o.stats)
2538 			continue;
2539 		if (in_ramp_time(td) ||
2540 		    !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
2541 			next = min(td->o.iops_avg_time, td->o.bw_avg_time);
2542 			continue;
2543 		}
2544 		if (!td->bw_log ||
2545 		    !per_unit_log(td->bw_log)) {
2546 			tmp = add_bw_samples(td, &now);
2547 			if (tmp < next)
2548 				next = tmp;
2549 		}
2550 		if (!td->iops_log ||
2551 		    !per_unit_log(td->iops_log)) {
2552 			tmp = add_iops_samples(td, &now);
2553 			if (tmp < next)
2554 				next = tmp;
2555 		}
2556 	}
2557 
2558 	return next == ~0U ? 0 : next;
2559 }
2560 
stat_init(void)2561 void stat_init(void)
2562 {
2563 	stat_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
2564 }
2565 
stat_exit(void)2566 void stat_exit(void)
2567 {
2568 	/*
2569 	 * When we have the mutex, we know all out-of-band access to it
2570 	 * has ended.
2571 	 */
2572 	fio_mutex_down(stat_mutex);
2573 	fio_mutex_remove(stat_mutex);
2574 }
2575 
2576 /*
2577  * Called from signal handler. Wake up status thread.
2578  */
show_running_run_stats(void)2579 void show_running_run_stats(void)
2580 {
2581 	helper_do_stat();
2582 }
2583 
io_u_block_info(struct thread_data * td,struct io_u * io_u)2584 uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)
2585 {
2586 	/* Ignore io_u's which span multiple blocks--they will just get
2587 	 * inaccurate counts. */
2588 	int idx = (io_u->offset - io_u->file->file_offset)
2589 			/ td->o.bs[DDIR_TRIM];
2590 	uint32_t *info = &td->ts.block_infos[idx];
2591 	assert(idx < td->ts.nr_block_infos);
2592 	return info;
2593 }
2594