#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <math.h>
#include <assert.h>

#include "../io_ddir.h"
#include "../flist.h"
#include "../hash.h"
#include "../fifo.h"
#include "../blktrace_api.h"
#include "../os/os.h"
#include "../log.h"
#include "../lib/linux-dev-lookup.h"

#define TRACE_FIFO_SIZE	8192

static unsigned int rt_threshold = 1000000;
static unsigned int ios_threshold = 10;
static unsigned int rate_threshold;
static unsigned int set_rate;
static unsigned int max_depth = 256;
static int output_ascii = 1;
static char *filename;

static char **add_opts;
static int n_add_opts;

/*
 * Collapse defaults
 */
static unsigned int collapse_entries = 0;
static unsigned int depth_diff = 1;
static unsigned int random_diff = 5;

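/*
 * Data structures: 'struct bs' counts the I/Os seen for one block size,
 * 'struct trace_file' maps a major/minor to a device path, 'struct
 * btrace_out' accumulates per-direction statistics for one task, and
 * 'struct btrace_pid' ties those stats to a pid (hashed and kept on a
 * global list).
 */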
struct bs {
	unsigned int bs;
	unsigned int nr;
	int merges;
};

struct trace_file {
	char *name;
	int major, minor;
};

struct btrace_out {
	unsigned long ios[DDIR_RWDIR_CNT];
	unsigned long merges[DDIR_RWDIR_CNT];

	uint64_t last_end[DDIR_RWDIR_CNT];
	uint64_t seq[DDIR_RWDIR_CNT];

	struct bs *bs[DDIR_RWDIR_CNT];
	unsigned int nr_bs[DDIR_RWDIR_CNT];

	int inflight;
	unsigned int depth;
	int depth_disabled;
	int complete_seen;

	uint64_t first_ttime[DDIR_RWDIR_CNT];
	uint64_t last_ttime[DDIR_RWDIR_CNT];
	uint64_t kb[DDIR_RWDIR_CNT];

	uint64_t start_delay;
};

struct btrace_pid {
	struct flist_head hash_list;
	struct flist_head pid_list;
	pid_t pid;

	pid_t *merge_pids;
	unsigned int nr_merge_pids;

	struct trace_file *files;
	int nr_files;
	unsigned int last_major, last_minor;
	int numjobs;
	int ignore;

	struct btrace_out o;
};

struct inflight {
	struct flist_head list;
	struct btrace_pid *p;
	uint64_t end_sector;
};

#define PID_HASH_BITS	10
#define PID_HASH_SIZE	(1U << PID_HASH_BITS)

static struct flist_head pid_hash[PID_HASH_SIZE];
static FLIST_HEAD(pid_list);

#define INFLIGHT_HASH_BITS	8
#define INFLIGHT_HASH_SIZE	(1U << INFLIGHT_HASH_BITS)
static struct flist_head inflight_hash[INFLIGHT_HASH_SIZE];

static uint64_t first_ttime = -1ULL;

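/*
 * Queued I/Os are tracked as 'struct inflight' entries, hashed by their
 * end sector so that back/front merges and completions can be matched
 * against the request they extend or finish.
 */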
static struct inflight *inflight_find(uint64_t sector)
{
	struct flist_head *inflight_list;
	struct flist_head *e;

	inflight_list = &inflight_hash[hash_long(sector, INFLIGHT_HASH_BITS)];

	flist_for_each(e, inflight_list) {
		struct inflight *i = flist_entry(e, struct inflight, list);

		if (i->end_sector == sector)
			return i;
	}

	return NULL;
}

static void inflight_remove(struct inflight *i)
{
	struct btrace_out *o = &i->p->o;

	o->inflight--;
	assert(o->inflight >= 0);
	flist_del(&i->list);
	free(i);
}

static void __inflight_add(struct inflight *i)
{
	struct flist_head *list;

	list = &inflight_hash[hash_long(i->end_sector, INFLIGHT_HASH_BITS)];
	flist_add_tail(&i->list, list);
}

static void inflight_add(struct btrace_pid *p, uint64_t sector, uint32_t len)
{
	struct btrace_out *o = &p->o;
	struct inflight *i;

	i = calloc(1, sizeof(*i));
	i->p = p;
	o->inflight++;
	if (!o->depth_disabled) {
		o->depth = max((int) o->depth, o->inflight);
		if (o->depth >= max_depth && !o->complete_seen) {
			o->depth_disabled = 1;
			o->depth = max_depth;
		}
	}
	i->end_sector = sector + (len >> 9);
	__inflight_add(i);
}

static void inflight_merge(struct inflight *i, int rw, unsigned int size)
{
	i->p->o.merges[rw]++;
	if (size) {
		i->end_sector += (size >> 9);
		flist_del(&i->list);
		__inflight_add(i);
	}
}

/*
 * fifo refill frontend, to avoid reading data in trace sized bites
 */
static int refill_fifo(struct fifo *fifo, int fd)
{
	char buf[TRACE_FIFO_SIZE];
	unsigned int total;
	int ret;

	total = sizeof(buf);
	if (total > fifo_room(fifo))
		total = fifo_room(fifo);

	ret = read(fd, buf, total);
	if (ret < 0) {
		perror("read refill");
		return -1;
	}

	if (ret > 0)
		ret = fifo_put(fifo, buf, ret);

	return ret;
}

/*
 * Retrieve 'len' bytes from the fifo, refilling if necessary.
 */
static int trace_fifo_get(struct fifo *fifo, int fd, void *buf,
			  unsigned int len)
{
	if (fifo_len(fifo) < len) {
		int ret = refill_fifo(fifo, fd);

		if (ret < 0)
			return ret;
	}

	return fifo_get(fifo, buf, len);
}

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(struct fifo *fifo, int fd, struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	return trace_fifo_get(fifo, fd, NULL, t->pdu_len);
}

static int handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		//printf("got process notify: %x, %d\n", t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		//printf("got timestamp notify: %x, %d\n", t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		log_err("unknown trace act %x\n", t->action);
		return 1;
	}

	return 0;
}

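/*
 * Maintain a per-direction histogram of block sizes: bump the counter for
 * a size seen before, otherwise grow the array with a new entry.
 */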
static void __add_bs(struct btrace_out *o, unsigned int len, int rw)
{
	o->bs[rw] = realloc(o->bs[rw], (o->nr_bs[rw] + 1) * sizeof(struct bs));
	o->bs[rw][o->nr_bs[rw]].bs = len;
	o->bs[rw][o->nr_bs[rw]].nr = 1;
	o->bs[rw][o->nr_bs[rw]].merges = 0;
	o->nr_bs[rw]++;
}

static void add_bs(struct btrace_out *o, unsigned int len, int rw)
{
	struct bs *bs = o->bs[rw];
	int i;

	if (!o->nr_bs[rw]) {
		__add_bs(o, len, rw);
		return;
	}

	for (i = 0; i < o->nr_bs[rw]; i++) {
		if (bs[i].bs == len) {
			bs[i].nr++;
			return;
		}
	}

	__add_bs(o, len, rw);
}

#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))

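/*
 * Remember which devices a pid touched: decode the major/minor from the
 * trace and resolve it to a /dev path via blktrace_lookup_device(), unless
 * a replay device was given with -d.
 */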
static int btrace_add_file(struct btrace_pid *p, uint32_t devno)
{
	unsigned int maj = FMAJOR(devno);
	unsigned int min = FMINOR(devno);
	struct trace_file *f;
	unsigned int i;
	char dev[256];

	if (filename)
		return 0;
	if (p->last_major == maj && p->last_minor == min)
		return 0;

	p->last_major = maj;
	p->last_minor = min;

	/*
	 * check for this file in our list
	 */
	for (i = 0; i < p->nr_files; i++) {
		f = &p->files[i];

		if (f->major == maj && f->minor == min)
			return 0;
	}

	strcpy(dev, "/dev");
	if (!blktrace_lookup_device(NULL, dev, maj, min)) {
		log_err("fio: failed to find device %u/%u\n", maj, min);
		if (!output_ascii) {
			log_err("fio: use -d to specify device\n");
			return 1;
		}
		return 0;
	}

	p->files = realloc(p->files, (p->nr_files + 1) * sizeof(*f));
	f = &p->files[p->nr_files];
	f->name = strdup(dev);
	f->major = maj;
	f->minor = min;
	p->nr_files++;
	return 0;
}

static int t_to_rwdir(struct blk_io_trace *t)
{
	if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return DDIR_TRIM;

	return (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
}

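/*
 * Queue handlers: account the I/O size and count for the data direction of
 * the trace; for reads and writes, detect sequential access by comparing
 * the start sector against the end of the previous I/O in that direction.
 */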
static int handle_trace_discard(struct blk_io_trace *t, struct btrace_pid *p)
{
	struct btrace_out *o = &p->o;

	if (btrace_add_file(p, t->device))
		return 1;

	if (o->first_ttime[2] == -1ULL)
		o->first_ttime[2] = t->time;

	o->ios[DDIR_TRIM]++;
	add_bs(o, t->bytes, DDIR_TRIM);
	return 0;
}

static int handle_trace_fs(struct blk_io_trace *t, struct btrace_pid *p)
{
	struct btrace_out *o = &p->o;
	int rw;

	if (btrace_add_file(p, t->device))
		return 1;

	first_ttime = min(first_ttime, (uint64_t) t->time);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (o->first_ttime[rw] == -1ULL)
		o->first_ttime[rw] = t->time;

	add_bs(o, t->bytes, rw);
	o->ios[rw]++;

	if (t->sector == o->last_end[rw] || o->last_end[rw] == -1ULL)
		o->seq[rw]++;

	o->last_end[rw] = t->sector + (t->bytes >> 9);
	return 0;
}

static int handle_queue_trace(struct blk_io_trace *t, struct btrace_pid *p)
{
	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		return handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return handle_trace_discard(t, p);
	else
		return handle_trace_fs(t, p);
}

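/*
 * Dispatch on the trace action: queue events create an inflight entry and
 * are accounted, back/front merges are folded into the matching inflight
 * request, and completions credit the transferred KB and release the entry.
 */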
static int handle_trace(struct blk_io_trace *t, struct btrace_pid *p)
{
	unsigned int act = t->action & 0xffff;
	int ret = 0;

	if (act == __BLK_TA_QUEUE) {
		inflight_add(p, t->sector, t->bytes);
		ret = handle_queue_trace(t, p);
	} else if (act == __BLK_TA_BACKMERGE) {
		struct inflight *i;

		i = inflight_find(t->sector + (t->bytes >> 9));
		if (i)
			inflight_remove(i);

		i = inflight_find(t->sector);
		if (i)
			inflight_merge(i, t_to_rwdir(t), t->bytes);
	} else if (act == __BLK_TA_FRONTMERGE) {
		struct inflight *i;

		i = inflight_find(t->sector + (t->bytes >> 9));
		if (i)
			inflight_remove(i);

		i = inflight_find(t->sector);
		if (i)
			inflight_merge(i, t_to_rwdir(t), 0);
	} else if (act == __BLK_TA_COMPLETE) {
		struct inflight *i;

		i = inflight_find(t->sector + (t->bytes >> 9));
		if (i) {
			i->p->o.kb[t_to_rwdir(t)] += (t->bytes >> 10);
			i->p->o.complete_seen = 1;
			inflight_remove(i);
		}
	}

	return ret;
}

static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}

static struct btrace_pid *pid_hash_find(pid_t pid, struct flist_head *list)
{
	struct flist_head *e;
	struct btrace_pid *p;

	flist_for_each(e, list) {
		p = flist_entry(e, struct btrace_pid, hash_list);
		if (p->pid == pid)
			return p;
	}

	return NULL;
}

static struct btrace_pid *pid_hash_get(pid_t pid)
{
	struct flist_head *hash_list;
	struct btrace_pid *p;

	hash_list = &pid_hash[hash_long(pid, PID_HASH_BITS)];

	p = pid_hash_find(pid, hash_list);
	if (!p) {
		int i;

		p = calloc(1, sizeof(*p));

		for (i = 0; i < DDIR_RWDIR_CNT; i++) {
			p->o.first_ttime[i] = -1ULL;
			p->o.last_ttime[i] = -1ULL;
			p->o.last_end[i] = -1ULL;
		}

		p->pid = pid;
		p->numjobs = 1;
		flist_add_tail(&p->hash_list, hash_list);
		flist_add_tail(&p->pid_list, &pid_list);
	}

	return p;
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
static int load_blktrace(const char *fname, int need_swap)
{
	struct btrace_pid *p;
	unsigned long traces;
	struct blk_io_trace t;
	struct fifo *fifo;
	int fd, ret = 0;

	fd = open(fname, O_RDONLY);
	if (fd < 0) {
		perror("open trace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	traces = 0;
	do {
		ret = trace_fifo_get(fifo, fd, &t, sizeof(t));
		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n", t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n", t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(fifo, fd, &t);
		if (ret < 0) {
			log_err("blktrace lseek\n");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}

		p = pid_hash_get(t.pid);
		ret = handle_trace(&t, p);
		if (ret)
			break;
		p->o.last_ttime[t_to_rwdir(&t)] = t.time;
		traces++;
	} while (1);

	fifo_free(fifo);
	close(fd);

	if (ret)
		return ret;

	if (output_ascii)
		printf("Traces loaded: %lu\n", traces);

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}

static int bs_cmp(const void *ba, const void *bb)
{
	const struct bs *bsa = ba;
	const struct bs *bsb = bb;

	return bsb->nr - bsa->nr;
}

static unsigned long o_to_kb_rate(struct btrace_out *o, int rw)
{
	uint64_t usec = (o->last_ttime[rw] - o->first_ttime[rw]) / 1000ULL;
	uint64_t val;

	if (!usec)
		return 0;

	usec /= 1000;
	if (!usec)
		return 0;

	val = o->kb[rw] * 1000ULL;
	return val / usec;
}

static uint64_t o_first_ttime(struct btrace_out *o)
{
	uint64_t first;

	first = min(o->first_ttime[0], o->first_ttime[1]);
	return min(first, o->first_ttime[2]);
}

static uint64_t o_longest_ttime(struct btrace_out *o)
{
	uint64_t ret = 0;
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		uint64_t diff;

		diff = o->last_ttime[i] - o->first_ttime[i];
		ret = max(diff, ret);
	}

	return ret;
}

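/*
 * Human readable output (the default): per pid, print the I/O, merge and
 * sequential counts, estimated rate and block size distribution for each
 * data direction, plus queue depth, runtime and the device(s) touched.
 */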
static void __output_p_ascii(struct btrace_pid *p, unsigned long *ios)
{
	const char *msg[] = { "reads", "writes", "trims" };
	struct btrace_out *o = &p->o;
	unsigned long total, usec;
	int i, j;

	printf("[pid:\t%u", p->pid);
	if (p->nr_merge_pids)
		for (i = 0; i < p->nr_merge_pids; i++)
			printf(", %u", p->merge_pids[i]);
	printf("]\n");

	total = ddir_rw_sum(o->ios);
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		float perc;

		if (!o->ios[i])
			continue;

		ios[i] += o->ios[i] + o->merges[i];
		printf("%s\n", msg[i]);
		perc = ((float) o->ios[i] * 100.0) / (float) total;
		printf("\tios:    %lu (perc=%3.2f%%)\n", o->ios[i], perc);
		perc = ((float) o->merges[i] * 100.0) / (float) total;
		printf("\tmerges: %lu (perc=%3.2f%%)\n", o->merges[i], perc);
		perc = ((float) o->seq[i] * 100.0) / (float) o->ios[i];
		printf("\tseq:    %lu (perc=%3.2f%%)\n", (unsigned long) o->seq[i], perc);
		printf("\trate:   %lu KB/sec\n", o_to_kb_rate(o, i));

		for (j = 0; j < o->nr_bs[i]; j++) {
			struct bs *bs = &o->bs[i][j];

			perc = (((float) bs->nr * 100.0) / (float) o->ios[i]);
			printf("\tbs=%u, perc=%3.2f%%\n", bs->bs, perc);
		}
	}

	printf("depth:\t%u\n", o->depth);
	usec = o_longest_ttime(o) / 1000ULL;
	printf("usec:\t%lu (delay=%llu)\n", usec, (unsigned long long) o->start_delay);

	printf("files:\t");
	for (i = 0; i < p->nr_files; i++)
		printf("%s,", p->files[i].name);
	printf("\n");

	printf("\n");
}

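/*
 * Fio job file output (-f): emit one job section per pid, translating the
 * collected statistics into job options: numjobs, ioengine/iodepth, rw and
 * rwmixread, percentage_random, filename, startdelay, runtime, bssplit and
 * optionally rate.
 */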
static int __output_p_fio(struct btrace_pid *p, unsigned long *ios)
{
	struct btrace_out *o = &p->o;
	unsigned long total;
	unsigned long long time;
	float perc;
	int i, j;

	if ((o->ios[0] + o->ios[1]) && o->ios[2]) {
		log_err("fio: trace has both read/write and trim\n");
		return 1;
	}
	if (!p->nr_files) {
		log_err("fio: no devices found\n");
		return 1;
	}

	printf("[pid%u", p->pid);
	if (p->nr_merge_pids)
		for (i = 0; i < p->nr_merge_pids; i++)
			printf(",pid%u", p->merge_pids[i]);
	printf("]\n");

	printf("numjobs=%u\n", p->numjobs);
	printf("direct=1\n");
	if (o->depth == 1)
		printf("ioengine=sync\n");
	else
		printf("ioengine=libaio\niodepth=%u\n", o->depth);

	if (o->ios[0] && !o->ios[1])
		printf("rw=randread\n");
	else if (!o->ios[0] && o->ios[1])
		printf("rw=randwrite\n");
	else if (o->ios[2])
		printf("rw=randtrim\n");
	else {
		printf("rw=randrw\n");
		total = ddir_rw_sum(o->ios);
		perc = ((float) o->ios[0] * 100.0) / (float) total;
		printf("rwmixread=%u\n", (int) floor(perc + 0.50));
	}

	printf("percentage_random=");
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (o->seq[i] && o->ios[i]) {
			perc = ((float) o->seq[i] * 100.0) / (float) o->ios[i];
			if (perc >= 99.0)
				perc = 100.0;
		} else
			perc = 100.0;

		if (i)
			printf(",");
		perc = 100.0 - perc;
		printf("%u", (int) floor(perc + 0.5));
	}
	printf("\n");

	printf("filename=");
	for (i = 0; i < p->nr_files; i++) {
		if (i)
			printf(":");
		printf("%s", p->files[i].name);
	}
	printf("\n");

	if (o->start_delay / 1000000ULL)
		printf("startdelay=%llus\n", o->start_delay / 1000000ULL);

	time = o_longest_ttime(o);
	time = (time + 1000000000ULL - 1) / 1000000000ULL;
	printf("runtime=%llus\n", time);

	printf("bssplit=");
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {

		if (i && o->nr_bs[i - 1] && o->nr_bs[i])
			printf(",");

		for (j = 0; j < o->nr_bs[i]; j++) {
			struct bs *bs = &o->bs[i][j];

			perc = (((float) bs->nr * 100.0) / (float) o->ios[i]);
			if (perc < 1.00)
				continue;
			if (j)
				printf(":");
			if (j + 1 == o->nr_bs[i])
				printf("%u/", bs->bs);
			else
				printf("%u/%u", bs->bs, (int) floor(perc + 0.5));
		}
	}
	printf("\n");

	if (set_rate) {
		printf("rate=");
		for (i = 0; i < DDIR_RWDIR_CNT; i++) {
			unsigned long rate;

			rate = o_to_kb_rate(o, i);
			if (i)
				printf(",");
			if (rate)
				printf("%luk", rate);
		}
		printf("\n");
	}

	if (n_add_opts)
		for (i = 0; i < n_add_opts; i++)
			printf("%s\n", add_opts[i]);

	printf("\n");
	return 0;
}

static int __output_p(struct btrace_pid *p, unsigned long *ios)
{
	struct btrace_out *o = &p->o;
	int i, ret = 0;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (o->nr_bs[i] <= 1)
			continue;
		qsort(o->bs[i], o->nr_bs[i], sizeof(struct bs), bs_cmp);
	}

	if (filename) {
		p->files = malloc(sizeof(struct trace_file));
		p->nr_files++;
		p->files[0].name = filename;
	}

	if (output_ascii)
		__output_p_ascii(p, ios);
	else
		ret = __output_p_fio(p, ios);

	return ret;
}

static void remove_ddir(struct btrace_out *o, int rw)
{
	o->ios[rw] = 0;
}

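/*
 * Drop tasks that are not interesting enough to replay: too few I/Os,
 * too short a runtime, or a data rate below the -r threshold.
 */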
static int prune_entry(struct btrace_out *o)
{
	unsigned long rate;
	uint64_t time;
	int i;

	if (ddir_rw_sum(o->ios) < ios_threshold)
		return 1;

	time = o_longest_ttime(o) / 1000ULL;
	if (time < rt_threshold)
		return 1;

	rate = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long this_rate;

		this_rate = o_to_kb_rate(o, i);
		if (this_rate < rate_threshold) {
			remove_ddir(o, i);
			this_rate = 0;
		}
		rate += this_rate;
	}

	if (rate < rate_threshold)
		return 1;

	return 0;
}

static int entry_cmp(void *priv, struct flist_head *a, struct flist_head *b)
{
	struct btrace_pid *pa = flist_entry(a, struct btrace_pid, pid_list);
	struct btrace_pid *pb = flist_entry(b, struct btrace_pid, pid_list);

	return ddir_rw_sum(pb->o.ios) - ddir_rw_sum(pa->o.ios);
}

static void free_p(struct btrace_pid *p)
{
	struct btrace_out *o = &p->o;
	int i;

	for (i = 0; i < p->nr_files; i++) {
		if (p->files[i].name && p->files[i].name != filename)
			free(p->files[i].name);
	}

	for (i = 0; i < DDIR_RWDIR_CNT; i++)
		free(o->bs[i]);

	free(p->files);
	flist_del(&p->pid_list);
	flist_del(&p->hash_list);
	free(p);
}

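/*
 * Job collapsing (-c): two pids are considered "identical" if they do I/O
 * in the same data directions, their sequential percentages are within
 * random_diff of each other, and their queue depths are within depth_diff.
 * Matching entries are merged into one job with numjobs bumped accordingly.
 */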
static int entries_close(struct btrace_pid *pida, struct btrace_pid *pidb)
{
	float perca, percb, fdiff;
	int i, idiff;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if ((pida->o.ios[i] && !pidb->o.ios[i]) ||
		    (pidb->o.ios[i] && !pida->o.ios[i]))
			return 0;
		if (pida->o.ios[i] && pidb->o.ios[i]) {
			perca = ((float) pida->o.seq[i] * 100.0) / (float) pida->o.ios[i];
			percb = ((float) pidb->o.seq[i] * 100.0) / (float) pidb->o.ios[i];
			fdiff = perca - percb;
			if (fabs(fdiff) > random_diff)
				return 0;
		}

		idiff = pida->o.depth - pidb->o.depth;
		if (abs(idiff) > depth_diff)
			return 0;
	}

	return 1;
}

static void merge_bs(struct bs **bsap, unsigned int *nr_bsap,
		     struct bs *bsb, unsigned int nr_bsb)
{
	struct bs *bsa = *bsap;
	unsigned int nr_bsa = *nr_bsap;
	int a, b;

	for (b = 0; b < nr_bsb; b++) {
		int next, found = 0;

		for (a = 0; a < nr_bsa; a++) {
			if (bsb[b].bs != bsa[a].bs)
				continue;

			bsa[a].nr += bsb[b].nr;
			bsa[a].merges += bsb[b].merges;
			found = 1;
			break;
		}

		if (found)
			continue;

		next = *nr_bsap;
		bsa = realloc(bsa, (next + 1) * sizeof(struct bs));
		bsa[next].bs = bsb[b].bs;
		bsa[next].nr = bsb[b].nr;
		bsa[next].merges = bsb[b].merges;
		(*nr_bsap)++;
		*bsap = bsa;
	}
}

static int merge_entries(struct btrace_pid *pida, struct btrace_pid *pidb)
{
	int i;

	if (!entries_close(pida, pidb))
		return 0;

	pida->nr_merge_pids++;
	pida->merge_pids = realloc(pida->merge_pids, pida->nr_merge_pids * sizeof(pid_t));
	pida->merge_pids[pida->nr_merge_pids - 1] = pidb->pid;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		struct btrace_out *oa = &pida->o;
		struct btrace_out *ob = &pidb->o;

		oa->ios[i] += ob->ios[i];
		oa->merges[i] += ob->merges[i];
		oa->seq[i] += ob->seq[i];
		oa->kb[i] += ob->kb[i];
		oa->first_ttime[i] = min(oa->first_ttime[i], ob->first_ttime[i]);
		oa->last_ttime[i] = max(oa->last_ttime[i], ob->last_ttime[i]);
		merge_bs(&oa->bs[i], &oa->nr_bs[i], ob->bs[i], ob->nr_bs[i]);
	}

	pida->o.start_delay = min(pida->o.start_delay, pidb->o.start_delay);
	pida->o.depth = (pida->o.depth + pidb->o.depth) / 2;
	return 1;
}

static void check_merges(struct btrace_pid *p, struct flist_head *pid_list)
{
	struct flist_head *e, *tmp;

	if (p->ignore)
		return;

	flist_for_each_safe(e, tmp, pid_list) {
		struct btrace_pid *pidb;

		pidb = flist_entry(e, struct btrace_pid, pid_list);
		if (pidb == p)
			continue;

		if (merge_entries(p, pidb)) {
			pidb->ignore = 1;
			p->numjobs++;
		}
	}
}

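/*
 * Top level output: prune uninteresting entries, optionally collapse
 * similar ones, warn if queue depths had to be capped, then sort by I/O
 * count and emit each remaining entry.
 */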
static int output_p(void)
{
	unsigned long ios[DDIR_RWDIR_CNT];
	struct flist_head *e, *tmp;
	int depth_disabled = 0;
	int ret = 0;

	flist_for_each_safe(e, tmp, &pid_list) {
		struct btrace_pid *p;

		p = flist_entry(e, struct btrace_pid, pid_list);
		if (prune_entry(&p->o)) {
			free_p(p);
			continue;
		}
		p->o.start_delay = (o_first_ttime(&p->o) / 1000ULL) - first_ttime;
		depth_disabled += p->o.depth_disabled;
	}

	if (collapse_entries) {
		struct btrace_pid *p;

		flist_for_each_safe(e, tmp, &pid_list) {
			p = flist_entry(e, struct btrace_pid, pid_list);
			check_merges(p, &pid_list);
		}

		flist_for_each_safe(e, tmp, &pid_list) {
			p = flist_entry(e, struct btrace_pid, pid_list);
			if (p->ignore)
				free_p(p);
		}
	}

	if (depth_disabled)
		log_err("fio: missing completion traces, depths capped at %u\n", max_depth);

	memset(ios, 0, sizeof(ios));

	flist_sort(NULL, &pid_list, entry_cmp);

	flist_for_each(e, &pid_list) {
		struct btrace_pid *p;

		p = flist_entry(e, struct btrace_pid, pid_list);
		ret |= __output_p(p, ios);
		if (ret && !output_ascii)
			break;
	}

	if (output_ascii)
		printf("Total: reads=%lu, writes=%lu\n", ios[0], ios[1]);

	return ret;
}

static int usage(char *argv[])
{
	log_err("%s: [options] <blktrace bin file>\n", argv[0]);
	log_err("\t-t\tUsec threshold to ignore task\n");
	log_err("\t-n\tNumber IOS threshold to ignore task\n");
	log_err("\t-f\tFio job file output\n");
	log_err("\t-d\tUse this file/device for replay\n");
	log_err("\t-r\tIgnore jobs with less than this KB/sec rate\n");
	log_err("\t-R\tSet rate in fio job (def=%u)\n", set_rate);
	log_err("\t-D\tCap queue depth at this value (def=%u)\n", max_depth);
	log_err("\t-c\tCollapse \"identical\" jobs (def=%u)\n", collapse_entries);
	log_err("\t-u\tDepth difference for collapse (def=%u)\n", depth_diff);
	log_err("\t-x\tRandom difference for collapse (def=%u)\n", random_diff);
	log_err("\t-a\tAdditional fio option to add to job file\n");
	return 1;
}

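/*
 * Peek at the first trace entry to decide whether the file was written on
 * a machine with the opposite endianness and needs byte swapping.
 */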
static int trace_needs_swap(const char *trace_file, int *swap)
{
	struct blk_io_trace t;
	int fd, ret;

	*swap = -1;

	fd = open(trace_file, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = read(fd, &t, sizeof(t));
	if (ret < 0) {
		close(fd);
		perror("read");
		return 1;
	} else if (ret != sizeof(t)) {
		close(fd);
		log_err("fio: short read on trace file\n");
		return 1;
	}

	close(fd);

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
		*swap = 0;
	else {
		/*
		 * Maybe it needs to be endian swapped...
		 */
		t.magic = fio_swap32(t.magic);
		if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
			*swap = 1;
	}

	if (*swap == -1) {
		log_err("fio: blktrace appears corrupt\n");
		return 1;
	}

	return 0;
}

int main(int argc, char *argv[])
{
	int need_swap, i, c;

	if (argc < 2)
		return usage(argv);

	while ((c = getopt(argc, argv, "t:n:fd:r:RD:c:u:x:a:")) != -1) {
		switch (c) {
		case 'R':
			set_rate = 1;
			break;
		case 'r':
			rate_threshold = atoi(optarg);
			break;
		case 't':
			rt_threshold = atoi(optarg);
			break;
		case 'n':
			ios_threshold = atoi(optarg);
			break;
		case 'f':
			output_ascii = 0;
			break;
		case 'd':
			filename = strdup(optarg);
			break;
		case 'D':
			max_depth = atoi(optarg);
			break;
		case 'c':
			collapse_entries = atoi(optarg);
			break;
		case 'u':
			depth_diff = atoi(optarg);
			break;
		case 'x':
			random_diff = atoi(optarg);
			break;
		case 'a':
			add_opts = realloc(add_opts, (n_add_opts + 1) * sizeof(char *));
			add_opts[n_add_opts] = strdup(optarg);
			n_add_opts++;
			break;
		case '?':
		default:
			return usage(argv);
		}
	}

	if (argc == optind)
		return usage(argv);

	if (trace_needs_swap(argv[optind], &need_swap))
		return 1;

	for (i = 0; i < PID_HASH_SIZE; i++)
		INIT_FLIST_HEAD(&pid_hash[i]);
	for (i = 0; i < INFLIGHT_HASH_SIZE; i++)
		INIT_FLIST_HEAD(&inflight_hash[i]);

	load_blktrace(argv[optind], need_swap);
	first_ttime /= 1000ULL;

	return output_p();
}