/*
 * Status and ETA code
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>

#include "fio.h"
#include "lib/pow2.h"

static char __run_str[REAL_MAX_JOBS + 1];
static char run_str[__THREAD_RUNSTR_SZ(REAL_MAX_JOBS)];

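/*
 * Run-length encode the per-job state string: runs of identical state
 * characters become "<char>(<count>)," entries, with the trailing comma
 * dropped (e.g. "PPPPRR" turns into "P(4),R(2)").
 */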
static void update_condensed_str(char *rstr, char *run_str_condensed)
{
	if (*rstr) {
		while (*rstr) {
			int nr = 1;

			*run_str_condensed++ = *rstr++;
			while (*(rstr - 1) == *rstr) {
				rstr++;
				nr++;
			}
			run_str_condensed += sprintf(run_str_condensed, "(%u),", nr);
		}
		run_str_condensed--;
	}
	*run_str_condensed = '\0';
}

/*
 * Sets the status of the 'td' in the printed status map.
 */
static void check_str_update(struct thread_data *td)
{
	char c = __run_str[td->thread_number - 1];

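	/*
	 * Map the run state to a single character. For running jobs,
	 * uppercase means sequential and lowercase random I/O: R/r reads,
	 * W/w writes, M/m mixed, D/d jobs that are neither (trims).
	 */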
	switch (td->runstate) {
	case TD_REAPED:
		if (td->error)
			c = 'X';
		else if (td->sig)
			c = 'K';
		else
			c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RAMP:
		c = '/';
		break;
	case TD_RUNNING:
		if (td_rw(td)) {
			if (td_random(td)) {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'r';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'w';
				else
					c = 'm';
			} else {
				if (td->o.rwmix[DDIR_READ] == 100)
					c = 'R';
				else if (td->o.rwmix[DDIR_WRITE] == 100)
					c = 'W';
				else
					c = 'M';
			}
		} else if (td_read(td)) {
			if (td_random(td))
				c = 'r';
			else
				c = 'R';
		} else if (td_write(td)) {
			if (td_random(td))
				c = 'w';
			else
				c = 'W';
		} else {
			if (td_random(td))
				c = 'd';
			else
				c = 'D';
		}
		break;
	case TD_PRE_READING:
		c = 'p';
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_FSYNCING:
		c = 'F';
		break;
	case TD_FINISHING:
		c = 'f';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_INITIALIZED:
	case TD_SETTING_UP:
		c = 'I';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		log_err("state %d\n", td->runstate);
	}

	__run_str[td->thread_number - 1] = c;
	update_condensed_str(__run_str, run_str);
}

/*
 * Convert seconds to a printable string.
 */
void eta_to_str(char *str, unsigned long eta_sec)
{
	unsigned int d, h, m, s;
	int disp_hour = 0;

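	/*
	 * (unsigned long) -1 means the ETA is not known and prints as "--";
	 * anything else is formatted as [DDd:][HHh:]MMm:SSs.
	 */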
	if (eta_sec == -1) {
		sprintf(str, "--");
		return;
	}

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d) {
		disp_hour = 1;
		str += sprintf(str, "%02ud:", d);
	}

	if (h || disp_hour)
		str += sprintf(str, "%02uh:", h);

	str += sprintf(str, "%02um:", m);
	str += sprintf(str, "%02us", s);
}

/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static unsigned long thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;
	uint64_t timeout;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->flags & TD_F_NO_PROGRESS)
		return -1;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

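	/*
	 * With zoned I/O, the skipped portion of each zone is never
	 * transferred, so subtract it from the expected total.
	 */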
	if (td->o.zone_size && td->o.zone_skip && bytes_total) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
		nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
		bytes_total -= nr_zones * td->o.zone_skip;
	}

	/*
	 * if writing and verifying afterwards, bytes_total will be twice the
	 * size. In a mixed workload, verify phase will be the size of the
	 * first stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td)) {
			unsigned int perc = 50;

			if (td->o.rwmix[DDIR_WRITE])
				perc = td->o.rwmix[DDIR_WRITE];

			bytes_total += (bytes_total * perc) / 100;
		} else
			bytes_total <<= 1;
	}

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = ddir_rw_sum(td->io_bytes);

		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else
			perc = 0.0;

		if (td->o.time_based) {
			if (timeout) {
				perc_t = (double) elapsed / (double) timeout;
				if (perc_t < perc)
					perc = perc_t;
			} else {
				/*
				 * Will never hit, we can't have time_based
				 * without a timeout set.
				 */
				perc = 0.0;
			}
		}

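		/*
		 * Extrapolate the total runtime from the fraction completed
		 * so far, then subtract the time already spent; clamp the
		 * result if a timeout is set.
		 */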
		if (perc == 0.0) {
			eta_sec = timeout;
		} else {
			eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;
		}

		if (td->o.timeout &&
		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int64_t t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			uint64_t __timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = __timeout + start_delay;
			if (!td->ramp_time_over) {
				t_eta += ramp_time;
			}
			t_eta /= 1000000ULL;

			if ((td->runstate == TD_RAMP) && in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}
		rate_bytes = 0;
		if (td_read(td))
			rate_bytes  = td->o.rate[DDIR_READ];
		if (td_write(td))
			rate_bytes += td->o.rate[DDIR_WRITE];
		if (td_trim(td))
			rate_bytes += td->o.rate[DDIR_TRIM];

		if (rate_bytes) {
			r_eta = bytes_total / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}

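/*
 * Per-direction bandwidth in KiB/s over the last 'mtime' msecs. With
 * unified_rw_rep all directions are folded into the read slot.
 */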
static void calc_rate(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_bytes,
		      unsigned long long *prev_io_bytes, uint64_t *rate)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_rate;

		diff = io_bytes[i] - prev_io_bytes[i];
		if (mtime)
			this_rate = ((1000 * diff) / mtime) / 1024; /* KiB/s */
		else
			this_rate = 0;

		if (unified_rw_rep) {
			rate[i] = 0;
			rate[0] += this_rate;
		} else
			rate[i] = this_rate;

		prev_io_bytes[i] = io_bytes[i];
	}
}

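/*
 * Per-direction IOPS over the last 'mtime' msecs, folded into the read
 * slot when unified_rw_rep is set.
 */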
static void calc_iops(int unified_rw_rep, unsigned long mtime,
		      unsigned long long *io_iops,
		      unsigned long long *prev_io_iops, unsigned int *iops)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		unsigned long long diff, this_iops;

		diff = io_iops[i] - prev_io_iops[i];
		if (mtime)
			this_iops = (diff * 1000) / mtime;
		else
			this_iops = 0;

		if (unified_rw_rep) {
			iops[i] = 0;
			iops[0] += this_iops;
		} else
			iops[i] = this_iops;

		prev_io_iops[i] = io_iops[i];
	}
}

/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
bool calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i, unified_rw_rep;
	uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timeval now;

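	/*
	 * Rates and IOPS are computed as deltas against these snapshots,
	 * which persist across calls.
	 */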
	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timeval rate_prev_time, disp_prev_time;

	if (!force) {
		if (!(output_format & FIO_OUTPUT_NORMAL) &&
		    f_out == stdout)
			return false;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return false;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return false;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(uint64_t));
	memset(eta_secs, 0, thread_number * sizeof(uint64_t));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	unified_rw_rep = 0;
	for_each_td(td, i) {
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		je->unit_base = td->o.unit_base;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING
		    || td->runstate == TD_FINISHING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP)
			je->nr_setting_up++;
		else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			int ddir;

			for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
				} else {
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];
				}
			}
		}
	}

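	/*
	 * With exitall_on_terminate the whole run ends when the first job
	 * finishes, so use the smallest per-job ETA. Otherwise use the
	 * largest, and add on the ETAs of stonewalled jobs that have not
	 * started yet, since those will run afterwards.
	 */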
	if (exitall_on_terminate) {
		je->eta_sec = INT_MAX;
		for_each_td(td, i) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	} else {
		unsigned long eta_stone = 0;

		je->eta_sec = 0;
		for_each_td(td, i) {
			if ((td->runstate == TD_NOT_CREATED) && td->o.stonewall)
				eta_stone += eta_secs[i];
			else {
				if (eta_secs[i] > je->eta_sec)
					je->eta_sec = eta_secs[i];
			}
		}
		je->eta_sec += eta_stone;
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
				je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0);
		add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0);
		add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	/*
	 * Allow a little slack, the target is to print it every 1000 msecs
	 */
	if (!force && disp_time < 900)
		return false;

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return false;

	je->nr_threads = thread_number;
	update_condensed_str(__run_str, run_str);
	memcpy(je->run_str, run_str, strlen(run_str));
	return true;
}

void display_thread_status(struct jobs_eta *je)
{
	static struct timeval disp_eta_new_line;
	static int eta_new_line_init, eta_new_line_pending;
	static int linelen_last;
	static int eta_good;
	char output[REAL_MAX_JOBS + 512], *p = output;
	char eta_str[128];
	double perc = 0.0;

	if (je->eta_sec != INT_MAX && je->elapsed_sec) {
		perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
		eta_to_str(eta_str, je->eta_sec);
	}

	if (eta_new_line_pending) {
		eta_new_line_pending = 0;
		p += sprintf(p, "\n");
	}

	p += sprintf(p, "Jobs: %d (f=%d)", je->nr_running, je->files_open);

	/* rate limits, if any */
	if (je->m_rate[0] || je->m_rate[1] || je->m_rate[2] ||
	    je->t_rate[0] || je->t_rate[1] || je->t_rate[2]) {
		char *tr, *mr;

		mr = num2str(je->m_rate[0] + je->m_rate[1] + je->m_rate[2],
				4, 0, je->is_pow2, N2S_BYTEPERSEC);
		tr = num2str(je->t_rate[0] + je->t_rate[1] + je->t_rate[2],
				4, 0, je->is_pow2, N2S_BYTEPERSEC);

		p += sprintf(p, ", %s-%s", mr, tr);
		free(tr);
		free(mr);
	} else if (je->m_iops[0] || je->m_iops[1] || je->m_iops[2] ||
		   je->t_iops[0] || je->t_iops[1] || je->t_iops[2]) {
		p += sprintf(p, ", %d-%d IOPS",
					je->m_iops[0] + je->m_iops[1] + je->m_iops[2],
					je->t_iops[0] + je->t_iops[1] + je->t_iops[2]);
	}

	/* current run string, % done, bandwidth, iops, eta */
	if (je->eta_sec != INT_MAX && je->nr_running) {
		char perc_str[32];
		char *iops_str[DDIR_RWDIR_CNT];
		char *rate_str[DDIR_RWDIR_CNT];
		size_t left;
		int l;
		int ddir;

		if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running ||
		    je->eta_sec == -1)
			strcpy(perc_str, "-.-%");
		else {
			double mult = 100.0;

			if (je->nr_setting_up && je->nr_running)
				mult *= (1.0 - (double) je->nr_setting_up / (double) je->nr_running);

			eta_good = 1;
			perc *= mult;
			sprintf(perc_str, "%3.1f%%", perc);
		}

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			rate_str[ddir] = num2str(je->rate[ddir], 4,
						1024, je->is_pow2, je->unit_base);
			iops_str[ddir] = num2str(je->iops[ddir], 4, 1, 0, N2S_NONE);
		}

		left = sizeof(output) - (p - output) - 1;

		if (je->rate[DDIR_TRIM] || je->iops[DDIR_TRIM])
			l = snprintf(p, left,
				": [%s][%s][r=%s,w=%s,t=%s][r=%s,w=%s,t=%s IOPS][eta %s]",
				je->run_str, perc_str, rate_str[DDIR_READ],
				rate_str[DDIR_WRITE], rate_str[DDIR_TRIM],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				iops_str[DDIR_TRIM], eta_str);
		else
			l = snprintf(p, left,
				": [%s][%s][r=%s,w=%s][r=%s,w=%s IOPS][eta %s]",
				je->run_str, perc_str,
				rate_str[DDIR_READ], rate_str[DDIR_WRITE],
				iops_str[DDIR_READ], iops_str[DDIR_WRITE],
				eta_str);
		p += l;
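		/*
		 * The status line ends with '\r', not '\n', so pad with
		 * spaces when this line is shorter than the previous one to
		 * fully overwrite it.
		 */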
		if (l >= 0 && l < linelen_last)
			p += sprintf(p, "%*s", linelen_last - l, "");
		linelen_last = l;

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			free(rate_str[ddir]);
			free(iops_str[ddir]);
		}
	}
	p += sprintf(p, "\r");

	printf("%s", output);

	if (!eta_new_line_init) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_init = 1;
	} else if (eta_new_line && mtime_since_now(&disp_eta_new_line) > eta_new_line) {
		fio_gettime(&disp_eta_new_line, NULL);
		eta_new_line_pending = 1;
	}

	fflush(stdout);
}

struct jobs_eta *get_jobs_eta(bool force, size_t *size)
{
	struct jobs_eta *je;

	if (!thread_number)
		return NULL;

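	/*
	 * Allocate room for the worst-case run string; *size is trimmed to
	 * the bytes actually used before returning.
	 */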
	*size = sizeof(*je) + THREAD_RUNSTR_SZ + 8;
	je = malloc(*size);
	if (!je)
		return NULL;
	memset(je, 0, *size);

	if (!calc_thread_status(je, force)) {
		free(je);
		return NULL;
	}

	*size = sizeof(*je) + strlen((char *) je->run_str) + 1;
	return je;
}

void print_thread_status(void)
{
	struct jobs_eta *je;
	size_t size;

	je = get_jobs_eta(false, &size);
	if (je)
		display_thread_status(je);

	free(je);
}

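/*
 * Mark the given job slot as pending ('P') in the status map.
 */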
void print_status_init(int thr_number)
{
	__run_str[thr_number] = 'P';
	update_condensed_str(__run_str, run_str);
}