1 #include <stdio.h>
2 #include <stdlib.h>
3 #include <stdarg.h>
4 #include <unistd.h>
5 #include <limits.h>
6 #include <errno.h>
7 #include <sys/poll.h>
8 #include <sys/types.h>
9 #include <sys/wait.h>
10 #include <sys/socket.h>
11 #include <sys/stat.h>
12 #include <sys/un.h>
13 #include <sys/uio.h>
14 #include <netinet/in.h>
15 #include <arpa/inet.h>
16 #include <netdb.h>
17 #include <syslog.h>
18 #include <signal.h>
19 #ifdef CONFIG_ZLIB
20 #include <zlib.h>
21 #endif
22 
23 #include "fio.h"
24 #include "options.h"
25 #include "server.h"
26 #include "crc/crc16.h"
27 #include "lib/ieee754.h"
28 #include "verify.h"
29 #include "smalloc.h"
30 
31 int fio_net_port = FIO_NET_PORT;
32 
33 int exit_backend = 0;
34 
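/*
 * Flags for a queued sk_entry, as used below: SK_F_FREE means the entry
 * owns its buffer and free()s it when done, SK_F_COPY means the buffer
 * was duplicated into smalloc() storage (and is sfree()d), SK_F_SIMPLE
 * marks a command without a payload, SK_F_VEC marks a vectored/linked
 * command sent via fio_send_cmd_ext_pdu(), and SK_F_INLINE causes the
 * entry to be transmitted immediately instead of queued on sk_out->list.
 */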
35 enum {
36 	SK_F_FREE	= 1,
37 	SK_F_COPY	= 2,
38 	SK_F_SIMPLE	= 4,
39 	SK_F_VEC	= 8,
40 	SK_F_INLINE	= 16,
41 };
42 
43 struct sk_entry {
44 	struct flist_head list;	/* link on sk_out->list */
45 	int flags;		/* SK_F_* */
46 	int opcode;		/* Actual command fields */
47 	void *buf;
48 	off_t size;
49 	uint64_t tag;
50 	struct flist_head next;	/* Other sk_entry's, if linked command */
51 };
52 
53 static char *fio_server_arg;
54 static char *bind_sock;
55 static struct sockaddr_in saddr_in;
56 static struct sockaddr_in6 saddr_in6;
57 static int use_ipv6;
58 #ifdef CONFIG_ZLIB
59 static unsigned int has_zlib = 1;
60 #else
61 static unsigned int has_zlib = 0;
62 #endif
63 static unsigned int use_zlib;
64 static char me[128];
65 
66 static pthread_key_t sk_out_key;
67 
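/*
 * Tracks processes forked off by the server (one per accepted connection,
 * one per RUN command); they are reaped with waitpid() in
 * fio_server_check_fork_item().
 */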
68 struct fio_fork_item {
69 	struct flist_head list;
70 	int exitval;
71 	int signal;
72 	int exited;
73 	pid_t pid;
74 };
75 
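/*
 * Reply state for requests where the server waits on the client: the
 * command tag carries a pointer to a cmd_reply, and the handler (see the
 * FIO_NET_CMD_SENDFILE case in handle_command()) fills in data/size/error
 * and releases 'lock'.
 */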
76 struct cmd_reply {
77 	struct fio_mutex lock;
78 	void *data;
79 	size_t size;
80 	int error;
81 };
82 
83 static const char *fio_server_ops[FIO_NET_CMD_NR] = {
84 	"",
85 	"QUIT",
86 	"EXIT",
87 	"JOB",
88 	"JOBLINE",
89 	"TEXT",
90 	"TS",
91 	"GS",
92 	"SEND_ETA",
93 	"ETA",
94 	"PROBE",
95 	"START",
96 	"STOP",
97 	"DISK_UTIL",
98 	"SERVER_START",
99 	"ADD_JOB",
100 	"RUN",
101 	"IOLOG",
102 	"UPDATE_JOB",
103 	"LOAD_FILE",
104 	"VTRIGGER",
105 	"SENDFILE",
106 	"JOB_OPT",
107 };
108 
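/*
 * Per-connection output state. A pointer to the sk_out is kept in
 * thread-local storage (sk_out_key) and reference counted, so any thread
 * belonging to this connection can queue output through it.
 */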
109 static void sk_lock(struct sk_out *sk_out)
110 {
111 	fio_mutex_down(&sk_out->lock);
112 }
113 
114 static void sk_unlock(struct sk_out *sk_out)
115 {
116 	fio_mutex_up(&sk_out->lock);
117 }
118 
119 void sk_out_assign(struct sk_out *sk_out)
120 {
121 	if (!sk_out)
122 		return;
123 
124 	sk_lock(sk_out);
125 	sk_out->refs++;
126 	sk_unlock(sk_out);
127 	pthread_setspecific(sk_out_key, sk_out);
128 }
129 
130 static void sk_out_free(struct sk_out *sk_out)
131 {
132 	__fio_mutex_remove(&sk_out->lock);
133 	__fio_mutex_remove(&sk_out->wait);
134 	__fio_mutex_remove(&sk_out->xmit);
135 	sfree(sk_out);
136 }
137 
138 static int __sk_out_drop(struct sk_out *sk_out)
139 {
140 	if (sk_out) {
141 		int refs;
142 
143 		sk_lock(sk_out);
144 		assert(sk_out->refs != 0);
145 		refs = --sk_out->refs;
146 		sk_unlock(sk_out);
147 
148 		if (!refs) {
149 			sk_out_free(sk_out);
150 			pthread_setspecific(sk_out_key, NULL);
151 			return 0;
152 		}
153 	}
154 
155 	return 1;
156 }
157 
158 void sk_out_drop(void)
159 {
160 	struct sk_out *sk_out;
161 
162 	sk_out = pthread_getspecific(sk_out_key);
163 	__sk_out_drop(sk_out);
164 }
165 
166 static void __fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
167 			       uint32_t pdu_len, uint64_t tag)
168 {
169 	memset(cmd, 0, sizeof(*cmd));
170 
171 	cmd->version	= __cpu_to_le16(FIO_SERVER_VER);
172 	cmd->opcode	= cpu_to_le16(opcode);
173 	cmd->tag	= cpu_to_le64(tag);
174 	cmd->pdu_len	= cpu_to_le32(pdu_len);
175 }
176 
177 
178 static void fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
179 			     const void *pdu, uint32_t pdu_len, uint64_t tag)
180 {
181 	__fio_init_net_cmd(cmd, opcode, pdu_len, tag);
182 
183 	if (pdu)
184 		memcpy(&cmd->payload, pdu, pdu_len);
185 }
186 
187 const char *fio_server_op(unsigned int op)
188 {
189 	static char buf[32];
190 
191 	if (op < FIO_NET_CMD_NR)
192 		return fio_server_ops[op];
193 
194 	sprintf(buf, "UNKNOWN/%d", op);
195 	return buf;
196 }
197 
198 static ssize_t iov_total_len(const struct iovec *iov, int count)
199 {
200 	ssize_t ret = 0;
201 
202 	while (count--) {
203 		ret += iov->iov_len;
204 		iov++;
205 	}
206 
207 	return ret;
208 }
209 
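/*
 * Write the iovec to the socket, advancing it across short writes and
 * retrying on EINTR/EAGAIN until everything is sent (or the backend is
 * exiting). Returns 0 on success, 1 if data was left unsent.
 */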
210 static int fio_sendv_data(int sk, struct iovec *iov, int count)
211 {
212 	ssize_t total_len = iov_total_len(iov, count);
213 	ssize_t ret;
214 
215 	do {
216 		ret = writev(sk, iov, count);
217 		if (ret > 0) {
218 			total_len -= ret;
219 			if (!total_len)
220 				break;
221 
222 			while (ret) {
223 				if (ret >= iov->iov_len) {
224 					ret -= iov->iov_len;
225 					iov++;
226 					continue;
227 				}
228 				iov->iov_base += ret;
229 				iov->iov_len -= ret;
230 				ret = 0;
231 			}
232 		} else if (!ret)
233 			break;
234 		else if (errno == EAGAIN || errno == EINTR)
235 			continue;
236 		else
237 			break;
238 	} while (!exit_backend);
239 
240 	if (!total_len)
241 		return 0;
242 
243 	return 1;
244 }
245 
246 static int fio_send_data(int sk, const void *p, unsigned int len)
247 {
248 	struct iovec iov = { .iov_base = (void *) p, .iov_len = len };
249 
250 	assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_FRAGMENT_PDU);
251 
252 	return fio_sendv_data(sk, &iov, 1);
253 }
254 
255 static int fio_recv_data(int sk, void *p, unsigned int len, bool wait)
256 {
257 	int flags;
258 
259 	if (wait)
260 		flags = MSG_WAITALL;
261 	else
262 		flags = OS_MSG_DONTWAIT;
263 
264 	do {
265 		int ret = recv(sk, p, len, flags);
266 
267 		if (ret > 0) {
268 			len -= ret;
269 			if (!len)
270 				break;
271 			p += ret;
272 			continue;
273 		} else if (!ret)
274 			break;
275 		else if (errno == EAGAIN || errno == EINTR) {
276 			if (wait)
277 				continue;
278 			break;
279 		} else
280 			break;
281 	} while (!exit_backend);
282 
283 	if (!len)
284 		return 0;
285 
286 	return -1;
287 }
288 
289 static int verify_convert_cmd(struct fio_net_cmd *cmd)
290 {
291 	uint16_t crc;
292 
293 	cmd->cmd_crc16 = le16_to_cpu(cmd->cmd_crc16);
294 	cmd->pdu_crc16 = le16_to_cpu(cmd->pdu_crc16);
295 
296 	crc = fio_crc16(cmd, FIO_NET_CMD_CRC_SZ);
297 	if (crc != cmd->cmd_crc16) {
298 		log_err("fio: server bad crc on command (got %x, wanted %x)\n",
299 				cmd->cmd_crc16, crc);
300 		return 1;
301 	}
302 
303 	cmd->version	= le16_to_cpu(cmd->version);
304 	cmd->opcode	= le16_to_cpu(cmd->opcode);
305 	cmd->flags	= le32_to_cpu(cmd->flags);
306 	cmd->tag	= le64_to_cpu(cmd->tag);
307 	cmd->pdu_len	= le32_to_cpu(cmd->pdu_len);
308 
309 	switch (cmd->version) {
310 	case FIO_SERVER_VER:
311 		break;
312 	default:
313 		log_err("fio: bad server cmd version %d\n", cmd->version);
314 		return 1;
315 	}
316 
317 	if (cmd->pdu_len > FIO_SERVER_MAX_FRAGMENT_PDU) {
318 		log_err("fio: command payload too large: %u\n", cmd->pdu_len);
319 		return 1;
320 	}
321 
322 	return 0;
323 }
324 
325 /*
326  * Read (and defragment, if necessary) incoming commands
327  */
328 struct fio_net_cmd *fio_net_recv_cmd(int sk, bool wait)
329 {
330 	struct fio_net_cmd cmd, *tmp, *cmdret = NULL;
331 	size_t cmd_size = 0, pdu_offset = 0;
332 	uint16_t crc;
333 	int ret, first = 1;
334 	void *pdu = NULL;
335 
336 	do {
337 		ret = fio_recv_data(sk, &cmd, sizeof(cmd), wait);
338 		if (ret)
339 			break;
340 
341 		/* We have a command, verify it and swap if need be */
342 		ret = verify_convert_cmd(&cmd);
343 		if (ret)
344 			break;
345 
346 		if (first) {
347 			/* if this is text, add room for \0 at the end */
348 			cmd_size = sizeof(cmd) + cmd.pdu_len + 1;
349 			assert(!cmdret);
350 		} else
351 			cmd_size += cmd.pdu_len;
352 
353 		if (cmd_size / 1024 > FIO_SERVER_MAX_CMD_MB * 1024) {
354 			log_err("fio: cmd+pdu too large (%llu)\n", (unsigned long long) cmd_size);
355 			ret = 1;
356 			break;
357 		}
358 
359 		tmp = realloc(cmdret, cmd_size);
360 		if (!tmp) {
361 			log_err("fio: server failed allocating cmd\n");
362 			ret = 1;
363 			break;
364 		}
365 		cmdret = tmp;
366 
367 		if (first)
368 			memcpy(cmdret, &cmd, sizeof(cmd));
369 		else if (cmdret->opcode != cmd.opcode) {
370 			log_err("fio: fragment opcode mismatch (%d != %d)\n",
371 					cmdret->opcode, cmd.opcode);
372 			ret = 1;
373 			break;
374 		}
375 
376 		if (!cmd.pdu_len)
377 			break;
378 
379 		/* There's payload, get it */
380 		pdu = (void *) cmdret->payload + pdu_offset;
381 		ret = fio_recv_data(sk, pdu, cmd.pdu_len, wait);
382 		if (ret)
383 			break;
384 
385 		/* Verify payload crc */
386 		crc = fio_crc16(pdu, cmd.pdu_len);
387 		if (crc != cmd.pdu_crc16) {
388 			log_err("fio: server bad crc on payload ");
389 			log_err("(got %x, wanted %x)\n", cmd.pdu_crc16, crc);
390 			ret = 1;
391 			break;
392 		}
393 
394 		pdu_offset += cmd.pdu_len;
395 		if (!first)
396 			cmdret->pdu_len += cmd.pdu_len;
397 		first = 0;
398 	} while (cmd.flags & FIO_NET_CMD_F_MORE);
399 
400 	if (ret) {
401 		free(cmdret);
402 		cmdret = NULL;
403 	} else if (cmdret) {
404 		/* zero-terminate text input */
405 		if (cmdret->pdu_len) {
406 			if (cmdret->opcode == FIO_NET_CMD_TEXT) {
407 				struct cmd_text_pdu *__pdu = (struct cmd_text_pdu *) cmdret->payload;
408 				char *buf = (char *) __pdu->buf;
409 
410 				buf[__pdu->buf_len] = '\0';
411 			} else if (cmdret->opcode == FIO_NET_CMD_JOB) {
412 				struct cmd_job_pdu *__pdu = (struct cmd_job_pdu *) cmdret->payload;
413 				char *buf = (char *) __pdu->buf;
414 				int len = le32_to_cpu(__pdu->buf_len);
415 
416 				buf[len] = '\0';
417 			}
418 		}
419 
420 		/* frag flag is internal */
421 		cmdret->flags &= ~FIO_NET_CMD_F_MORE;
422 	}
423 
424 	return cmdret;
425 }
426 
427 static void add_reply(uint64_t tag, struct flist_head *list)
428 {
429 	struct fio_net_cmd_reply *reply;
430 
431 	reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
432 	flist_add_tail(&reply->list, list);
433 }
434 
435 static uint64_t alloc_reply(uint64_t tag, uint16_t opcode)
436 {
437 	struct fio_net_cmd_reply *reply;
438 
439 	reply = calloc(1, sizeof(*reply));
440 	INIT_FLIST_HEAD(&reply->list);
441 	fio_gettime(&reply->tv, NULL);
442 	reply->saved_tag = tag;
443 	reply->opcode = opcode;
444 
445 	return (uintptr_t) reply;
446 }
447 
448 static void free_reply(uint64_t tag)
449 {
450 	struct fio_net_cmd_reply *reply;
451 
452 	reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
453 	free(reply);
454 }
455 
456 static void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu)
457 {
458 	uint32_t pdu_len;
459 
460 	cmd->cmd_crc16 = __cpu_to_le16(fio_crc16(cmd, FIO_NET_CMD_CRC_SZ));
461 
462 	pdu_len = le32_to_cpu(cmd->pdu_len);
463 	cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(pdu, pdu_len));
464 }
465 
466 static void fio_net_cmd_crc(struct fio_net_cmd *cmd)
467 {
468 	fio_net_cmd_crc_pdu(cmd, cmd->payload);
469 }
470 
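/*
 * Send a command with an inlined payload, fragmenting it into
 * FIO_SERVER_MAX_FRAGMENT_PDU sized pieces with FIO_NET_CMD_F_MORE set on
 * all but the last fragment. If 'list' is non-NULL, the command is stored
 * for later reply matching.
 */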
471 int fio_net_send_cmd(int fd, uint16_t opcode, const void *buf, off_t size,
472 		     uint64_t *tagptr, struct flist_head *list)
473 {
474 	struct fio_net_cmd *cmd = NULL;
475 	size_t this_len, cur_len = 0;
476 	uint64_t tag;
477 	int ret;
478 
479 	if (list) {
480 		assert(tagptr);
481 		tag = *tagptr = alloc_reply(*tagptr, opcode);
482 	} else
483 		tag = tagptr ? *tagptr : 0;
484 
485 	do {
486 		this_len = size;
487 		if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
488 			this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
489 
490 		if (!cmd || cur_len < sizeof(*cmd) + this_len) {
491 			if (cmd)
492 				free(cmd);
493 
494 			cur_len = sizeof(*cmd) + this_len;
495 			cmd = malloc(cur_len);
496 		}
497 
498 		fio_init_net_cmd(cmd, opcode, buf, this_len, tag);
499 
500 		if (this_len < size)
501 			cmd->flags = __cpu_to_le32(FIO_NET_CMD_F_MORE);
502 
503 		fio_net_cmd_crc(cmd);
504 
505 		ret = fio_send_data(fd, cmd, sizeof(*cmd) + this_len);
506 		size -= this_len;
507 		buf += this_len;
508 	} while (!ret && size);
509 
510 	if (list) {
511 		if (ret)
512 			free_reply(tag);
513 		else
514 			add_reply(tag, list);
515 	}
516 
517 	if (cmd)
518 		free(cmd);
519 
520 	return ret;
521 }
522 
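/*
 * Allocate an sk_entry for deferred transmission by the connection
 * backend. With SK_F_COPY the payload is duplicated into shared (smalloc)
 * memory; otherwise the entry just references 'buf'.
 */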
523 static struct sk_entry *fio_net_prep_cmd(uint16_t opcode, void *buf,
524 					 size_t size, uint64_t *tagptr,
525 					 int flags)
526 {
527 	struct sk_entry *entry;
528 
529 	entry = smalloc(sizeof(*entry));
530 	INIT_FLIST_HEAD(&entry->next);
531 	entry->opcode = opcode;
532 	if (flags & SK_F_COPY) {
533 		entry->buf = smalloc(size);
534 		memcpy(entry->buf, buf, size);
535 	} else
536 		entry->buf = buf;
537 
538 	entry->size = size;
539 	if (tagptr)
540 		entry->tag = *tagptr;
541 	else
542 		entry->tag = 0;
543 	entry->flags = flags;
544 	return entry;
545 }
546 
547 static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry);
548 
549 static void fio_net_queue_entry(struct sk_entry *entry)
550 {
551 	struct sk_out *sk_out = pthread_getspecific(sk_out_key);
552 
553 	if (entry->flags & SK_F_INLINE)
554 		handle_sk_entry(sk_out, entry);
555 	else {
556 		sk_lock(sk_out);
557 		flist_add_tail(&entry->list, &sk_out->list);
558 		sk_unlock(sk_out);
559 
560 		fio_mutex_up(&sk_out->wait);
561 	}
562 }
563 
564 static int fio_net_queue_cmd(uint16_t opcode, void *buf, off_t size,
565 			     uint64_t *tagptr, int flags)
566 {
567 	struct sk_entry *entry;
568 
569 	entry = fio_net_prep_cmd(opcode, buf, size, tagptr, flags);
570 	if (entry) {
571 		fio_net_queue_entry(entry);
572 		return 0;
573 	}
574 
575 	return 1;
576 }
577 
578 static int fio_net_send_simple_stack_cmd(int sk, uint16_t opcode, uint64_t tag)
579 {
580 	struct fio_net_cmd cmd;
581 
582 	fio_init_net_cmd(&cmd, opcode, NULL, 0, tag);
583 	fio_net_cmd_crc(&cmd);
584 
585 	return fio_send_data(sk, &cmd, sizeof(cmd));
586 }
587 
588 /*
589  * If 'list' is non-NULL, then allocate and store the sent command for
590  * later verification.
591  */
592 int fio_net_send_simple_cmd(int sk, uint16_t opcode, uint64_t tag,
593 			    struct flist_head *list)
594 {
595 	int ret;
596 
597 	if (list)
598 		tag = alloc_reply(tag, opcode);
599 
600 	ret = fio_net_send_simple_stack_cmd(sk, opcode, tag);
601 	if (ret) {
602 		if (list)
603 			free_reply(tag);
604 
605 		return ret;
606 	}
607 
608 	if (list)
609 		add_reply(tag, list);
610 
611 	return 0;
612 }
613 
614 static int fio_net_queue_quit(void)
615 {
616 	dprint(FD_NET, "server: sending quit\n");
617 
618 	return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, NULL, SK_F_SIMPLE);
619 }
620 
621 int fio_net_send_quit(int sk)
622 {
623 	dprint(FD_NET, "server: sending quit\n");
624 
625 	return fio_net_send_simple_cmd(sk, FIO_NET_CMD_QUIT, 0, NULL);
626 }
627 
628 static int fio_net_send_ack(struct fio_net_cmd *cmd, int error, int signal)
629 {
630 	struct cmd_end_pdu epdu;
631 	uint64_t tag = 0;
632 
633 	if (cmd)
634 		tag = cmd->tag;
635 
636 	epdu.error = __cpu_to_le32(error);
637 	epdu.signal = __cpu_to_le32(signal);
638 	return fio_net_queue_cmd(FIO_NET_CMD_STOP, &epdu, sizeof(epdu), &tag, SK_F_COPY);
639 }
640 
641 static int fio_net_queue_stop(int error, int signal)
642 {
643 	dprint(FD_NET, "server: sending stop (%d, %d)\n", error, signal);
644 	return fio_net_send_ack(NULL, error, signal);
645 }
646 
647 static void fio_server_add_fork_item(pid_t pid, struct flist_head *list)
648 {
649 	struct fio_fork_item *ffi;
650 
651 	ffi = malloc(sizeof(*ffi));
652 	ffi->exitval = 0;
653 	ffi->signal = 0;
654 	ffi->exited = 0;
655 	ffi->pid = pid;
656 	flist_add_tail(&ffi->list, list);
657 }
658 
659 static void fio_server_add_conn_pid(struct flist_head *conn_list, pid_t pid)
660 {
661 	dprint(FD_NET, "server: forked off connection job (pid=%u)\n", (int) pid);
662 	fio_server_add_fork_item(pid, conn_list);
663 }
664 
665 static void fio_server_add_job_pid(struct flist_head *job_list, pid_t pid)
666 {
667 	dprint(FD_NET, "server: forked off job (pid=%u)\n", (int) pid);
668 	fio_server_add_fork_item(pid, job_list);
669 }
670 
671 static void fio_server_check_fork_item(struct fio_fork_item *ffi)
672 {
673 	int ret, status;
674 
675 	ret = waitpid(ffi->pid, &status, WNOHANG);
676 	if (ret < 0) {
677 		if (errno == ECHILD) {
678 			log_err("fio: connection pid %u disappeared\n", (int) ffi->pid);
679 			ffi->exited = 1;
680 		} else
681 			log_err("fio: waitpid: %s\n", strerror(errno));
682 	} else if (ret == ffi->pid) {
683 		if (WIFSIGNALED(status)) {
684 			ffi->signal = WTERMSIG(status);
685 			ffi->exited = 1;
686 		}
687 		if (WIFEXITED(status)) {
688 			if (WEXITSTATUS(status))
689 				ffi->exitval = WEXITSTATUS(status);
690 			ffi->exited = 1;
691 		}
692 	}
693 }
694 
695 static void fio_server_fork_item_done(struct fio_fork_item *ffi, bool stop)
696 {
697 	dprint(FD_NET, "pid %u exited, sig=%u, exitval=%d\n", (int) ffi->pid, ffi->signal, ffi->exitval);
698 
699 	/*
700 	 * Fold STOP and QUIT...
701 	 */
702 	if (stop) {
703 		fio_net_queue_stop(ffi->exitval, ffi->signal);
704 		fio_net_queue_quit();
705 	}
706 
707 	flist_del(&ffi->list);
708 	free(ffi);
709 }
710 
711 static void fio_server_check_fork_items(struct flist_head *list, bool stop)
712 {
713 	struct flist_head *entry, *tmp;
714 	struct fio_fork_item *ffi;
715 
716 	flist_for_each_safe(entry, tmp, list) {
717 		ffi = flist_entry(entry, struct fio_fork_item, list);
718 
719 		fio_server_check_fork_item(ffi);
720 
721 		if (ffi->exited)
722 			fio_server_fork_item_done(ffi, stop);
723 	}
724 }
725 
726 static void fio_server_check_jobs(struct flist_head *job_list)
727 {
728 	fio_server_check_fork_items(job_list, true);
729 }
730 
731 static void fio_server_check_conns(struct flist_head *conn_list)
732 {
733 	fio_server_check_fork_items(conn_list, false);
734 }
735 
736 static int handle_load_file_cmd(struct fio_net_cmd *cmd)
737 {
738 	struct cmd_load_file_pdu *pdu = (struct cmd_load_file_pdu *) cmd->payload;
739 	void *file_name = pdu->file;
740 	struct cmd_start_pdu spdu;
741 
742 	dprint(FD_NET, "server: loading local file %s\n", (char *) file_name);
743 
744 	pdu->name_len = le16_to_cpu(pdu->name_len);
745 	pdu->client_type = le16_to_cpu(pdu->client_type);
746 
747 	if (parse_jobs_ini(file_name, 0, 0, pdu->client_type)) {
748 		fio_net_queue_quit();
749 		return -1;
750 	}
751 
752 	spdu.jobs = cpu_to_le32(thread_number);
753 	spdu.stat_outputs = cpu_to_le32(stat_number);
754 	fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
755 	return 0;
756 }
757 
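/*
 * RUN command: fork a child that executes the parsed jobs via
 * fio_backend(), while the parent only records the child pid on job_list.
 */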
758 static int handle_run_cmd(struct sk_out *sk_out, struct flist_head *job_list,
759 			  struct fio_net_cmd *cmd)
760 {
761 	pid_t pid;
762 	int ret;
763 
764 	sk_out_assign(sk_out);
765 
766 	fio_time_init();
767 	set_genesis_time();
768 
769 	pid = fork();
770 	if (pid) {
771 		fio_server_add_job_pid(job_list, pid);
772 		return 0;
773 	}
774 
775 	ret = fio_backend(sk_out);
776 	free_threads_shm();
777 	sk_out_drop();
778 	_exit(ret);
779 }
780 
781 static int handle_job_cmd(struct fio_net_cmd *cmd)
782 {
783 	struct cmd_job_pdu *pdu = (struct cmd_job_pdu *) cmd->payload;
784 	void *buf = pdu->buf;
785 	struct cmd_start_pdu spdu;
786 
787 	pdu->buf_len = le32_to_cpu(pdu->buf_len);
788 	pdu->client_type = le32_to_cpu(pdu->client_type);
789 
790 	if (parse_jobs_ini(buf, 1, 0, pdu->client_type)) {
791 		fio_net_queue_quit();
792 		return -1;
793 	}
794 
795 	spdu.jobs = cpu_to_le32(thread_number);
796 	spdu.stat_outputs = cpu_to_le32(stat_number);
797 
798 	fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
799 	return 0;
800 }
801 
802 static int handle_jobline_cmd(struct fio_net_cmd *cmd)
803 {
804 	void *pdu = cmd->payload;
805 	struct cmd_single_line_pdu *cslp;
806 	struct cmd_line_pdu *clp;
807 	unsigned long offset;
808 	struct cmd_start_pdu spdu;
809 	char **argv;
810 	int i;
811 
812 	clp = pdu;
813 	clp->lines = le16_to_cpu(clp->lines);
814 	clp->client_type = le16_to_cpu(clp->client_type);
815 	argv = malloc(clp->lines * sizeof(char *));
816 	offset = sizeof(*clp);
817 
818 	dprint(FD_NET, "server: %d command line args\n", clp->lines);
819 
820 	for (i = 0; i < clp->lines; i++) {
821 		cslp = pdu + offset;
822 		argv[i] = (char *) cslp->text;
823 
824 		offset += sizeof(*cslp) + le16_to_cpu(cslp->len);
825 		dprint(FD_NET, "server: %d: %s\n", i, argv[i]);
826 	}
827 
828 	if (parse_cmd_line(clp->lines, argv, clp->client_type)) {
829 		fio_net_queue_quit();
830 		free(argv);
831 		return -1;
832 	}
833 
834 	free(argv);
835 
836 	spdu.jobs = cpu_to_le32(thread_number);
837 	spdu.stat_outputs = cpu_to_le32(stat_number);
838 
839 	fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
840 	return 0;
841 }
842 
843 static int handle_probe_cmd(struct fio_net_cmd *cmd)
844 {
845 	struct cmd_client_probe_pdu *pdu = (struct cmd_client_probe_pdu *) cmd->payload;
846 	struct cmd_probe_reply_pdu probe;
847 	uint64_t tag = cmd->tag;
848 
849 	dprint(FD_NET, "server: sending probe reply\n");
850 
851 	strcpy(me, (char *) pdu->server);
852 
853 	memset(&probe, 0, sizeof(probe));
854 	gethostname((char *) probe.hostname, sizeof(probe.hostname));
855 #ifdef CONFIG_BIG_ENDIAN
856 	probe.bigendian = 1;
857 #endif
858 	strncpy((char *) probe.fio_version, fio_version_string, sizeof(probe.fio_version));
859 
860 	probe.os	= FIO_OS;
861 	probe.arch	= FIO_ARCH;
862 	probe.bpp	= sizeof(void *);
863 	probe.cpus	= __cpu_to_le32(cpus_online());
864 
865 	/*
866 	 * If the client supports compression and we do too, then enable it
867 	 */
868 	if (has_zlib && le64_to_cpu(pdu->flags) & FIO_PROBE_FLAG_ZLIB) {
869 		probe.flags = __cpu_to_le64(FIO_PROBE_FLAG_ZLIB);
870 		use_zlib = 1;
871 	} else {
872 		probe.flags = 0;
873 		use_zlib = 0;
874 	}
875 
876 	return fio_net_queue_cmd(FIO_NET_CMD_PROBE, &probe, sizeof(probe), &tag, SK_F_COPY);
877 }
878 
879 static int handle_send_eta_cmd(struct fio_net_cmd *cmd)
880 {
881 	struct jobs_eta *je;
882 	uint64_t tag = cmd->tag;
883 	size_t size;
884 	int i;
885 
886 	dprint(FD_NET, "server sending status\n");
887 
888 	/*
889 	 * Fake ETA return if we don't have a local one, otherwise the client
890 	 * will end up timing out waiting for a response to the ETA request
891 	 */
892 	je = get_jobs_eta(true, &size);
893 	if (!je) {
894 		size = sizeof(*je);
895 		je = calloc(1, size);
896 	} else {
897 		je->nr_running		= cpu_to_le32(je->nr_running);
898 		je->nr_ramp		= cpu_to_le32(je->nr_ramp);
899 		je->nr_pending		= cpu_to_le32(je->nr_pending);
900 		je->nr_setting_up	= cpu_to_le32(je->nr_setting_up);
901 		je->files_open		= cpu_to_le32(je->files_open);
902 
903 		for (i = 0; i < DDIR_RWDIR_CNT; i++) {
904 			je->m_rate[i]	= cpu_to_le64(je->m_rate[i]);
905 			je->t_rate[i]	= cpu_to_le64(je->t_rate[i]);
906 			je->m_iops[i]	= cpu_to_le32(je->m_iops[i]);
907 			je->t_iops[i]	= cpu_to_le32(je->t_iops[i]);
908 			je->rate[i]	= cpu_to_le64(je->rate[i]);
909 			je->iops[i]	= cpu_to_le32(je->iops[i]);
910 		}
911 
912 		je->elapsed_sec		= cpu_to_le64(je->elapsed_sec);
913 		je->eta_sec		= cpu_to_le64(je->eta_sec);
914 		je->nr_threads		= cpu_to_le32(je->nr_threads);
915 		je->is_pow2		= cpu_to_le32(je->is_pow2);
916 		je->unit_base		= cpu_to_le32(je->unit_base);
917 	}
918 
919 	fio_net_queue_cmd(FIO_NET_CMD_ETA, je, size, &tag, SK_F_FREE);
920 	return 0;
921 }
922 
923 static int send_update_job_reply(uint64_t __tag, int error)
924 {
925 	uint64_t tag = __tag;
926 	uint32_t pdu_error;
927 
928 	pdu_error = __cpu_to_le32(error);
929 	return fio_net_queue_cmd(FIO_NET_CMD_UPDATE_JOB, &pdu_error, sizeof(pdu_error), &tag, SK_F_COPY);
930 }
931 
932 static int handle_update_job_cmd(struct fio_net_cmd *cmd)
933 {
934 	struct cmd_add_job_pdu *pdu = (struct cmd_add_job_pdu *) cmd->payload;
935 	struct thread_data *td;
936 	uint32_t tnumber;
937 
938 	tnumber = le32_to_cpu(pdu->thread_number);
939 
940 	dprint(FD_NET, "server: updating options for job %u\n", tnumber);
941 
942 	if (!tnumber || tnumber > thread_number) {
943 		send_update_job_reply(cmd->tag, ENODEV);
944 		return 0;
945 	}
946 
947 	td = &threads[tnumber - 1];
948 	convert_thread_options_to_cpu(&td->o, &pdu->top);
949 	send_update_job_reply(cmd->tag, 0);
950 	return 0;
951 }
952 
953 static int handle_trigger_cmd(struct fio_net_cmd *cmd)
954 {
955 	struct cmd_vtrigger_pdu *pdu = (struct cmd_vtrigger_pdu *) cmd->payload;
956 	char *buf = (char *) pdu->cmd;
957 	struct all_io_list *rep;
958 	size_t sz;
959 
960 	pdu->len = le16_to_cpu(pdu->len);
961 	buf[pdu->len] = '\0';
962 
963 	rep = get_all_io_list(IO_LIST_ALL, &sz);
964 	if (!rep) {
965 		struct all_io_list state;
966 
967 		state.threads = cpu_to_le64((uint64_t) 0);
968 		fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY | SK_F_INLINE);
969 	} else
970 		fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE | SK_F_INLINE);
971 
972 	exec_trigger(buf);
973 	return 0;
974 }
975 
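/*
 * Dispatch a single command received from the client. A non-zero return
 * terminates the connection loop in handle_connection().
 */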
976 static int handle_command(struct sk_out *sk_out, struct flist_head *job_list,
977 			  struct fio_net_cmd *cmd)
978 {
979 	int ret;
980 
981 	dprint(FD_NET, "server: got op [%s], pdu=%u, tag=%llx\n",
982 			fio_server_op(cmd->opcode), cmd->pdu_len,
983 			(unsigned long long) cmd->tag);
984 
985 	switch (cmd->opcode) {
986 	case FIO_NET_CMD_QUIT:
987 		fio_terminate_threads(TERMINATE_ALL);
988 		ret = 0;
989 		break;
990 	case FIO_NET_CMD_EXIT:
991 		exit_backend = 1;
992 		return -1;
993 	case FIO_NET_CMD_LOAD_FILE:
994 		ret = handle_load_file_cmd(cmd);
995 		break;
996 	case FIO_NET_CMD_JOB:
997 		ret = handle_job_cmd(cmd);
998 		break;
999 	case FIO_NET_CMD_JOBLINE:
1000 		ret = handle_jobline_cmd(cmd);
1001 		break;
1002 	case FIO_NET_CMD_PROBE:
1003 		ret = handle_probe_cmd(cmd);
1004 		break;
1005 	case FIO_NET_CMD_SEND_ETA:
1006 		ret = handle_send_eta_cmd(cmd);
1007 		break;
1008 	case FIO_NET_CMD_RUN:
1009 		ret = handle_run_cmd(sk_out, job_list, cmd);
1010 		break;
1011 	case FIO_NET_CMD_UPDATE_JOB:
1012 		ret = handle_update_job_cmd(cmd);
1013 		break;
1014 	case FIO_NET_CMD_VTRIGGER:
1015 		ret = handle_trigger_cmd(cmd);
1016 		break;
1017 	case FIO_NET_CMD_SENDFILE: {
1018 		struct cmd_sendfile_reply *in;
1019 		struct cmd_reply *rep;
1020 
1021 		rep = (struct cmd_reply *) (uintptr_t) cmd->tag;
1022 
1023 		in = (struct cmd_sendfile_reply *) cmd->payload;
1024 		in->size = le32_to_cpu(in->size);
1025 		in->error = le32_to_cpu(in->error);
1026 		if (in->error) {
1027 			ret = 1;
1028 			rep->error = in->error;
1029 		} else {
1030 			ret = 0;
1031 			rep->data = smalloc(in->size);
1032 			if (!rep->data) {
1033 				ret = 1;
1034 				rep->error = ENOMEM;
1035 			} else {
1036 				rep->size = in->size;
1037 				memcpy(rep->data, in->data, in->size);
1038 			}
1039 		}
1040 		fio_mutex_up(&rep->lock);
1041 		break;
1042 		}
1043 	default:
1044 		log_err("fio: unknown opcode: %s\n", fio_server_op(cmd->opcode));
1045 		ret = 1;
1046 	}
1047 
1048 	return ret;
1049 }
1050 
1051 /*
1052  * Send a command with a separate PDU, not inlined in the command
1053  */
1054 static int fio_send_cmd_ext_pdu(int sk, uint16_t opcode, const void *buf,
1055 				off_t size, uint64_t tag, uint32_t flags)
1056 {
1057 	struct fio_net_cmd cmd;
1058 	struct iovec iov[2];
1059 	size_t this_len;
1060 	int ret;
1061 
1062 	iov[0].iov_base = (void *) &cmd;
1063 	iov[0].iov_len = sizeof(cmd);
1064 
1065 	do {
1066 		uint32_t this_flags = flags;
1067 
1068 		this_len = size;
1069 		if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
1070 			this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
1071 
1072 		if (this_len < size)
1073 			this_flags |= FIO_NET_CMD_F_MORE;
1074 
1075 		__fio_init_net_cmd(&cmd, opcode, this_len, tag);
1076 		cmd.flags = __cpu_to_le32(this_flags);
1077 		fio_net_cmd_crc_pdu(&cmd, buf);
1078 
1079 		iov[1].iov_base = (void *) buf;
1080 		iov[1].iov_len = this_len;
1081 
1082 		ret = fio_sendv_data(sk, iov, 2);
1083 		size -= this_len;
1084 		buf += this_len;
1085 	} while (!ret && size);
1086 
1087 	return ret;
1088 }
1089 
1090 static void finish_entry(struct sk_entry *entry)
1091 {
1092 	if (entry->flags & SK_F_FREE)
1093 		free(entry->buf);
1094 	else if (entry->flags & SK_F_COPY)
1095 		sfree(entry->buf);
1096 
1097 	sfree(entry);
1098 }
1099 
1100 static void entry_set_flags(struct sk_entry *entry, struct flist_head *list,
1101 			    unsigned int *flags)
1102 {
1103 	if (!flist_empty(list))
1104 		*flags = FIO_NET_CMD_F_MORE;
1105 	else
1106 		*flags = 0;
1107 }
1108 
1109 static int send_vec_entry(struct sk_out *sk_out, struct sk_entry *first)
1110 {
1111 	unsigned int flags;
1112 	int ret;
1113 
1114 	entry_set_flags(first, &first->next, &flags);
1115 
1116 	ret = fio_send_cmd_ext_pdu(sk_out->sk, first->opcode, first->buf,
1117 					first->size, first->tag, flags);
1118 
1119 	while (!flist_empty(&first->next)) {
1120 		struct sk_entry *next;
1121 
1122 		next = flist_first_entry(&first->next, struct sk_entry, list);
1123 		flist_del_init(&next->list);
1124 
1125 		entry_set_flags(next, &first->next, &flags);
1126 
1127 		ret += fio_send_cmd_ext_pdu(sk_out->sk, next->opcode, next->buf,
1128 						next->size, next->tag, flags);
1129 		finish_entry(next);
1130 	}
1131 
1132 	return ret;
1133 }
1134 
1135 static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry)
1136 {
1137 	int ret;
1138 
1139 	fio_mutex_down(&sk_out->xmit);
1140 
1141 	if (entry->flags & SK_F_VEC)
1142 		ret = send_vec_entry(sk_out, entry);
1143 	else if (entry->flags & SK_F_SIMPLE) {
1144 		ret = fio_net_send_simple_cmd(sk_out->sk, entry->opcode,
1145 						entry->tag, NULL);
1146 	} else {
1147 		ret = fio_net_send_cmd(sk_out->sk, entry->opcode, entry->buf,
1148 					entry->size, &entry->tag, NULL);
1149 	}
1150 
1151 	fio_mutex_up(&sk_out->xmit);
1152 
1153 	if (ret)
1154 		log_err("fio: failed handling cmd %s\n", fio_server_op(entry->opcode));
1155 
1156 	finish_entry(entry);
1157 	return ret;
1158 }
1159 
1160 static int handle_xmits(struct sk_out *sk_out)
1161 {
1162 	struct sk_entry *entry;
1163 	FLIST_HEAD(list);
1164 	int ret = 0;
1165 
1166 	sk_lock(sk_out);
1167 	if (flist_empty(&sk_out->list)) {
1168 		sk_unlock(sk_out);
1169 		return 0;
1170 	}
1171 
1172 	flist_splice_init(&sk_out->list, &list);
1173 	sk_unlock(sk_out);
1174 
1175 	while (!flist_empty(&list)) {
1176 		entry = flist_entry(list.next, struct sk_entry, list);
1177 		flist_del(&entry->list);
1178 		ret += handle_sk_entry(sk_out, entry);
1179 	}
1180 
1181 	return ret;
1182 }
1183 
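/*
 * Per-connection loop, run in a forked child: flush queued output, poll
 * the socket, reap finished job processes and dispatch incoming commands
 * until the client goes away, then exit the process.
 */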
1184 static int handle_connection(struct sk_out *sk_out)
1185 {
1186 	struct fio_net_cmd *cmd = NULL;
1187 	FLIST_HEAD(job_list);
1188 	int ret = 0;
1189 
1190 	reset_fio_state();
1191 
1192 	/* read forever */
1193 	while (!exit_backend) {
1194 		struct pollfd pfd = {
1195 			.fd	= sk_out->sk,
1196 			.events	= POLLIN,
1197 		};
1198 
1199 		ret = 0;
1200 		do {
1201 			int timeout = 1000;
1202 
1203 			if (!flist_empty(&job_list))
1204 				timeout = 100;
1205 
1206 			handle_xmits(sk_out);
1207 
1208 			ret = poll(&pfd, 1, 0);
1209 			if (ret < 0) {
1210 				if (errno == EINTR)
1211 					break;
1212 				log_err("fio: poll: %s\n", strerror(errno));
1213 				break;
1214 			} else if (!ret) {
1215 				fio_server_check_jobs(&job_list);
1216 				fio_mutex_down_timeout(&sk_out->wait, timeout);
1217 				continue;
1218 			}
1219 
1220 			if (pfd.revents & POLLIN)
1221 				break;
1222 			if (pfd.revents & (POLLERR|POLLHUP)) {
1223 				ret = 1;
1224 				break;
1225 			}
1226 		} while (!exit_backend);
1227 
1228 		fio_server_check_jobs(&job_list);
1229 
1230 		if (ret < 0)
1231 			break;
1232 
1233 		cmd = fio_net_recv_cmd(sk_out->sk, true);
1234 		if (!cmd) {
1235 			ret = -1;
1236 			break;
1237 		}
1238 
1239 		ret = handle_command(sk_out, &job_list, cmd);
1240 		if (ret)
1241 			break;
1242 
1243 		free(cmd);
1244 		cmd = NULL;
1245 	}
1246 
1247 	if (cmd)
1248 		free(cmd);
1249 
1250 	handle_xmits(sk_out);
1251 
1252 	close(sk_out->sk);
1253 	sk_out->sk = -1;
1254 	__sk_out_drop(sk_out);
1255 	_exit(ret);
1256 }
1257 
1258 /* get the address on this host bound by the input socket,
1259  * whether it is ipv6 or ipv4 */
1260 
1261 static int get_my_addr_str(int sk)
1262 {
1263 	struct sockaddr_in6 myaddr6 = { 0, };
1264 	struct sockaddr_in myaddr4 = { 0, };
1265 	struct sockaddr *sockaddr_p;
1266 	char *net_addr;
1267 	socklen_t len;
1268 	int ret;
1269 
1270 	if (use_ipv6) {
1271 		len = sizeof(myaddr6);
1272 		sockaddr_p = (struct sockaddr * )&myaddr6;
1273 		net_addr = (char * )&myaddr6.sin6_addr;
1274 	} else {
1275 		len = sizeof(myaddr4);
1276 		sockaddr_p = (struct sockaddr * )&myaddr4;
1277 		net_addr = (char * )&myaddr4.sin_addr;
1278 	}
1279 
1280 	ret = getsockname(sk, sockaddr_p, &len);
1281 	if (ret) {
1282 		log_err("fio: getsockaddr: %s\n", strerror(errno));
1283 		return -1;
1284 	}
1285 
1286 	if (!inet_ntop(use_ipv6?AF_INET6:AF_INET, net_addr, client_sockaddr_str, INET6_ADDRSTRLEN - 1)) {
1287 		log_err("inet_ntop: failed to convert addr to string\n");
1288 		return -1;
1289 	}
1290 
1291 	dprint(FD_NET, "fio server bound to addr %s\n", client_sockaddr_str);
1292 	return 0;
1293 }
1294 
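/*
 * Main server loop: poll the listen socket, accept connections and fork a
 * child per connection, while the parent reaps finished connection
 * processes.
 */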
1295 static int accept_loop(int listen_sk)
1296 {
1297 	struct sockaddr_in addr;
1298 	struct sockaddr_in6 addr6;
1299 	socklen_t len = use_ipv6 ? sizeof(addr6) : sizeof(addr);
1300 	struct pollfd pfd;
1301 	int ret = 0, sk, exitval = 0;
1302 	FLIST_HEAD(conn_list);
1303 
1304 	dprint(FD_NET, "server enter accept loop\n");
1305 
1306 	fio_set_fd_nonblocking(listen_sk, "server");
1307 
1308 	while (!exit_backend) {
1309 		struct sk_out *sk_out;
1310 		const char *from;
1311 		char buf[64];
1312 		pid_t pid;
1313 
1314 		pfd.fd = listen_sk;
1315 		pfd.events = POLLIN;
1316 		do {
1317 			int timeout = 1000;
1318 
1319 			if (!flist_empty(&conn_list))
1320 				timeout = 100;
1321 
1322 			ret = poll(&pfd, 1, timeout);
1323 			if (ret < 0) {
1324 				if (errno == EINTR)
1325 					break;
1326 				log_err("fio: poll: %s\n", strerror(errno));
1327 				break;
1328 			} else if (!ret) {
1329 				fio_server_check_conns(&conn_list);
1330 				continue;
1331 			}
1332 
1333 			if (pfd.revents & POLLIN)
1334 				break;
1335 		} while (!exit_backend);
1336 
1337 		fio_server_check_conns(&conn_list);
1338 
1339 		if (exit_backend || ret < 0)
1340 			break;
1341 
1342 		if (use_ipv6)
1343 			sk = accept(listen_sk, (struct sockaddr *) &addr6, &len);
1344 		else
1345 			sk = accept(listen_sk, (struct sockaddr *) &addr, &len);
1346 
1347 		if (sk < 0) {
1348 			log_err("fio: accept: %s\n", strerror(errno));
1349 			return -1;
1350 		}
1351 
1352 		if (use_ipv6)
1353 			from = inet_ntop(AF_INET6, (struct sockaddr *) &addr6.sin6_addr, buf, sizeof(buf));
1354 		else
1355 			from = inet_ntop(AF_INET, (struct sockaddr *) &addr.sin_addr, buf, sizeof(buf));
1356 
1357 		dprint(FD_NET, "server: connect from %s\n", from);
1358 
1359 		sk_out = smalloc(sizeof(*sk_out));
1360 		sk_out->sk = sk;
1361 		INIT_FLIST_HEAD(&sk_out->list);
1362 		__fio_mutex_init(&sk_out->lock, FIO_MUTEX_UNLOCKED);
1363 		__fio_mutex_init(&sk_out->wait, FIO_MUTEX_LOCKED);
1364 		__fio_mutex_init(&sk_out->xmit, FIO_MUTEX_UNLOCKED);
1365 
1366 		pid = fork();
1367 		if (pid) {
1368 			close(sk);
1369 			fio_server_add_conn_pid(&conn_list, pid);
1370 			continue;
1371 		}
1372 
1373 		/* if error, it's already logged, non-fatal */
1374 		get_my_addr_str(sk);
1375 
1376 		/*
1377 		 * Assign sk_out here, it'll be dropped in handle_connection()
1378 		 * since that function calls _exit() when done
1379 		 */
1380 		sk_out_assign(sk_out);
1381 		handle_connection(sk_out);
1382 	}
1383 
1384 	return exitval;
1385 }
1386 
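/*
 * Forward log output to the connected client as a timestamped
 * FIO_NET_CMD_TEXT command. Returns -1 if there is no client socket.
 */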
1387 int fio_server_text_output(int level, const char *buf, size_t len)
1388 {
1389 	struct sk_out *sk_out = pthread_getspecific(sk_out_key);
1390 	struct cmd_text_pdu *pdu;
1391 	unsigned int tlen;
1392 	struct timeval tv;
1393 
1394 	if (!sk_out || sk_out->sk == -1)
1395 		return -1;
1396 
1397 	tlen = sizeof(*pdu) + len;
1398 	pdu = malloc(tlen);
1399 
1400 	pdu->level	= __cpu_to_le32(level);
1401 	pdu->buf_len	= __cpu_to_le32(len);
1402 
1403 	gettimeofday(&tv, NULL);
1404 	pdu->log_sec	= __cpu_to_le64(tv.tv_sec);
1405 	pdu->log_usec	= __cpu_to_le64(tv.tv_usec);
1406 
1407 	memcpy(pdu->buf, buf, len);
1408 
1409 	fio_net_queue_cmd(FIO_NET_CMD_TEXT, pdu, tlen, NULL, SK_F_COPY);
1410 	free(pdu);
1411 	return len;
1412 }
1413 
1414 static void convert_io_stat(struct io_stat *dst, struct io_stat *src)
1415 {
1416 	dst->max_val	= cpu_to_le64(src->max_val);
1417 	dst->min_val	= cpu_to_le64(src->min_val);
1418 	dst->samples	= cpu_to_le64(src->samples);
1419 
1420 	/*
1421 	 * Encode to IEEE 754 for network transfer
1422 	 */
1423 	dst->mean.u.i	= cpu_to_le64(fio_double_to_uint64(src->mean.u.f));
1424 	dst->S.u.i	= cpu_to_le64(fio_double_to_uint64(src->S.u.f));
1425 }
1426 
1427 static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
1428 {
1429 	int i;
1430 
1431 	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1432 		dst->max_run[i]		= cpu_to_le64(src->max_run[i]);
1433 		dst->min_run[i]		= cpu_to_le64(src->min_run[i]);
1434 		dst->max_bw[i]		= cpu_to_le64(src->max_bw[i]);
1435 		dst->min_bw[i]		= cpu_to_le64(src->min_bw[i]);
1436 		dst->iobytes[i]		= cpu_to_le64(src->iobytes[i]);
1437 		dst->agg[i]		= cpu_to_le64(src->agg[i]);
1438 	}
1439 
1440 	dst->kb_base	= cpu_to_le32(src->kb_base);
1441 	dst->unit_base	= cpu_to_le32(src->unit_base);
1442 	dst->groupid	= cpu_to_le32(src->groupid);
1443 	dst->unified_rw_rep	= cpu_to_le32(src->unified_rw_rep);
1444 }
1445 
1446 /*
1447  * Send a CMD_TS, which packs struct thread_stat and group_run_stats
1448  * into a single payload.
1449  */
1450 void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs)
1451 {
1452 	struct cmd_ts_pdu p;
1453 	int i, j;
1454 	void *ss_buf;
1455 	uint64_t *ss_iops, *ss_bw;
1456 
1457 	dprint(FD_NET, "server sending end stats\n");
1458 
1459 	memset(&p, 0, sizeof(p));
1460 
1461 	strncpy(p.ts.name, ts->name, FIO_JOBNAME_SIZE - 1);
1462 	strncpy(p.ts.verror, ts->verror, FIO_VERROR_SIZE - 1);
1463 	strncpy(p.ts.description, ts->description, FIO_JOBDESC_SIZE - 1);
1464 
1465 	p.ts.error		= cpu_to_le32(ts->error);
1466 	p.ts.thread_number	= cpu_to_le32(ts->thread_number);
1467 	p.ts.groupid		= cpu_to_le32(ts->groupid);
1468 	p.ts.pid		= cpu_to_le32(ts->pid);
1469 	p.ts.members		= cpu_to_le32(ts->members);
1470 	p.ts.unified_rw_rep	= cpu_to_le32(ts->unified_rw_rep);
1471 
1472 	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1473 		convert_io_stat(&p.ts.clat_stat[i], &ts->clat_stat[i]);
1474 		convert_io_stat(&p.ts.slat_stat[i], &ts->slat_stat[i]);
1475 		convert_io_stat(&p.ts.lat_stat[i], &ts->lat_stat[i]);
1476 		convert_io_stat(&p.ts.bw_stat[i], &ts->bw_stat[i]);
1477 	}
1478 
1479 	p.ts.usr_time		= cpu_to_le64(ts->usr_time);
1480 	p.ts.sys_time		= cpu_to_le64(ts->sys_time);
1481 	p.ts.ctx		= cpu_to_le64(ts->ctx);
1482 	p.ts.minf		= cpu_to_le64(ts->minf);
1483 	p.ts.majf		= cpu_to_le64(ts->majf);
1484 	p.ts.clat_percentiles	= cpu_to_le64(ts->clat_percentiles);
1485 	p.ts.percentile_precision = cpu_to_le64(ts->percentile_precision);
1486 
1487 	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
1488 		fio_fp64_t *src = &ts->percentile_list[i];
1489 		fio_fp64_t *dst = &p.ts.percentile_list[i];
1490 
1491 		dst->u.i = cpu_to_le64(fio_double_to_uint64(src->u.f));
1492 	}
1493 
1494 	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
1495 		p.ts.io_u_map[i]	= cpu_to_le32(ts->io_u_map[i]);
1496 		p.ts.io_u_submit[i]	= cpu_to_le32(ts->io_u_submit[i]);
1497 		p.ts.io_u_complete[i]	= cpu_to_le32(ts->io_u_complete[i]);
1498 	}
1499 
1500 	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
1501 		p.ts.io_u_lat_u[i]	= cpu_to_le32(ts->io_u_lat_u[i]);
1502 	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
1503 		p.ts.io_u_lat_m[i]	= cpu_to_le32(ts->io_u_lat_m[i]);
1504 
1505 	for (i = 0; i < DDIR_RWDIR_CNT; i++)
1506 		for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
1507 			p.ts.io_u_plat[i][j] = cpu_to_le32(ts->io_u_plat[i][j]);
1508 
1509 	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1510 		p.ts.total_io_u[i]	= cpu_to_le64(ts->total_io_u[i]);
1511 		p.ts.short_io_u[i]	= cpu_to_le64(ts->short_io_u[i]);
1512 		p.ts.drop_io_u[i]	= cpu_to_le64(ts->drop_io_u[i]);
1513 	}
1514 
1515 	p.ts.total_submit	= cpu_to_le64(ts->total_submit);
1516 	p.ts.total_complete	= cpu_to_le64(ts->total_complete);
1517 
1518 	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1519 		p.ts.io_bytes[i]	= cpu_to_le64(ts->io_bytes[i]);
1520 		p.ts.runtime[i]		= cpu_to_le64(ts->runtime[i]);
1521 	}
1522 
1523 	p.ts.total_run_time	= cpu_to_le64(ts->total_run_time);
1524 	p.ts.continue_on_error	= cpu_to_le16(ts->continue_on_error);
1525 	p.ts.total_err_count	= cpu_to_le64(ts->total_err_count);
1526 	p.ts.first_error	= cpu_to_le32(ts->first_error);
1527 	p.ts.kb_base		= cpu_to_le32(ts->kb_base);
1528 	p.ts.unit_base		= cpu_to_le32(ts->unit_base);
1529 
1530 	p.ts.latency_depth	= cpu_to_le32(ts->latency_depth);
1531 	p.ts.latency_target	= cpu_to_le64(ts->latency_target);
1532 	p.ts.latency_window	= cpu_to_le64(ts->latency_window);
1533 	p.ts.latency_percentile.u.i = cpu_to_le64(fio_double_to_uint64(ts->latency_percentile.u.f));
1534 
1535 	p.ts.nr_block_infos	= cpu_to_le64(ts->nr_block_infos);
1536 	for (i = 0; i < p.ts.nr_block_infos; i++)
1537 		p.ts.block_infos[i] = cpu_to_le32(ts->block_infos[i]);
1538 
1539 	p.ts.ss_dur		= cpu_to_le64(ts->ss_dur);
1540 	p.ts.ss_state		= cpu_to_le32(ts->ss_state);
1541 	p.ts.ss_head		= cpu_to_le32(ts->ss_head);
1542 	p.ts.ss_limit.u.i	= cpu_to_le64(fio_double_to_uint64(ts->ss_limit.u.f));
1543 	p.ts.ss_slope.u.i	= cpu_to_le64(fio_double_to_uint64(ts->ss_slope.u.f));
1544 	p.ts.ss_deviation.u.i	= cpu_to_le64(fio_double_to_uint64(ts->ss_deviation.u.f));
1545 	p.ts.ss_criterion.u.i	= cpu_to_le64(fio_double_to_uint64(ts->ss_criterion.u.f));
1546 
1547 	convert_gs(&p.rs, rs);
1548 
1549 	dprint(FD_NET, "ts->ss_state = %d\n", ts->ss_state);
1550 	if (ts->ss_state & __FIO_SS_DATA) {
1551 		dprint(FD_NET, "server sending steadystate ring buffers\n");
1552 
1553 		ss_buf = malloc(sizeof(p) + 2*ts->ss_dur*sizeof(uint64_t));
1554 
1555 		memcpy(ss_buf, &p, sizeof(p));
1556 
1557 		ss_iops = (uint64_t *) ((struct cmd_ts_pdu *)ss_buf + 1);
1558 		ss_bw = ss_iops + (int) ts->ss_dur;
1559 		for (i = 0; i < ts->ss_dur; i++) {
1560 			ss_iops[i] = cpu_to_le64(ts->ss_iops_data[i]);
1561 			ss_bw[i] = cpu_to_le64(ts->ss_bw_data[i]);
1562 		}
1563 
1564 		fio_net_queue_cmd(FIO_NET_CMD_TS, ss_buf, sizeof(p) + 2*ts->ss_dur*sizeof(uint64_t), NULL, SK_F_COPY);
1565 
1566 		free(ss_buf);
1567 	}
1568 	else
1569 		fio_net_queue_cmd(FIO_NET_CMD_TS, &p, sizeof(p), NULL, SK_F_COPY);
1570 }
1571 
1572 void fio_server_send_gs(struct group_run_stats *rs)
1573 {
1574 	struct group_run_stats gs;
1575 
1576 	dprint(FD_NET, "server sending group run stats\n");
1577 
1578 	convert_gs(&gs, rs);
1579 	fio_net_queue_cmd(FIO_NET_CMD_GS, &gs, sizeof(gs), NULL, SK_F_COPY);
1580 }
1581 
1582 void fio_server_send_job_options(struct flist_head *opt_list,
1583 				 unsigned int groupid)
1584 {
1585 	struct cmd_job_option pdu;
1586 	struct flist_head *entry;
1587 
1588 	if (flist_empty(opt_list))
1589 		return;
1590 
1591 	flist_for_each(entry, opt_list) {
1592 		struct print_option *p;
1593 		size_t len;
1594 
1595 		p = flist_entry(entry, struct print_option, list);
1596 		memset(&pdu, 0, sizeof(pdu));
1597 
1598 		if (groupid == -1U) {
1599 			pdu.global = __cpu_to_le16(1);
1600 			pdu.groupid = 0;
1601 		} else {
1602 			pdu.global = 0;
1603 			pdu.groupid = cpu_to_le32(groupid);
1604 		}
1605 		len = strlen(p->name);
1606 		if (len >= sizeof(pdu.name)) {
1607 			len = sizeof(pdu.name) - 1;
1608 			pdu.truncated = __cpu_to_le16(1);
1609 		}
1610 		memcpy(pdu.name, p->name, len);
1611 		if (p->value) {
1612 			len = strlen(p->value);
1613 			if (len >= sizeof(pdu.value)) {
1614 				len = sizeof(pdu.value) - 1;
1615 				pdu.truncated = __cpu_to_le16(1);
1616 			}
1617 			memcpy(pdu.value, p->value, len);
1618 		}
1619 		fio_net_queue_cmd(FIO_NET_CMD_JOB_OPT, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1620 	}
1621 }
1622 
1623 static void convert_agg(struct disk_util_agg *dst, struct disk_util_agg *src)
1624 {
1625 	int i;
1626 
1627 	for (i = 0; i < 2; i++) {
1628 		dst->ios[i]	= cpu_to_le64(src->ios[i]);
1629 		dst->merges[i]	= cpu_to_le64(src->merges[i]);
1630 		dst->sectors[i]	= cpu_to_le64(src->sectors[i]);
1631 		dst->ticks[i]	= cpu_to_le64(src->ticks[i]);
1632 	}
1633 
1634 	dst->io_ticks		= cpu_to_le64(src->io_ticks);
1635 	dst->time_in_queue	= cpu_to_le64(src->time_in_queue);
1636 	dst->slavecount		= cpu_to_le32(src->slavecount);
1637 	dst->max_util.u.i	= cpu_to_le64(fio_double_to_uint64(src->max_util.u.f));
1638 }
1639 
1640 static void convert_dus(struct disk_util_stat *dst, struct disk_util_stat *src)
1641 {
1642 	int i;
1643 
1644 	dst->name[FIO_DU_NAME_SZ - 1] = '\0';
1645 	strncpy((char *) dst->name, (char *) src->name, FIO_DU_NAME_SZ - 1);
1646 
1647 	for (i = 0; i < 2; i++) {
1648 		dst->s.ios[i]		= cpu_to_le64(src->s.ios[i]);
1649 		dst->s.merges[i]	= cpu_to_le64(src->s.merges[i]);
1650 		dst->s.sectors[i]	= cpu_to_le64(src->s.sectors[i]);
1651 		dst->s.ticks[i]		= cpu_to_le64(src->s.ticks[i]);
1652 	}
1653 
1654 	dst->s.io_ticks		= cpu_to_le64(src->s.io_ticks);
1655 	dst->s.time_in_queue	= cpu_to_le64(src->s.time_in_queue);
1656 	dst->s.msec		= cpu_to_le64(src->s.msec);
1657 }
1658 
1659 void fio_server_send_du(void)
1660 {
1661 	struct disk_util *du;
1662 	struct flist_head *entry;
1663 	struct cmd_du_pdu pdu;
1664 
1665 	dprint(FD_NET, "server: sending disk_util %d\n", !flist_empty(&disk_list));
1666 
1667 	memset(&pdu, 0, sizeof(pdu));
1668 
1669 	flist_for_each(entry, &disk_list) {
1670 		du = flist_entry(entry, struct disk_util, list);
1671 
1672 		convert_dus(&pdu.dus, &du->dus);
1673 		convert_agg(&pdu.agg, &du->agg);
1674 
1675 		fio_net_queue_cmd(FIO_NET_CMD_DU, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1676 	}
1677 }
1678 
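/*
 * On-the-fly zlib compression of I/O logs for transmission, only built
 * with CONFIG_ZLIB. Without zlib the stub further below just returns an
 * error; use_zlib is never enabled in that case anyway.
 */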
1679 #ifdef CONFIG_ZLIB
1680 
1681 static inline void __fio_net_prep_tail(z_stream *stream, void *out_pdu,
1682 					struct sk_entry **last_entry,
1683 					struct sk_entry *first)
1684 {
1685 	unsigned int this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
1686 
1687 	*last_entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
1688 				 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
1689 	flist_add_tail(&(*last_entry)->list, &first->next);
1690 
1691 }
1692 
1693 /*
1694  * Deflates the next input given, creating as many new packets in the
1695  * linked list as necessary.
1696  */
1697 static int __deflate_pdu_buffer(void *next_in, unsigned int next_sz, void **out_pdu,
1698 				struct sk_entry **last_entry, z_stream *stream,
1699 				struct sk_entry *first)
1700 {
1701 	int ret;
1702 
1703 	stream->next_in = next_in;
1704 	stream->avail_in = next_sz;
1705 	do {
1706 		if (! stream->avail_out) {
1707 
1708 			__fio_net_prep_tail(stream, *out_pdu, last_entry, first);
1709 
1710 			*out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1711 
1712 			stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1713 			stream->next_out = *out_pdu;
1714 		}
1715 
1716 		ret = deflate(stream, Z_BLOCK);
1717 
1718 		if (ret < 0) {
1719 			free(*out_pdu);
1720 			return 1;
1721 		}
1722 	} while (stream->avail_in);
1723 
1724 	return 0;
1725 }
1726 
1727 static int __fio_append_iolog_gz_hist(struct sk_entry *first, struct io_log *log,
1728 				      struct io_logs *cur_log, z_stream *stream)
1729 {
1730 	struct sk_entry *entry;
1731 	void *out_pdu;
1732 	int ret, i, j;
1733 	int sample_sz = log_entry_sz(log);
1734 
1735 	out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1736 	stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1737 	stream->next_out = out_pdu;
1738 
1739 	for (i = 0; i < cur_log->nr_samples; i++) {
1740 		struct io_sample *s;
1741 		struct io_u_plat_entry *cur_plat_entry, *prev_plat_entry;
1742 		unsigned int *cur_plat, *prev_plat;
1743 
1744 		s = get_sample(log, cur_log, i);
1745 		ret = __deflate_pdu_buffer(s, sample_sz, &out_pdu, &entry, stream, first);
1746 		if (ret)
1747 			return ret;
1748 
1749 		/* Do the subtraction on server side so that client doesn't have to
1750 		 * reconstruct our linked list from packets.
1751 		 */
1752 		cur_plat_entry  = s->data.plat_entry;
1753 		prev_plat_entry = flist_first_entry(&cur_plat_entry->list, struct io_u_plat_entry, list);
1754 		cur_plat  = cur_plat_entry->io_u_plat;
1755 		prev_plat = prev_plat_entry->io_u_plat;
1756 
1757 		for (j = 0; j < FIO_IO_U_PLAT_NR; j++) {
1758 			cur_plat[j] -= prev_plat[j];
1759 		}
1760 
1761 		flist_del(&prev_plat_entry->list);
1762 		free(prev_plat_entry);
1763 
1764 		ret = __deflate_pdu_buffer(cur_plat_entry, sizeof(*cur_plat_entry),
1765 					   &out_pdu, &entry, stream, first);
1766 
1767 		if (ret)
1768 			return ret;
1769 	}
1770 
1771 	__fio_net_prep_tail(stream, out_pdu, &entry, first);
1772 
1773 	return 0;
1774 }
1775 
1776 static int __fio_append_iolog_gz(struct sk_entry *first, struct io_log *log,
1777 				 struct io_logs *cur_log, z_stream *stream)
1778 {
1779 	unsigned int this_len;
1780 	void *out_pdu;
1781 	int ret;
1782 
1783 	if (log->log_type == IO_LOG_TYPE_HIST)
1784 		return __fio_append_iolog_gz_hist(first, log, cur_log, stream);
1785 
1786 	stream->next_in = (void *) cur_log->log;
1787 	stream->avail_in = cur_log->nr_samples * log_entry_sz(log);
1788 
1789 	do {
1790 		struct sk_entry *entry;
1791 
1792 		/*
1793 		 * Dirty - since the log is potentially huge, compress it into
1794 		 * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
1795 		 * side defragment it.
1796 		 */
1797 		out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1798 
1799 		stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1800 		stream->next_out = out_pdu;
1801 		ret = deflate(stream, Z_BLOCK);
1802 		/* may be Z_OK, or Z_STREAM_END */
1803 		if (ret < 0) {
1804 			free(out_pdu);
1805 			return 1;
1806 		}
1807 
1808 		this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
1809 
1810 		entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
1811 					 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
1812 		flist_add_tail(&entry->list, &first->next);
1813 	} while (stream->avail_in);
1814 
1815 	return 0;
1816 }
1817 
1818 static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
1819 {
1820 	int ret = 0;
1821 	z_stream stream;
1822 
1823 	memset(&stream, 0, sizeof(stream));
1824 	stream.zalloc = Z_NULL;
1825 	stream.zfree = Z_NULL;
1826 	stream.opaque = Z_NULL;
1827 
1828 	if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK)
1829 		return 1;
1830 
1831 	while (!flist_empty(&log->io_logs)) {
1832 		struct io_logs *cur_log;
1833 
1834 		cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
1835 		flist_del_init(&cur_log->list);
1836 
1837 		ret = __fio_append_iolog_gz(first, log, cur_log, &stream);
1838 		if (ret)
1839 			break;
1840 	}
1841 
1842 	ret = deflate(&stream, Z_FINISH);
1843 
1844 	while (ret != Z_STREAM_END) {
1845 		struct sk_entry *entry;
1846 		unsigned int this_len;
1847 		void *out_pdu;
1848 
1849 		out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1850 		stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1851 		stream.next_out = out_pdu;
1852 
1853 		ret = deflate(&stream, Z_FINISH);
1854 		/* may be Z_OK, or Z_STREAM_END */
1855 		if (ret < 0) {
1856 			free(out_pdu);
1857 			break;
1858 		}
1859 
1860 		this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out;
1861 
1862 		entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
1863 					 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
1864 		flist_add_tail(&entry->list, &first->next);
1865 	}
1866 
1867 	ret = deflateEnd(&stream);
1868 	if (ret == Z_OK)
1869 		return 0;
1870 
1871 	return 1;
1872 }
1873 #else
1874 static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
1875 {
1876 	return 1;
1877 }
1878 #endif
1879 
1880 static int fio_append_gz_chunks(struct sk_entry *first, struct io_log *log)
1881 {
1882 	struct sk_entry *entry;
1883 	struct flist_head *node;
1884 
1885 	pthread_mutex_lock(&log->chunk_lock);
1886 	flist_for_each(node, &log->chunk_list) {
1887 		struct iolog_compress *c;
1888 
1889 		c = flist_entry(node, struct iolog_compress, list);
1890 		entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, c->buf, c->len,
1891 						NULL, SK_F_VEC | SK_F_INLINE);
1892 		flist_add_tail(&entry->list, &first->next);
1893 	}
1894 	pthread_mutex_unlock(&log->chunk_lock);
1895 
1896 	return 0;
1897 }
1898 
1899 static int fio_append_text_log(struct sk_entry *first, struct io_log *log)
1900 {
1901 	struct sk_entry *entry;
1902 
1903 	while (!flist_empty(&log->io_logs)) {
1904 		struct io_logs *cur_log;
1905 		size_t size;
1906 
1907 		cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
1908 		flist_del_init(&cur_log->list);
1909 
1910 		size = cur_log->nr_samples * log_entry_sz(log);
1911 
1912 		entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, cur_log->log, size,
1913 						NULL, SK_F_VEC | SK_F_INLINE);
1914 		flist_add_tail(&entry->list, &first->next);
1915 	}
1916 
1917 	return 0;
1918 }
1919 
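/*
 * Ship one complete log to the client: a cmd_iolog_pdu header entry first,
 * followed by payload entries built by one of the helpers above
 * (pre-compressed chunks, deflated on the fly, or plain samples). Samples
 * are converted to little endian in place before being queued.
 */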
1920 int fio_send_iolog(struct thread_data *td, struct io_log *log, const char *name)
1921 {
1922 	struct cmd_iolog_pdu pdu;
1923 	struct sk_entry *first;
1924 	struct flist_head *entry;
1925 	int ret = 0;
1926 
1927 	pdu.nr_samples = cpu_to_le64(iolog_nr_samples(log));
1928 	pdu.thread_number = cpu_to_le32(td->thread_number);
1929 	pdu.log_type = cpu_to_le32(log->log_type);
1930 	pdu.log_hist_coarseness = cpu_to_le32(log->hist_coarseness);
1931 
1932 	if (!flist_empty(&log->chunk_list))
1933 		pdu.compressed = __cpu_to_le32(STORE_COMPRESSED);
1934 	else if (use_zlib)
1935 		pdu.compressed = __cpu_to_le32(XMIT_COMPRESSED);
1936 	else
1937 		pdu.compressed = 0;
1938 
1939 	strncpy((char *) pdu.name, name, FIO_NET_NAME_MAX);
1940 	pdu.name[FIO_NET_NAME_MAX - 1] = '\0';
1941 
1942 	/*
1943 	 * We can't byte swap the samples of a pre-compressed log, but in
1944 	 * that case the plain sample list is empty anyway.
1945 	 */
1946 	flist_for_each(entry, &log->io_logs) {
1947 		struct io_logs *cur_log;
1948 		int i;
1949 
1950 		cur_log = flist_entry(entry, struct io_logs, list);
1951 
1952 		for (i = 0; i < cur_log->nr_samples; i++) {
1953 			struct io_sample *s = get_sample(log, cur_log, i);
1954 
1955 			s->time		= cpu_to_le64(s->time);
1956 			s->data.val	= cpu_to_le64(s->data.val);
1957 			s->__ddir	= cpu_to_le32(s->__ddir);
1958 			s->bs		= cpu_to_le32(s->bs);
1959 
1960 			if (log->log_offset) {
1961 				struct io_sample_offset *so = (void *) s;
1962 
1963 				so->offset = cpu_to_le64(so->offset);
1964 			}
1965 		}
1966 	}
1967 
1968 	/*
1969 	 * Assemble header entry first
1970 	 */
1971 	first = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, &pdu, sizeof(pdu), NULL, SK_F_VEC | SK_F_INLINE | SK_F_COPY);
1972 
1973 	/*
1974 	 * Now append actual log entries. If log compression was enabled on
1975 	 * the job, just send out the compressed chunks directly. If we
1976 	 * have a plain log, compress if we can, then send. Otherwise, send
1977 	 * the plain text output.
1978 	 */
1979 	if (!flist_empty(&log->chunk_list))
1980 		ret = fio_append_gz_chunks(first, log);
1981 	else if (use_zlib)
1982 		ret = fio_append_iolog_gz(first, log);
1983 	else
1984 		ret = fio_append_text_log(first, log);
1985 
1986 	fio_net_queue_entry(first);
1987 	return ret;
1988 }
1989 
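/*
 * Tell the client about a new job: serialize the thread options into the
 * network format and queue an ADD_JOB command carrying them.
 */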
1990 void fio_server_send_add_job(struct thread_data *td)
1991 {
1992 	struct cmd_add_job_pdu pdu;
1993 
1994 	memset(&pdu, 0, sizeof(pdu));
1995 	pdu.thread_number = cpu_to_le32(td->thread_number);
1996 	pdu.groupid = cpu_to_le32(td->groupid);
1997 	convert_thread_options_to_net(&pdu.top, &td->o);
1998 
1999 	fio_net_queue_cmd(FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL,
2000 				SK_F_COPY);
2001 }
2002 
2003 void fio_server_send_start(struct thread_data *td)
2004 {
2005 	struct sk_out *sk_out = pthread_getspecific(sk_out_key);
2006 
2007 	assert(sk_out->sk != -1);
2008 
2009 	fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, NULL, SK_F_SIMPLE);
2010 }
2011 
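/*
 * Fetch a verify state file from the client. A cmd_reply is allocated with
 * smalloc() and its address doubles as the command tag, so the reply path
 * can find it and fill in data/error; we then block on rep->lock (with a
 * 10 second timeout) until that happens. On success, only the
 * thread_io_list part is copied out for the caller.
 */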
2012 int fio_server_get_verify_state(const char *name, int threadnumber,
2013 				void **datap)
2014 {
2015 	struct thread_io_list *s;
2016 	struct cmd_sendfile out;
2017 	struct cmd_reply *rep;
2018 	uint64_t tag;
2019 	void *data;
2020 	int ret;
2021 
2022 	dprint(FD_NET, "server: request verify state\n");
2023 
2024 	rep = smalloc(sizeof(*rep));
2025 	if (!rep)
2026 		return ENOMEM;
2027 
2028 	__fio_mutex_init(&rep->lock, FIO_MUTEX_LOCKED);
2029 	rep->data = NULL;
2030 	rep->error = 0;
2031 
2032 	verify_state_gen_name((char *) out.path, sizeof(out.path), name, me,
2033 				threadnumber);
2034 	tag = (uint64_t) (uintptr_t) rep;
2035 	fio_net_queue_cmd(FIO_NET_CMD_SENDFILE, &out, sizeof(out), &tag,
2036 				SK_F_COPY);
2037 
2038 	/*
2039 	 * Wait for the backend to receive the reply
2040 	 */
2041 	if (fio_mutex_down_timeout(&rep->lock, 10000)) {
2042 		log_err("fio: timed out waiting for reply\n");
2043 		ret = ETIMEDOUT;
2044 		goto fail;
2045 	}
2046 
2047 	if (rep->error) {
2048 		log_err("fio: failure on receiving state file %s: %s\n",
2049 				out.path, strerror(rep->error));
2050 		ret = rep->error;
2051 fail:
2052 		*datap = NULL;
2053 		sfree(rep);
2054 		fio_net_queue_quit();
2055 		return ret;
2056 	}
2057 
2058 	/*
2059 	 * The format is verify_state_hdr, then thread_io_list. Verify
2060 	 * the header, and the thread_io_list checksum
2061 	 */
2062 	s = rep->data + sizeof(struct verify_state_hdr);
2063 	if (verify_state_hdr(rep->data, s)) {
2064 		ret = EILSEQ;
2065 		goto fail;
2066 	}
2067 
2068 	/*
2069 	 * The header isn't needed from here on; copy out just the thread_io_list
2070 	 */
2071 	ret = 0;
2072 	rep->size -= sizeof(struct verify_state_hdr);
2073 	data = malloc(rep->size);
2074 	memcpy(data, s, rep->size);
2075 	*datap = data;
2076 
2077 	sfree(rep->data);
2078 	__fio_mutex_remove(&rep->lock);
2079 	sfree(rep);
2080 	return ret;
2081 }
2082 
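/*
 * Create and bind the TCP listening socket, IPv4 or IPv6 depending on
 * use_ipv6. SO_REUSEADDR is required, SO_REUSEPORT is best effort.
 * Returns the socket fd, or -1 on error.
 */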
2083 static int fio_init_server_ip(void)
2084 {
2085 	struct sockaddr *addr;
2086 	socklen_t socklen;
2087 	char buf[80];
2088 	const char *str;
2089 	int sk, opt;
2090 
2091 	if (use_ipv6)
2092 		sk = socket(AF_INET6, SOCK_STREAM, 0);
2093 	else
2094 		sk = socket(AF_INET, SOCK_STREAM, 0);
2095 
2096 	if (sk < 0) {
2097 		log_err("fio: socket: %s\n", strerror(errno));
2098 		return -1;
2099 	}
2100 
2101 	opt = 1;
2102 	if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, (void *)&opt, sizeof(opt)) < 0) {
2103 		log_err("fio: setsockopt(REUSEADDR): %s\n", strerror(errno));
2104 		close(sk);
2105 		return -1;
2106 	}
2107 #ifdef SO_REUSEPORT
2108 	/*
2109 	 * Not fatal if this fails, so just ignore any error here
2110 	 */
2111 	setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
2112 #endif
2113 
2114 	if (use_ipv6) {
2115 		const void *src = &saddr_in6.sin6_addr;
2116 
2117 		addr = (struct sockaddr *) &saddr_in6;
2118 		socklen = sizeof(saddr_in6);
2119 		saddr_in6.sin6_family = AF_INET6;
2120 		str = inet_ntop(AF_INET6, src, buf, sizeof(buf));
2121 	} else {
2122 		const void *src = &saddr_in.sin_addr;
2123 
2124 		addr = (struct sockaddr *) &saddr_in;
2125 		socklen = sizeof(saddr_in);
2126 		saddr_in.sin_family = AF_INET;
2127 		str = inet_ntop(AF_INET, src, buf, sizeof(buf));
2128 	}
2129 
2130 	if (bind(sk, addr, socklen) < 0) {
2131 		log_err("fio: bind: %s\n", strerror(errno));
2132 		log_info("fio: failed with IPv%c %s\n", use_ipv6 ? '6' : '4', str);
2133 		close(sk);
2134 		return -1;
2135 	}
2136 
2137 	return sk;
2138 }
2139 
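/*
 * Create and bind a local (AF_UNIX) listening socket at the bind_sock
 * path. The umask is cleared while binding so the socket file gets open
 * permissions, then restored. Returns the socket fd, or -1 on error.
 */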
2140 static int fio_init_server_sock(void)
2141 {
2142 	struct sockaddr_un addr;
2143 	socklen_t len;
2144 	mode_t mode;
2145 	int sk;
2146 
2147 	sk = socket(AF_UNIX, SOCK_STREAM, 0);
2148 	if (sk < 0) {
2149 		log_err("fio: socket: %s\n", strerror(errno));
2150 		return -1;
2151 	}
2152 
2153 	mode = umask(000);
2154 
2155 	memset(&addr, 0, sizeof(addr));
2156 	addr.sun_family = AF_UNIX;
2157 	strncpy(addr.sun_path, bind_sock, sizeof(addr.sun_path) - 1);
2158 
2159 	len = sizeof(addr.sun_family) + strlen(bind_sock) + 1;
2160 
2161 	if (bind(sk, (struct sockaddr *) &addr, len) < 0) {
2162 		log_err("fio: bind: %s\n", strerror(errno));
2163 		close(sk);
2164 		return -1;
2165 	}
2166 
2167 	umask(mode);
2168 	return sk;
2169 }
2170 
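/*
 * Set up whichever listening socket was requested (unix domain if
 * bind_sock is set, TCP otherwise), log the address being listened on,
 * and start listening with a small backlog.
 */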
2171 static int fio_init_server_connection(void)
2172 {
2173 	char bind_str[128];
2174 	int sk;
2175 
2176 	dprint(FD_NET, "starting server\n");
2177 
2178 	if (!bind_sock)
2179 		sk = fio_init_server_ip();
2180 	else
2181 		sk = fio_init_server_sock();
2182 
2183 	if (sk < 0)
2184 		return sk;
2185 
2186 	memset(bind_str, 0, sizeof(bind_str));
2187 
2188 	if (!bind_sock) {
2189 		char *p, port[16];
2190 		const void *src;
2191 		int af;
2192 
2193 		if (use_ipv6) {
2194 			af = AF_INET6;
2195 			src = &saddr_in6.sin6_addr;
2196 		} else {
2197 			af = AF_INET;
2198 			src = &saddr_in.sin_addr;
2199 		}
2200 
2201 		p = (char *) inet_ntop(af, src, bind_str, sizeof(bind_str));
2202 
2203 		sprintf(port, ",%u", fio_net_port);
2204 		if (p)
2205 			strcat(p, port);
2206 		else
2207 			strncpy(bind_str, port, sizeof(bind_str) - 1);
2208 	} else
2209 		strncpy(bind_str, bind_sock, sizeof(bind_str) - 1);
2210 
2211 	log_info("fio: server listening on %s\n", bind_str);
2212 
2213 	if (listen(sk, 4) < 0) {
2214 		log_err("fio: listen: %s\n", strerror(errno));
2215 		close(sk);
2216 		return -1;
2217 	}
2218 
2219 	return sk;
2220 }
2221 
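/*
 * Turn 'host' into a binary address. Try it as an address literal with
 * inet_pton() first; if that fails, fall back to getaddrinfo() to resolve
 * it as a hostname. Returns 0 on success, 1 on failure.
 */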
2222 int fio_server_parse_host(const char *host, int ipv6, struct in_addr *inp,
2223 			  struct in6_addr *inp6)
2224 
2225 {
2226 	int ret = 0;
2227 
2228 	if (ipv6)
2229 		ret = inet_pton(AF_INET6, host, inp6);
2230 	else
2231 		ret = inet_pton(AF_INET, host, inp);
2232 
2233 	if (ret != 1) {
2234 		struct addrinfo hints, *res;
2235 
2236 		memset(&hints, 0, sizeof(hints));
2237 		hints.ai_family = ipv6 ? AF_INET6 : AF_INET;
2238 		hints.ai_socktype = SOCK_STREAM;
2239 
2240 		ret = getaddrinfo(host, NULL, &hints, &res);
2241 		if (ret) {
2242 			log_err("fio: failed to resolve <%s> (%s)\n", host,
2243 					gai_strerror(ret));
2244 			return 1;
2245 		}
2246 
2247 		if (ipv6)
2248 			memcpy(inp6, &((struct sockaddr_in6 *) res->ai_addr)->sin6_addr, sizeof(*inp6));
2249 		else
2250 			memcpy(inp, &((struct sockaddr_in *) res->ai_addr)->sin_addr, sizeof(*inp));
2251 
2252 		ret = 1;
2253 		freeaddrinfo(res);
2254 	}
2255 
2256 	return !(ret == 1);
2257 }
2258 
2259 /*
2260  * Parse a host/ip/port string. Reads from 'str'.
2261  *
2262  * Outputs:
2263  *
2264  * For IPv4:
2265  *	*ptr is the host, *port is the port, inp is the destination.
2266  * For IPv6:
2267  *	*ptr is the host, *port is the port, inp6 is the dest, and *ipv6 is 1.
2268  * For local domain sockets:
2269  *	*ptr is the filename, *is_sock is 1.
2270  */
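/*
 * Illustrative inputs (hypothetical values, not exhaustive):
 *
 *	"sock:/tmp/fio.sock"	-> *is_sock = 1, *ptr = "/tmp/fio.sock"
 *	":4000"			-> *port = 4000, no host
 *	"ip6:::1,4000"		-> *ipv6 = 1, inp6 = ::1, *port = 4000
 *	"host.example.com,4000"	-> *ptr = "host.example.com", *port = 4000
 */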
2271 int fio_server_parse_string(const char *str, char **ptr, int *is_sock,
2272 			    int *port, struct in_addr *inp,
2273 			    struct in6_addr *inp6, int *ipv6)
2274 {
2275 	const char *host = str;
2276 	char *portp;
2277 	int lport = 0;
2278 
2279 	*ptr = NULL;
2280 	*is_sock = 0;
2281 	*port = fio_net_port;
2282 	*ipv6 = 0;
2283 
2284 	if (!strncmp(str, "sock:", 5)) {
2285 		*ptr = strdup(str + 5);
2286 		*is_sock = 1;
2287 
2288 		return 0;
2289 	}
2290 
2291 	/*
2292 	 * Is it ip:<ip or host>[,port], or just :port?
2293 	 */
2294 	if (!strncmp(host, "ip:", 3))
2295 		host += 3;
2296 	else if (!strncmp(host, "ip4:", 4))
2297 		host += 4;
2298 	else if (!strncmp(host, "ip6:", 4)) {
2299 		host += 4;
2300 		*ipv6 = 1;
2301 	} else if (host[0] == ':') {
2302 		/* String is :port */
2303 		host++;
2304 		lport = atoi(host);
2305 		if (!lport || lport > 65535) {
2306 			log_err("fio: bad server port %u\n", lport);
2307 			return 1;
2308 		}
2309 		/* no hostname given, we are done */
2310 		*port = lport;
2311 		return 0;
2312 	}
2313 
2314 	/*
2315 	 * If no port was seen yet, check for a trailing ',port' part
2316 	 */
2317 	if (!lport) {
2318 		portp = strchr(host, ',');
2319 		if (portp) {
2320 			*portp = '\0';
2321 			portp++;
2322 			lport = atoi(portp);
2323 			if (!lport || lport > 65535) {
2324 				log_err("fio: bad server port %u\n", lport);
2325 				return 1;
2326 			}
2327 		}
2328 	}
2329 
2330 	if (lport)
2331 		*port = lport;
2332 
2333 	if (!strlen(host))
2334 		return 0;
2335 
2336 	*ptr = strdup(host);
2337 
2338 	if (fio_server_parse_host(*ptr, *ipv6, inp, inp6)) {
2339 		free(*ptr);
2340 		*ptr = NULL;
2341 		return 1;
2342 	}
2343 
2344 	if (*port == 0)
2345 		*port = fio_net_port;
2346 
2347 	return 0;
2348 }
2349 
2350 /*
2351  * Server arg should be one of:
2352  *
2353  * sock:/path/to/socket
2354  *   ip:1.2.3.4
2355  *      1.2.3.4
2356  *
2357  * Where sock uses unix domain sockets, and ip binds the server to
2358  * a specific interface. If no arguments are given to the server, it
2359  * uses IP and binds to 0.0.0.0.
2360  *
2361  */
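/*
 * Illustrative invocations (addresses and paths are hypothetical):
 *
 *	fio --server				backend on 0.0.0.0, default port
 *	fio --server=ip:10.0.0.1,4000		bind a specific address and port
 *	fio --server=sock:/tmp/fio.sock		local unix domain socket
 */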
2362 static int fio_handle_server_arg(void)
2363 {
2364 	int port = fio_net_port;
2365 	int is_sock, ret = 0;
2366 
2367 	saddr_in.sin_addr.s_addr = htonl(INADDR_ANY);
2368 
2369 	if (!fio_server_arg)
2370 		goto out;
2371 
2372 	ret = fio_server_parse_string(fio_server_arg, &bind_sock, &is_sock,
2373 					&port, &saddr_in.sin_addr,
2374 					&saddr_in6.sin6_addr, &use_ipv6);
2375 
2376 	if (!is_sock && bind_sock) {
2377 		free(bind_sock);
2378 		bind_sock = NULL;
2379 	}
2380 
2381 out:
2382 	fio_net_port = port;
2383 	saddr_in.sin_port = htons(port);
2384 	saddr_in6.sin6_port = htons(port);
2385 	return ret;
2386 }
2387 
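/*
 * On SIGINT, remove the unix domain socket file (if one was bound) so a
 * later server instance can bind the same path again.
 */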
2388 static void sig_int(int sig)
2389 {
2390 	if (bind_sock)
2391 		unlink(bind_sock);
2392 }
2393 
2394 static void set_sig_handlers(void)
2395 {
2396 	struct sigaction act;
2397 
2398 	memset(&act, 0, sizeof(act));
2399 	act.sa_handler = sig_int;
2400 	act.sa_flags = SA_RESTART;
2401 	sigaction(SIGINT, &act, NULL);
2402 }
2403 
2404 void fio_server_destroy_sk_key(void)
2405 {
2406 	pthread_key_delete(sk_out_key);
2407 }
2408 
2409 int fio_server_create_sk_key(void)
2410 {
2411 	if (pthread_key_create(&sk_out_key, NULL)) {
2412 		log_err("fio: can't create sk_out backend key\n");
2413 		return 1;
2414 	}
2415 
2416 	pthread_setspecific(sk_out_key, NULL);
2417 	return 0;
2418 }
2419 
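/*
 * Main server entry point: parse any server argument, create the
 * listening socket, install the SIGINT handler, then sit in the accept
 * loop until told to exit.
 */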
2420 static int fio_server(void)
2421 {
2422 	int sk, ret;
2423 
2424 	dprint(FD_NET, "starting server\n");
2425 
2426 	if (fio_handle_server_arg())
2427 		return -1;
2428 
2429 	sk = fio_init_server_connection();
2430 	if (sk < 0)
2431 		return -1;
2432 
2433 	set_sig_handlers();
2434 
2435 	ret = accept_loop(sk);
2436 
2437 	close(sk);
2438 
2439 	if (fio_server_arg) {
2440 		free(fio_server_arg);
2441 		fio_server_arg = NULL;
2442 	}
2443 	if (bind_sock)
2444 		free(bind_sock);
2445 
2446 	return ret;
2447 }
2448 
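/*
 * A SIGPIPE just means the client connection went away, so only mark the
 * socket as invalid; any other signal terminates the backend.
 */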
2449 void fio_server_got_signal(int signal)
2450 {
2451 	struct sk_out *sk_out = pthread_getspecific(sk_out_key);
2452 
2453 	assert(sk_out);
2454 
2455 	if (signal == SIGPIPE)
2456 		sk_out->sk = -1;
2457 	else {
2458 		log_info("\nfio: terminating on signal %d\n", signal);
2459 		exit_backend = 1;
2460 	}
2461 }
2462 
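/*
 * See if a pidfile already points at a live server: read the stored pid
 * and probe it with kill(pid, SIGCONT). Returns non-zero if a server
 * appears to be running. Note that this assumes the pidfile contents fit
 * in the 16 byte buffer and are NUL terminated.
 */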
2463 static int check_existing_pidfile(const char *pidfile)
2464 {
2465 	struct stat sb;
2466 	char buf[16];
2467 	pid_t pid;
2468 	FILE *f;
2469 
2470 	if (stat(pidfile, &sb))
2471 		return 0;
2472 
2473 	f = fopen(pidfile, "r");
2474 	if (!f)
2475 		return 0;
2476 
2477 	if (fread(buf, sb.st_size, 1, f) <= 0) {
2478 		fclose(f);
2479 		return 1;
2480 	}
2481 	fclose(f);
2482 
2483 	pid = atoi(buf);
2484 	if (kill(pid, SIGCONT) < 0)
2485 		return errno != ESRCH;
2486 
2487 	return 1;
2488 }
2489 
2490 static int write_pid(pid_t pid, const char *pidfile)
2491 {
2492 	FILE *fpid;
2493 
2494 	fpid = fopen(pidfile, "w");
2495 	if (!fpid) {
2496 		log_err("fio: failed opening pid file %s\n", pidfile);
2497 		return 1;
2498 	}
2499 
2500 	fprintf(fpid, "%u\n", (unsigned int) pid);
2501 	fclose(fpid);
2502 	return 0;
2503 }
2504 
2505 /*
2506  * If pidfile is specified, background us.
2507  */
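/*
 * Used for daemonized startup (e.g. fio's --daemonize=<pidfile> option):
 * the parent writes the child pid to the pidfile and exits, while the
 * child detaches with setsid(), switches logging to syslog, and runs the
 * server.
 */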
2508 int fio_start_server(char *pidfile)
2509 {
2510 	pid_t pid;
2511 	int ret;
2512 
2513 #if defined(WIN32)
2514 	WSADATA wsd;
2515 	WSAStartup(MAKEWORD(2, 2), &wsd);
2516 #endif
2517 
2518 	if (!pidfile)
2519 		return fio_server();
2520 
2521 	if (check_existing_pidfile(pidfile)) {
2522 		log_err("fio: pidfile %s exists and server appears alive\n",
2523 								pidfile);
2524 		free(pidfile);
2525 		return -1;
2526 	}
2527 
2528 	pid = fork();
2529 	if (pid < 0) {
2530 		log_err("fio: failed server fork: %s\n", strerror(errno));
2531 		free(pidfile);
2532 		return -1;
2533 	} else if (pid) {
2534 		ret = write_pid(pid, pidfile);
2535 		free(pidfile);
2536 		_exit(ret);
2537 	}
2538 
2539 	setsid();
2540 	openlog("fio", LOG_NDELAY|LOG_NOWAIT|LOG_PID, LOG_USER);
2541 	log_syslog = 1;
2542 	close(STDIN_FILENO);
2543 	close(STDOUT_FILENO);
2544 	close(STDERR_FILENO);
2545 	f_out = NULL;
2546 	f_err = NULL;
2547 
2548 	ret = fio_server();
2549 
2550 	closelog();
2551 	unlink(pidfile);
2552 	free(pidfile);
2553 	return ret;
2554 }
2555 
2556 void fio_server_set_arg(const char *arg)
2557 {
2558 	fio_server_arg = strdup(arg);
2559 }
2560