1 /*
2  * IO verification helpers
3  */
4 #include <unistd.h>
5 #include <fcntl.h>
6 #include <string.h>
7 #include <assert.h>
8 #include <pthread.h>
9 #include <libgen.h>
10 
11 #include "fio.h"
12 #include "verify.h"
13 #include "trim.h"
14 #include "lib/rand.h"
15 #include "lib/hweight.h"
16 #include "lib/pattern.h"
17 
18 #include "crc/md5.h"
19 #include "crc/crc64.h"
20 #include "crc/crc32.h"
21 #include "crc/crc32c.h"
22 #include "crc/crc16.h"
23 #include "crc/crc7.h"
24 #include "crc/sha256.h"
25 #include "crc/sha512.h"
26 #include "crc/sha1.h"
27 #include "crc/xxhash.h"
28 #include "crc/sha3.h"
29 
30 static void populate_hdr(struct thread_data *td, struct io_u *io_u,
31 			 struct verify_header *hdr, unsigned int header_num,
32 			 unsigned int header_len);
33 static void fill_hdr(struct thread_data *td, struct io_u *io_u,
34 		     struct verify_header *hdr, unsigned int header_num,
35 		     unsigned int header_len, uint64_t rand_seed);
36 static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
37 		       struct verify_header *hdr, unsigned int header_num,
38 		       unsigned int header_len, uint64_t rand_seed);
39 
40 void fill_buffer_pattern(struct thread_data *td, void *p, unsigned int len)
41 {
42 	(void)cpy_pattern(td->o.buffer_pattern, td->o.buffer_pattern_bytes, p, len);
43 }
44 
45 static void __fill_buffer(struct thread_options *o, unsigned long seed, void *p,
46 			  unsigned int len)
47 {
48 	__fill_random_buf_percentage(seed, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
49 }
50 
51 static unsigned long fill_buffer(struct thread_data *td, void *p,
52 				 unsigned int len)
53 {
54 	struct frand_state *fs = &td->verify_state;
55 	struct thread_options *o = &td->o;
56 
57 	return fill_random_buf_percentage(fs, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
58 }
59 
60 void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
61 			 struct io_u *io_u, unsigned long seed, int use_seed)
62 {
63 	struct thread_options *o = &td->o;
64 
65 	if (!o->verify_pattern_bytes) {
66 		dprint(FD_VERIFY, "fill random bytes len=%u\n", len);
67 
68 		if (use_seed)
69 			__fill_buffer(o, seed, p, len);
70 		else
71 			io_u->rand_seed = fill_buffer(td, p, len);
72 		return;
73 	}
74 
75 	/* Skip if the buffer was already filled and we do not need to
76 	 * patch the pattern with a format specifier */
77 	if (!td->o.verify_fmt_sz && io_u->buf_filled_len >= len) {
78 		dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
79 			o->verify_pattern_bytes, len);
80 		return;
81 	}
82 
83 	(void)paste_format(td->o.verify_pattern, td->o.verify_pattern_bytes,
84 			   td->o.verify_fmt, td->o.verify_fmt_sz,
85 			   p, len, io_u);
86 	io_u->buf_filled_len = len;
87 }
88 
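/*
 * Each verify header covers one "verify interval" of the buffer. If the
 * verify_interval option is set (and is no larger than the buffer), the
 * buffer is split into verify_interval-sized chunks, each with its own
 * header; otherwise a single header covers the whole io_u buffer.
 */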
89 static unsigned int get_hdr_inc(struct thread_data *td, struct io_u *io_u)
90 {
91 	unsigned int hdr_inc;
92 
93 	hdr_inc = io_u->buflen;
94 	if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen)
95 		hdr_inc = td->o.verify_interval;
96 
97 	return hdr_inc;
98 }
99 
100 static void fill_pattern_headers(struct thread_data *td, struct io_u *io_u,
101 				 unsigned long seed, int use_seed)
102 {
103 	unsigned int hdr_inc, header_num;
104 	struct verify_header *hdr;
105 	void *p = io_u->buf;
106 
107 	fill_verify_pattern(td, p, io_u->buflen, io_u, seed, use_seed);
108 
109 	hdr_inc = get_hdr_inc(td, io_u);
110 	header_num = 0;
111 	for (; p < io_u->buf + io_u->buflen; p += hdr_inc) {
112 		hdr = p;
113 		populate_hdr(td, io_u, hdr, header_num, hdr_inc);
114 		header_num++;
115 	}
116 }
117 
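/*
 * Swap two regions of at most 200 bytes; used to implement verify_offset,
 * which moves the verify header away from the start of each block.
 */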
118 static void memswp(void *buf1, void *buf2, unsigned int len)
119 {
120 	char swap[200];
121 
122 	assert(len <= sizeof(swap));
123 
124 	memcpy(&swap, buf1, len);
125 	memcpy(buf1, buf2, len);
126 	memcpy(buf2, &swap, len);
127 }
128 
129 static void hexdump(void *buffer, int len)
130 {
131 	unsigned char *p = buffer;
132 	int i;
133 
134 	for (i = 0; i < len; i++)
135 		log_err("%02x", p[i]);
136 	log_err("\n");
137 }
138 
139 /*
140  * Prepare for separation of verify_header and checksum header
141  */
142 static inline unsigned int __hdr_size(int verify_type)
143 {
144 	unsigned int len = 0;
145 
146 	switch (verify_type) {
147 	case VERIFY_NONE:
148 	case VERIFY_HDR_ONLY:
149 	case VERIFY_NULL:
150 	case VERIFY_PATTERN:
151 		len = 0;
152 		break;
153 	case VERIFY_MD5:
154 		len = sizeof(struct vhdr_md5);
155 		break;
156 	case VERIFY_CRC64:
157 		len = sizeof(struct vhdr_crc64);
158 		break;
159 	case VERIFY_CRC32C:
160 	case VERIFY_CRC32:
161 	case VERIFY_CRC32C_INTEL:
162 		len = sizeof(struct vhdr_crc32);
163 		break;
164 	case VERIFY_CRC16:
165 		len = sizeof(struct vhdr_crc16);
166 		break;
167 	case VERIFY_CRC7:
168 		len = sizeof(struct vhdr_crc7);
169 		break;
170 	case VERIFY_SHA256:
171 		len = sizeof(struct vhdr_sha256);
172 		break;
173 	case VERIFY_SHA512:
174 		len = sizeof(struct vhdr_sha512);
175 		break;
176 	case VERIFY_SHA3_224:
177 		len = sizeof(struct vhdr_sha3_224);
178 		break;
179 	case VERIFY_SHA3_256:
180 		len = sizeof(struct vhdr_sha3_256);
181 		break;
182 	case VERIFY_SHA3_384:
183 		len = sizeof(struct vhdr_sha3_384);
184 		break;
185 	case VERIFY_SHA3_512:
186 		len = sizeof(struct vhdr_sha3_512);
187 		break;
188 	case VERIFY_XXHASH:
189 		len = sizeof(struct vhdr_xxhash);
190 		break;
191 	case VERIFY_SHA1:
192 		len = sizeof(struct vhdr_sha1);
193 		break;
194 	case VERIFY_PATTERN_NO_HDR:
195 		return 0;
196 	default:
197 		log_err("fio: unknown verify header!\n");
198 		assert(0);
199 	}
200 
201 	return len + sizeof(struct verify_header);
202 }
203 
204 static inline unsigned int hdr_size(struct thread_data *td,
205 				    struct verify_header *hdr)
206 {
207 	if (td->o.verify == VERIFY_PATTERN_NO_HDR)
208 		return 0;
209 
210 	return __hdr_size(hdr->verify_type);
211 }
212 
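/*
 * The checksum-specific payload (e.g. struct vhdr_md5) is laid out
 * immediately after the generic struct verify_header.
 */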
213 static void *hdr_priv(struct verify_header *hdr)
214 {
215 	void *priv = hdr;
216 
217 	return priv + sizeof(struct verify_header);
218 }
219 
220 /*
221  * Verify container, pass info to verify handlers and allow them to
222  * pass info back in case of error
223  */
224 struct vcont {
225 	/*
226 	 * Input
227 	 */
228 	struct io_u *io_u;
229 	unsigned int hdr_num;
230 	struct thread_data *td;
231 
232 	/*
233 	 * Output, only valid in case of error
234 	 */
235 	const char *name;
236 	void *good_crc;
237 	void *bad_crc;
238 	unsigned int crc_len;
239 };
240 
241 #define DUMP_BUF_SZ	255
242 static int dump_buf_warned;
243 
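/*
 * Write a failing buffer to a file named <basename>.<offset>.<type>
 * (placed under aux_path if one is set) so it can be inspected offline.
 */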
244 static void dump_buf(char *buf, unsigned int len, unsigned long long offset,
245 		     const char *type, struct fio_file *f)
246 {
247 	char *ptr, fname[DUMP_BUF_SZ];
248 	size_t buf_left = DUMP_BUF_SZ;
249 	int ret, fd;
250 
251 	ptr = strdup(f->file_name);
252 
253 	memset(fname, 0, sizeof(fname));
254 	if (aux_path)
255 		sprintf(fname, "%s%s", aux_path, FIO_OS_PATH_SEPARATOR);
256 
257 	strncpy(fname + strlen(fname), basename(ptr), buf_left - 1);
258 
259 	buf_left -= strlen(fname);
260 	if (buf_left <= 0) {
261 		if (!dump_buf_warned) {
262 			log_err("fio: verify failure dump buffer too small\n");
263 			dump_buf_warned = 1;
264 		}
265 		free(ptr);
266 		return;
267 	}
268 
269 	snprintf(fname + strlen(fname), buf_left, ".%llu.%s", offset, type);
270 
271 	fd = open(fname, O_CREAT | O_TRUNC | O_WRONLY, 0644);
272 	if (fd < 0) {
273 		perror("open verify buf file");
274 		return;
275 	}
276 
277 	while (len) {
278 		ret = write(fd, buf, len);
279 		if (!ret)
280 			break;
281 		else if (ret < 0) {
282 			perror("write verify buf file");
283 			break;
284 		}
285 		len -= ret;
286 		buf += ret;
287 	}
288 
289 	close(fd);
290 	log_err("       %s data dumped as %s\n", type, fname);
291 	free(ptr);
292 }
293 
294 /*
295  * Dump the contents of the read block and re-generate the correct data
296  * and dump that too.
297  */
298 static void __dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
299 {
300 	struct thread_data *td = vc->td;
301 	struct io_u *io_u = vc->io_u;
302 	unsigned long hdr_offset;
303 	struct io_u dummy;
304 	void *buf;
305 
306 	if (!td->o.verify_dump)
307 		return;
308 
309 	/*
310 	 * Dump the contents we just read off disk
311 	 */
312 	hdr_offset = vc->hdr_num * hdr->len;
313 
314 	dump_buf(io_u->buf + hdr_offset, hdr->len, io_u->offset + hdr_offset,
315 			"received", vc->io_u->file);
316 
317 	/*
318 	 * Allocate a new buf and re-generate the original data
319 	 */
320 	buf = malloc(io_u->buflen);
321 	dummy = *io_u;
322 	dummy.buf = buf;
323 	dummy.rand_seed = hdr->rand_seed;
324 	dummy.buf_filled_len = 0;
325 	dummy.buflen = io_u->buflen;
326 
327 	fill_pattern_headers(td, &dummy, hdr->rand_seed, 1);
328 
329 	dump_buf(buf + hdr_offset, hdr->len, io_u->offset + hdr_offset,
330 			"expected", vc->io_u->file);
331 	free(buf);
332 }
333 
334 static void dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
335 {
336 	struct thread_data *td = vc->td;
337 	struct verify_header shdr;
338 
339 	if (td->o.verify == VERIFY_PATTERN_NO_HDR) {
340 		__fill_hdr(td, vc->io_u, &shdr, 0, vc->io_u->buflen, 0);
341 		hdr = &shdr;
342 	}
343 
344 	__dump_verify_buffers(hdr, vc);
345 }
346 
347 static void log_verify_failure(struct verify_header *hdr, struct vcont *vc)
348 {
349 	unsigned long long offset;
350 
351 	offset = vc->io_u->offset;
352 	offset += vc->hdr_num * hdr->len;
353 	log_err("%.8s: verify failed at file %s offset %llu, length %u\n",
354 			vc->name, vc->io_u->file->file_name, offset, hdr->len);
355 
356 	if (vc->good_crc && vc->bad_crc) {
357 		log_err("       Expected CRC: ");
358 		hexdump(vc->good_crc, vc->crc_len);
359 		log_err("       Received CRC: ");
360 		hexdump(vc->bad_crc, vc->crc_len);
361 	}
362 
363 	dump_verify_buffers(hdr, vc);
364 }
365 
366 /*
367  * Return data area 'header_num'
368  */
369 static inline void *io_u_verify_off(struct verify_header *hdr, struct vcont *vc)
370 {
371 	return vc->io_u->buf + vc->hdr_num * hdr->len + hdr_size(vc->td, hdr);
372 }
373 
374 static int verify_io_u_pattern(struct verify_header *hdr, struct vcont *vc)
375 {
376 	struct thread_data *td = vc->td;
377 	struct io_u *io_u = vc->io_u;
378 	char *buf, *pattern;
379 	unsigned int header_size = __hdr_size(td->o.verify);
380 	unsigned int len, mod, i, pattern_size;
381 	int rc;
382 
383 	pattern = td->o.verify_pattern;
384 	pattern_size = td->o.verify_pattern_bytes;
385 	assert(pattern_size != 0);
386 
387 	(void)paste_format_inplace(pattern, pattern_size,
388 				   td->o.verify_fmt, td->o.verify_fmt_sz, io_u);
389 
390 	buf = (void *) hdr + header_size;
391 	len = get_hdr_inc(td, io_u) - header_size;
392 	mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;
393 
394 	rc = cmp_pattern(pattern, pattern_size, mod, buf, len);
395 	if (!rc)
396 		return 0;
397 
398 	/* Slow path, compare each byte */
399 	for (i = 0; i < len; i++) {
400 		if (buf[i] != pattern[mod]) {
401 			unsigned int bits;
402 
403 			bits = hweight8(buf[i] ^ pattern[mod]);
404 			log_err("fio: got pattern '%02x', wanted '%02x'. Bad bits %d\n",
405 				(unsigned char)buf[i],
406 				(unsigned char)pattern[mod],
407 				bits);
408 			log_err("fio: bad pattern block offset %u\n", i);
409 			vc->name = "pattern";
410 			log_verify_failure(hdr, vc);
411 			return EILSEQ;
412 		}
413 		mod++;
414 		if (mod == td->o.verify_pattern_bytes)
415 			mod = 0;
416 	}
417 
418 	/* Unreachable line */
419 	assert(0);
420 	return EILSEQ;
421 }
422 
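/*
 * The verify_io_u_* helpers below all follow the same shape: recompute the
 * checksum over the data area (hdr->len minus the header size), compare it
 * with the value stored in the header's private area, and on mismatch fill
 * in the vcont output fields and return EILSEQ.
 */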
423 static int verify_io_u_xxhash(struct verify_header *hdr, struct vcont *vc)
424 {
425 	void *p = io_u_verify_off(hdr, vc);
426 	struct vhdr_xxhash *vh = hdr_priv(hdr);
427 	uint32_t hash;
428 	void *state;
429 
430 	dprint(FD_VERIFY, "xxhash verify io_u %p, len %u\n", vc->io_u, hdr->len);
431 
432 	state = XXH32_init(1);
433 	XXH32_update(state, p, hdr->len - hdr_size(vc->td, hdr));
434 	hash = XXH32_digest(state);
435 
436 	if (vh->hash == hash)
437 		return 0;
438 
439 	vc->name = "xxhash";
440 	vc->good_crc = &vh->hash;
441 	vc->bad_crc = &hash;
442 	vc->crc_len = sizeof(hash);
443 	log_verify_failure(hdr, vc);
444 	return EILSEQ;
445 }
446 
447 static int verify_io_u_sha3(struct verify_header *hdr, struct vcont *vc,
448 			    struct fio_sha3_ctx *sha3_ctx, uint8_t *sha,
449 			    unsigned int sha_size, const char *name)
450 {
451 	void *p = io_u_verify_off(hdr, vc);
452 
453 	dprint(FD_VERIFY, "%s verify io_u %p, len %u\n", name, vc->io_u, hdr->len);
454 
455 	fio_sha3_update(sha3_ctx, p, hdr->len - hdr_size(vc->td, hdr));
456 	fio_sha3_final(sha3_ctx);
457 
458 	if (!memcmp(sha, sha3_ctx->sha, sha_size))
459 		return 0;
460 
461 	vc->name = name;
462 	vc->good_crc = sha;
463 	vc->bad_crc = sha3_ctx->sha;
464 	vc->crc_len = sha_size;
465 	log_verify_failure(hdr, vc);
466 	return EILSEQ;
467 }
468 
469 static int verify_io_u_sha3_224(struct verify_header *hdr, struct vcont *vc)
470 {
471 	struct vhdr_sha3_224 *vh = hdr_priv(hdr);
472 	uint8_t sha[SHA3_224_DIGEST_SIZE];
473 	struct fio_sha3_ctx sha3_ctx = {
474 		.sha = sha,
475 	};
476 
477 	fio_sha3_224_init(&sha3_ctx);
478 
479 	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
480 				SHA3_224_DIGEST_SIZE, "sha3-224");
481 }
482 
483 static int verify_io_u_sha3_256(struct verify_header *hdr, struct vcont *vc)
484 {
485 	struct vhdr_sha3_256 *vh = hdr_priv(hdr);
486 	uint8_t sha[SHA3_256_DIGEST_SIZE];
487 	struct fio_sha3_ctx sha3_ctx = {
488 		.sha = sha,
489 	};
490 
491 	fio_sha3_256_init(&sha3_ctx);
492 
493 	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
494 				SHA3_256_DIGEST_SIZE, "sha3-256");
495 }
496 
497 static int verify_io_u_sha3_384(struct verify_header *hdr, struct vcont *vc)
498 {
499 	struct vhdr_sha3_384 *vh = hdr_priv(hdr);
500 	uint8_t sha[SHA3_384_DIGEST_SIZE];
501 	struct fio_sha3_ctx sha3_ctx = {
502 		.sha = sha,
503 	};
504 
505 	fio_sha3_384_init(&sha3_ctx);
506 
507 	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
508 				SHA3_384_DIGEST_SIZE, "sha3-384");
509 }
510 
511 static int verify_io_u_sha3_512(struct verify_header *hdr, struct vcont *vc)
512 {
513 	struct vhdr_sha3_512 *vh = hdr_priv(hdr);
514 	uint8_t sha[SHA3_512_DIGEST_SIZE];
515 	struct fio_sha3_ctx sha3_ctx = {
516 		.sha = sha,
517 	};
518 
519 	fio_sha3_512_init(&sha3_ctx);
520 
521 	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
522 				SHA3_512_DIGEST_SIZE, "sha3-512");
523 }
524 
525 static int verify_io_u_sha512(struct verify_header *hdr, struct vcont *vc)
526 {
527 	void *p = io_u_verify_off(hdr, vc);
528 	struct vhdr_sha512 *vh = hdr_priv(hdr);
529 	uint8_t sha512[128];
530 	struct fio_sha512_ctx sha512_ctx = {
531 		.buf = sha512,
532 	};
533 
534 	dprint(FD_VERIFY, "sha512 verify io_u %p, len %u\n", vc->io_u, hdr->len);
535 
536 	fio_sha512_init(&sha512_ctx);
537 	fio_sha512_update(&sha512_ctx, p, hdr->len - hdr_size(vc->td, hdr));
538 
539 	if (!memcmp(vh->sha512, sha512_ctx.buf, sizeof(sha512)))
540 		return 0;
541 
542 	vc->name = "sha512";
543 	vc->good_crc = vh->sha512;
544 	vc->bad_crc = sha512_ctx.buf;
545 	vc->crc_len = sizeof(vh->sha512);
546 	log_verify_failure(hdr, vc);
547 	return EILSEQ;
548 }
549 
550 static int verify_io_u_sha256(struct verify_header *hdr, struct vcont *vc)
551 {
552 	void *p = io_u_verify_off(hdr, vc);
553 	struct vhdr_sha256 *vh = hdr_priv(hdr);
554 	uint8_t sha256[64];
555 	struct fio_sha256_ctx sha256_ctx = {
556 		.buf = sha256,
557 	};
558 
559 	dprint(FD_VERIFY, "sha256 verify io_u %p, len %u\n", vc->io_u, hdr->len);
560 
561 	fio_sha256_init(&sha256_ctx);
562 	fio_sha256_update(&sha256_ctx, p, hdr->len - hdr_size(vc->td, hdr));
563 	fio_sha256_final(&sha256_ctx);
564 
565 	if (!memcmp(vh->sha256, sha256_ctx.buf, sizeof(sha256)))
566 		return 0;
567 
568 	vc->name = "sha256";
569 	vc->good_crc = vh->sha256;
570 	vc->bad_crc = sha256_ctx.buf;
571 	vc->crc_len = sizeof(vh->sha256);
572 	log_verify_failure(hdr, vc);
573 	return EILSEQ;
574 }
575 
576 static int verify_io_u_sha1(struct verify_header *hdr, struct vcont *vc)
577 {
578 	void *p = io_u_verify_off(hdr, vc);
579 	struct vhdr_sha1 *vh = hdr_priv(hdr);
580 	uint32_t sha1[5];
581 	struct fio_sha1_ctx sha1_ctx = {
582 		.H = sha1,
583 	};
584 
585 	dprint(FD_VERIFY, "sha1 verify io_u %p, len %u\n", vc->io_u, hdr->len);
586 
587 	fio_sha1_init(&sha1_ctx);
588 	fio_sha1_update(&sha1_ctx, p, hdr->len - hdr_size(vc->td, hdr));
589 	fio_sha1_final(&sha1_ctx);
590 
591 	if (!memcmp(vh->sha1, sha1_ctx.H, sizeof(sha1)))
592 		return 0;
593 
594 	vc->name = "sha1";
595 	vc->good_crc = vh->sha1;
596 	vc->bad_crc = sha1_ctx.H;
597 	vc->crc_len = sizeof(vh->sha1);
598 	log_verify_failure(hdr, vc);
599 	return EILSEQ;
600 }
601 
602 static int verify_io_u_crc7(struct verify_header *hdr, struct vcont *vc)
603 {
604 	void *p = io_u_verify_off(hdr, vc);
605 	struct vhdr_crc7 *vh = hdr_priv(hdr);
606 	unsigned char c;
607 
608 	dprint(FD_VERIFY, "crc7 verify io_u %p, len %u\n", vc->io_u, hdr->len);
609 
610 	c = fio_crc7(p, hdr->len - hdr_size(vc->td, hdr));
611 
612 	if (c == vh->crc7)
613 		return 0;
614 
615 	vc->name = "crc7";
616 	vc->good_crc = &vh->crc7;
617 	vc->bad_crc = &c;
618 	vc->crc_len = 1;
619 	log_verify_failure(hdr, vc);
620 	return EILSEQ;
621 }
622 
623 static int verify_io_u_crc16(struct verify_header *hdr, struct vcont *vc)
624 {
625 	void *p = io_u_verify_off(hdr, vc);
626 	struct vhdr_crc16 *vh = hdr_priv(hdr);
627 	unsigned short c;
628 
629 	dprint(FD_VERIFY, "crc16 verify io_u %p, len %u\n", vc->io_u, hdr->len);
630 
631 	c = fio_crc16(p, hdr->len - hdr_size(vc->td, hdr));
632 
633 	if (c == vh->crc16)
634 		return 0;
635 
636 	vc->name = "crc16";
637 	vc->good_crc = &vh->crc16;
638 	vc->bad_crc = &c;
639 	vc->crc_len = 2;
640 	log_verify_failure(hdr, vc);
641 	return EILSEQ;
642 }
643 
644 static int verify_io_u_crc64(struct verify_header *hdr, struct vcont *vc)
645 {
646 	void *p = io_u_verify_off(hdr, vc);
647 	struct vhdr_crc64 *vh = hdr_priv(hdr);
648 	unsigned long long c;
649 
650 	dprint(FD_VERIFY, "crc64 verify io_u %p, len %u\n", vc->io_u, hdr->len);
651 
652 	c = fio_crc64(p, hdr->len - hdr_size(vc->td, hdr));
653 
654 	if (c == vh->crc64)
655 		return 0;
656 
657 	vc->name = "crc64";
658 	vc->good_crc = &vh->crc64;
659 	vc->bad_crc = &c;
660 	vc->crc_len = 8;
661 	log_verify_failure(hdr, vc);
662 	return EILSEQ;
663 }
664 
665 static int verify_io_u_crc32(struct verify_header *hdr, struct vcont *vc)
666 {
667 	void *p = io_u_verify_off(hdr, vc);
668 	struct vhdr_crc32 *vh = hdr_priv(hdr);
669 	uint32_t c;
670 
671 	dprint(FD_VERIFY, "crc32 verify io_u %p, len %u\n", vc->io_u, hdr->len);
672 
673 	c = fio_crc32(p, hdr->len - hdr_size(vc->td, hdr));
674 
675 	if (c == vh->crc32)
676 		return 0;
677 
678 	vc->name = "crc32";
679 	vc->good_crc = &vh->crc32;
680 	vc->bad_crc = &c;
681 	vc->crc_len = 4;
682 	log_verify_failure(hdr, vc);
683 	return EILSEQ;
684 }
685 
686 static int verify_io_u_crc32c(struct verify_header *hdr, struct vcont *vc)
687 {
688 	void *p = io_u_verify_off(hdr, vc);
689 	struct vhdr_crc32 *vh = hdr_priv(hdr);
690 	uint32_t c;
691 
692 	dprint(FD_VERIFY, "crc32c verify io_u %p, len %u\n", vc->io_u, hdr->len);
693 
694 	c = fio_crc32c(p, hdr->len - hdr_size(vc->td, hdr));
695 
696 	if (c == vh->crc32)
697 		return 0;
698 
699 	vc->name = "crc32c";
700 	vc->good_crc = &vh->crc32;
701 	vc->bad_crc = &c;
702 	vc->crc_len = 4;
703 	log_verify_failure(hdr, vc);
704 	return EILSEQ;
705 }
706 
707 static int verify_io_u_md5(struct verify_header *hdr, struct vcont *vc)
708 {
709 	void *p = io_u_verify_off(hdr, vc);
710 	struct vhdr_md5 *vh = hdr_priv(hdr);
711 	uint32_t hash[MD5_HASH_WORDS];
712 	struct fio_md5_ctx md5_ctx = {
713 		.hash = hash,
714 	};
715 
716 	dprint(FD_VERIFY, "md5 verify io_u %p, len %u\n", vc->io_u, hdr->len);
717 
718 	fio_md5_init(&md5_ctx);
719 	fio_md5_update(&md5_ctx, p, hdr->len - hdr_size(vc->td, hdr));
720 	fio_md5_final(&md5_ctx);
721 
722 	if (!memcmp(vh->md5_digest, md5_ctx.hash, sizeof(hash)))
723 		return 0;
724 
725 	vc->name = "md5";
726 	vc->good_crc = vh->md5_digest;
727 	vc->bad_crc = md5_ctx.hash;
728 	vc->crc_len = sizeof(hash);
729 	log_verify_failure(hdr, vc);
730 	return EILSEQ;
731 }
732 
733 /*
734  * Push IO verification to a separate thread
735  */
736 int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
737 {
738 	struct io_u *io_u = *io_u_ptr;
739 
740 	pthread_mutex_lock(&td->io_u_lock);
741 
742 	if (io_u->file)
743 		put_file_log(td, io_u->file);
744 
745 	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
746 		td->cur_depth--;
747 		io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
748 	}
749 	flist_add_tail(&io_u->verify_list, &td->verify_list);
750 	*io_u_ptr = NULL;
751 	pthread_mutex_unlock(&td->io_u_lock);
752 
753 	pthread_cond_signal(&td->verify_cond);
754 	return 0;
755 }
756 
757 /*
758  * Thanks Rusty, for spending the time so I don't have to.
759  *
760  * http://rusty.ozlabs.org/?p=560
761  */
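/*
 * The trick: check the first 16 bytes by hand, then memcmp() the buffer
 * against itself shifted by 16 bytes. Any non-zero byte breaks the
 * self-comparison, so equality implies the whole region is zero.
 */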
762 static int mem_is_zero(const void *data, size_t length)
763 {
764 	const unsigned char *p = data;
765 	size_t len;
766 
767 	/* Check first 16 bytes manually */
768 	for (len = 0; len < 16; len++) {
769 		if (!length)
770 			return 1;
771 		if (*p)
772 			return 0;
773 		p++;
774 		length--;
775 	}
776 
777 	/* Now we know that's zero, memcmp with self. */
778 	return memcmp(data, p, length) == 0;
779 }
780 
781 static int mem_is_zero_slow(const void *data, size_t length, size_t *offset)
782 {
783 	const unsigned char *p = data;
784 
785 	*offset = 0;
786 	while (length) {
787 		if (*p)
788 			break;
789 		(*offset)++;
790 		length--;
791 		p++;
792 	}
793 
794 	return !length;
795 }
796 
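/*
 * With trim_zero set, a trimmed block is expected to read back as all
 * zeroes; on failure, report the first non-zero offset within the block.
 */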
797 static int verify_trimmed_io_u(struct thread_data *td, struct io_u *io_u)
798 {
799 	size_t offset;
800 
801 	if (!td->o.trim_zero)
802 		return 0;
803 
804 	if (mem_is_zero(io_u->buf, io_u->buflen))
805 		return 0;
806 
807 	mem_is_zero_slow(io_u->buf, io_u->buflen, &offset);
808 
809 	log_err("trim: verify failed at file %s offset %llu, length %lu"
810 		", block offset %lu\n",
811 			io_u->file->file_name, io_u->offset, io_u->buflen,
812 			(unsigned long) offset);
813 	return EILSEQ;
814 }
815 
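/*
 * Sanity-check the generic header before touching the checksum: magic,
 * stored length, rand_seed, offset and (for write workloads with a fixed
 * block size that are not time based) numberio must match what fio would
 * have written, and the header itself is protected by a crc32c over the
 * fields preceding the crc32 member.
 */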
816 static int verify_header(struct io_u *io_u, struct thread_data *td,
817 			 struct verify_header *hdr, unsigned int hdr_num,
818 			 unsigned int hdr_len)
819 {
820 	void *p = hdr;
821 	uint32_t crc;
822 
823 	if (hdr->magic != FIO_HDR_MAGIC) {
824 		log_err("verify: bad magic header %x, wanted %x",
825 			hdr->magic, FIO_HDR_MAGIC);
826 		goto err;
827 	}
828 	if (hdr->len != hdr_len) {
829 		log_err("verify: bad header length %u, wanted %u",
830 			hdr->len, hdr_len);
831 		goto err;
832 	}
833 	if (hdr->rand_seed != io_u->rand_seed) {
834 		log_err("verify: bad header rand_seed %"PRIu64
835 			", wanted %"PRIu64,
836 			hdr->rand_seed, io_u->rand_seed);
837 		goto err;
838 	}
839 	if (hdr->offset != io_u->offset + hdr_num * td->o.verify_interval) {
840 		log_err("verify: bad header offset %"PRIu64
841 			", wanted %llu",
842 			hdr->offset, io_u->offset);
843 		goto err;
844 	}
845 
846 	/*
847 	 * For read-only workloads, the program cannot be certain of the
848 	 * last numberio written to a block. Checking of numberio will be
849 	 * done only for workloads that write data.  For verify_only,
850 	 * numberio will be checked in the last iteration when the correct
851 	 * state of numberio, that would have been written to each block
852 	 * in a previous run of fio, has been reached.
853 	 */
854 	if (td_write(td) && (td_min_bs(td) == td_max_bs(td)) &&
855 	    !td->o.time_based)
856 		if (!td->o.verify_only || td->o.loops == 0)
857 			if (hdr->numberio != io_u->numberio) {
858 				log_err("verify: bad header numberio %"PRIu16
859 					", wanted %"PRIu16,
860 					hdr->numberio, io_u->numberio);
861 				goto err;
862 			}
863 
864 	crc = fio_crc32c(p, offsetof(struct verify_header, crc32));
865 	if (crc != hdr->crc32) {
866 		log_err("verify: bad header crc %x, calculated %x",
867 			hdr->crc32, crc);
868 		goto err;
869 	}
870 	return 0;
871 
872 err:
873 	log_err(" at file %s offset %llu, length %u\n",
874 		io_u->file->file_name,
875 		io_u->offset + hdr_num * hdr_len, hdr_len);
876 
877 	if (td->o.verify_dump)
878 		dump_buf(p, hdr_len, io_u->offset + hdr_num * hdr_len,
879 				"hdr_fail", io_u->file);
880 
881 	return EILSEQ;
882 }
883 
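/*
 * Main verification entry point: walk the buffer one verify interval at a
 * time, undo any verify_offset swap, validate the generic header (unless
 * pattern verification without headers is in use) and dispatch to the
 * configured checksum or pattern verifier. A job exercising this path
 * might look roughly like the following (illustrative only):
 *
 *	[write-and-verify]
 *	rw=write
 *	verify=crc32c
 *	verify_interval=4096
 *	verify_fatal=1
 */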
884 int verify_io_u(struct thread_data *td, struct io_u **io_u_ptr)
885 {
886 	struct verify_header *hdr;
887 	struct io_u *io_u = *io_u_ptr;
888 	unsigned int header_size, hdr_inc, hdr_num = 0;
889 	void *p;
890 	int ret;
891 
892 	if (td->o.verify == VERIFY_NULL || io_u->ddir != DDIR_READ)
893 		return 0;
894 	/*
895 	 * If the IO engine is faking IO (like null), then just pretend
896 	 * we verified everything.
897 	 */
898 	if (td_ioengine_flagged(td, FIO_FAKEIO))
899 		return 0;
900 
901 	if (io_u->flags & IO_U_F_TRIMMED) {
902 		ret = verify_trimmed_io_u(td, io_u);
903 		goto done;
904 	}
905 
906 	hdr_inc = get_hdr_inc(td, io_u);
907 
908 	ret = 0;
909 	for (p = io_u->buf; p < io_u->buf + io_u->buflen;
910 	     p += hdr_inc, hdr_num++) {
911 		struct vcont vc = {
912 			.io_u		= io_u,
913 			.hdr_num	= hdr_num,
914 			.td		= td,
915 		};
916 		unsigned int verify_type;
917 
918 		if (ret && td->o.verify_fatal)
919 			break;
920 
921 		header_size = __hdr_size(td->o.verify);
922 		if (td->o.verify_offset)
923 			memswp(p, p + td->o.verify_offset, header_size);
924 		hdr = p;
925 
926 		/*
927 		 * Make the rand_seed check pass when verifysort or
928 		 * verify_backlog is in use.
929 		 */
930 		if (td->o.verifysort || (td->flags & TD_F_VER_BACKLOG))
931 			io_u->rand_seed = hdr->rand_seed;
932 
933 		if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
934 			ret = verify_header(io_u, td, hdr, hdr_num, hdr_inc);
935 			if (ret)
936 				return ret;
937 		}
938 
939 		if (td->o.verify != VERIFY_NONE)
940 			verify_type = td->o.verify;
941 		else
942 			verify_type = hdr->verify_type;
943 
944 		switch (verify_type) {
945 		case VERIFY_HDR_ONLY:
946 			/* Header is always verified, check if pattern is left
947 			 * for verification. */
948 			if (td->o.verify_pattern_bytes)
949 				ret = verify_io_u_pattern(hdr, &vc);
950 			break;
951 		case VERIFY_MD5:
952 			ret = verify_io_u_md5(hdr, &vc);
953 			break;
954 		case VERIFY_CRC64:
955 			ret = verify_io_u_crc64(hdr, &vc);
956 			break;
957 		case VERIFY_CRC32C:
958 		case VERIFY_CRC32C_INTEL:
959 			ret = verify_io_u_crc32c(hdr, &vc);
960 			break;
961 		case VERIFY_CRC32:
962 			ret = verify_io_u_crc32(hdr, &vc);
963 			break;
964 		case VERIFY_CRC16:
965 			ret = verify_io_u_crc16(hdr, &vc);
966 			break;
967 		case VERIFY_CRC7:
968 			ret = verify_io_u_crc7(hdr, &vc);
969 			break;
970 		case VERIFY_SHA256:
971 			ret = verify_io_u_sha256(hdr, &vc);
972 			break;
973 		case VERIFY_SHA512:
974 			ret = verify_io_u_sha512(hdr, &vc);
975 			break;
976 		case VERIFY_SHA3_224:
977 			ret = verify_io_u_sha3_224(hdr, &vc);
978 			break;
979 		case VERIFY_SHA3_256:
980 			ret = verify_io_u_sha3_256(hdr, &vc);
981 			break;
982 		case VERIFY_SHA3_384:
983 			ret = verify_io_u_sha3_384(hdr, &vc);
984 			break;
985 		case VERIFY_SHA3_512:
986 			ret = verify_io_u_sha3_512(hdr, &vc);
987 			break;
988 		case VERIFY_XXHASH:
989 			ret = verify_io_u_xxhash(hdr, &vc);
990 			break;
991 		case VERIFY_SHA1:
992 			ret = verify_io_u_sha1(hdr, &vc);
993 			break;
994 		case VERIFY_PATTERN:
995 		case VERIFY_PATTERN_NO_HDR:
996 			ret = verify_io_u_pattern(hdr, &vc);
997 			break;
998 		default:
999 			log_err("Bad verify type %u\n", hdr->verify_type);
1000 			ret = EINVAL;
1001 		}
1002 
1003 		if (ret && verify_type != hdr->verify_type)
1004 			log_err("fio: verify type mismatch (%u media, %u given)\n",
1005 					hdr->verify_type, verify_type);
1006 	}
1007 
1008 done:
1009 	if (ret && td->o.verify_fatal)
1010 		fio_mark_td_terminate(td);
1011 
1012 	return ret;
1013 }
1014 
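/*
 * The fill_* helpers below mirror the verify_io_u_* checkers above: each
 * one computes its checksum over the data region that follows the header
 * and stores the result in the header's private area.
 */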
1015 static void fill_xxhash(struct verify_header *hdr, void *p, unsigned int len)
1016 {
1017 	struct vhdr_xxhash *vh = hdr_priv(hdr);
1018 	void *state;
1019 
1020 	state = XXH32_init(1);
1021 	XXH32_update(state, p, len);
1022 	vh->hash = XXH32_digest(state);
1023 }
1024 
1025 static void fill_sha3(struct fio_sha3_ctx *sha3_ctx, void *p, unsigned int len)
1026 {
1027 	fio_sha3_update(sha3_ctx, p, len);
1028 	fio_sha3_final(sha3_ctx);
1029 }
1030 
1031 static void fill_sha3_224(struct verify_header *hdr, void *p, unsigned int len)
1032 {
1033 	struct vhdr_sha3_224 *vh = hdr_priv(hdr);
1034 	struct fio_sha3_ctx sha3_ctx = {
1035 		.sha = vh->sha,
1036 	};
1037 
1038 	fio_sha3_224_init(&sha3_ctx);
1039 	fill_sha3(&sha3_ctx, p, len);
1040 }
1041 
1042 static void fill_sha3_256(struct verify_header *hdr, void *p, unsigned int len)
1043 {
1044 	struct vhdr_sha3_256 *vh = hdr_priv(hdr);
1045 	struct fio_sha3_ctx sha3_ctx = {
1046 		.sha = vh->sha,
1047 	};
1048 
1049 	fio_sha3_256_init(&sha3_ctx);
1050 	fill_sha3(&sha3_ctx, p, len);
1051 }
1052 
1053 static void fill_sha3_384(struct verify_header *hdr, void *p, unsigned int len)
1054 {
1055 	struct vhdr_sha3_384 *vh = hdr_priv(hdr);
1056 	struct fio_sha3_ctx sha3_ctx = {
1057 		.sha = vh->sha,
1058 	};
1059 
1060 	fio_sha3_384_init(&sha3_ctx);
1061 	fill_sha3(&sha3_ctx, p, len);
1062 }
1063 
1064 static void fill_sha3_512(struct verify_header *hdr, void *p, unsigned int len)
1065 {
1066 	struct vhdr_sha3_512 *vh = hdr_priv(hdr);
1067 	struct fio_sha3_ctx sha3_ctx = {
1068 		.sha = vh->sha,
1069 	};
1070 
1071 	fio_sha3_512_init(&sha3_ctx);
1072 	fill_sha3(&sha3_ctx, p, len);
1073 }
1074 
1075 static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
1076 {
1077 	struct vhdr_sha512 *vh = hdr_priv(hdr);
1078 	struct fio_sha512_ctx sha512_ctx = {
1079 		.buf = vh->sha512,
1080 	};
1081 
1082 	fio_sha512_init(&sha512_ctx);
1083 	fio_sha512_update(&sha512_ctx, p, len);
1084 }
1085 
1086 static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
1087 {
1088 	struct vhdr_sha256 *vh = hdr_priv(hdr);
1089 	struct fio_sha256_ctx sha256_ctx = {
1090 		.buf = vh->sha256,
1091 	};
1092 
1093 	fio_sha256_init(&sha256_ctx);
1094 	fio_sha256_update(&sha256_ctx, p, len);
1095 	fio_sha256_final(&sha256_ctx);
1096 }
1097 
1098 static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
1099 {
1100 	struct vhdr_sha1 *vh = hdr_priv(hdr);
1101 	struct fio_sha1_ctx sha1_ctx = {
1102 		.H = vh->sha1,
1103 	};
1104 
1105 	fio_sha1_init(&sha1_ctx);
1106 	fio_sha1_update(&sha1_ctx, p, len);
1107 	fio_sha1_final(&sha1_ctx);
1108 }
1109 
1110 static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
1111 {
1112 	struct vhdr_crc7 *vh = hdr_priv(hdr);
1113 
1114 	vh->crc7 = fio_crc7(p, len);
1115 }
1116 
1117 static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
1118 {
1119 	struct vhdr_crc16 *vh = hdr_priv(hdr);
1120 
1121 	vh->crc16 = fio_crc16(p, len);
1122 }
1123 
1124 static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
1125 {
1126 	struct vhdr_crc32 *vh = hdr_priv(hdr);
1127 
1128 	vh->crc32 = fio_crc32(p, len);
1129 }
1130 
1131 static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
1132 {
1133 	struct vhdr_crc32 *vh = hdr_priv(hdr);
1134 
1135 	vh->crc32 = fio_crc32c(p, len);
1136 }
1137 
1138 static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
1139 {
1140 	struct vhdr_crc64 *vh = hdr_priv(hdr);
1141 
1142 	vh->crc64 = fio_crc64(p, len);
1143 }
1144 
1145 static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
1146 {
1147 	struct vhdr_md5 *vh = hdr_priv(hdr);
1148 	struct fio_md5_ctx md5_ctx = {
1149 		.hash = (uint32_t *) vh->md5_digest,
1150 	};
1151 
1152 	fio_md5_init(&md5_ctx);
1153 	fio_md5_update(&md5_ctx, p, len);
1154 	fio_md5_final(&md5_ctx);
1155 }
1156 
1157 static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
1158 		       struct verify_header *hdr, unsigned int header_num,
1159 		       unsigned int header_len, uint64_t rand_seed)
1160 {
1161 	void *p = hdr;
1162 
1163 	hdr->magic = FIO_HDR_MAGIC;
1164 	hdr->verify_type = td->o.verify;
1165 	hdr->len = header_len;
1166 	hdr->rand_seed = rand_seed;
1167 	hdr->offset = io_u->offset + header_num * td->o.verify_interval;
1168 	hdr->time_sec = io_u->start_time.tv_sec;
1169 	hdr->time_usec = io_u->start_time.tv_usec;
1170 	hdr->thread = td->thread_number;
1171 	hdr->numberio = io_u->numberio;
1172 	hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
1173 }
1174 
1175 
1176 static void fill_hdr(struct thread_data *td, struct io_u *io_u,
1177 		     struct verify_header *hdr, unsigned int header_num,
1178 		     unsigned int header_len, uint64_t rand_seed)
1179 {
1180 
1181 	if (td->o.verify != VERIFY_PATTERN_NO_HDR)
1182 		__fill_hdr(td, io_u, hdr, header_num, header_len, rand_seed);
1183 }
1184 
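/*
 * Fill in the generic header for one verify interval, then compute the
 * configured checksum over the remaining data bytes. If verify_offset is
 * set, the completed header is swapped away from the start of the block.
 */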
1185 static void populate_hdr(struct thread_data *td, struct io_u *io_u,
1186 			 struct verify_header *hdr, unsigned int header_num,
1187 			 unsigned int header_len)
1188 {
1189 	unsigned int data_len;
1190 	void *data, *p;
1191 
1192 	p = (void *) hdr;
1193 
1194 	fill_hdr(td, io_u, hdr, header_num, header_len, io_u->rand_seed);
1195 
1196 	data_len = header_len - hdr_size(td, hdr);
1197 
1198 	data = p + hdr_size(td, hdr);
1199 	switch (td->o.verify) {
1200 	case VERIFY_MD5:
1201 		dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
1202 						io_u, hdr->len);
1203 		fill_md5(hdr, data, data_len);
1204 		break;
1205 	case VERIFY_CRC64:
1206 		dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
1207 						io_u, hdr->len);
1208 		fill_crc64(hdr, data, data_len);
1209 		break;
1210 	case VERIFY_CRC32C:
1211 	case VERIFY_CRC32C_INTEL:
1212 		dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
1213 						io_u, hdr->len);
1214 		fill_crc32c(hdr, data, data_len);
1215 		break;
1216 	case VERIFY_CRC32:
1217 		dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
1218 						io_u, hdr->len);
1219 		fill_crc32(hdr, data, data_len);
1220 		break;
1221 	case VERIFY_CRC16:
1222 		dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
1223 						io_u, hdr->len);
1224 		fill_crc16(hdr, data, data_len);
1225 		break;
1226 	case VERIFY_CRC7:
1227 		dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
1228 						io_u, hdr->len);
1229 		fill_crc7(hdr, data, data_len);
1230 		break;
1231 	case VERIFY_SHA256:
1232 		dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
1233 						io_u, hdr->len);
1234 		fill_sha256(hdr, data, data_len);
1235 		break;
1236 	case VERIFY_SHA512:
1237 		dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
1238 						io_u, hdr->len);
1239 		fill_sha512(hdr, data, data_len);
1240 		break;
1241 	case VERIFY_SHA3_224:
1242 		dprint(FD_VERIFY, "fill sha3-224 io_u %p, len %u\n",
1243 						io_u, hdr->len);
1244 		fill_sha3_224(hdr, data, data_len);
1245 		break;
1246 	case VERIFY_SHA3_256:
1247 		dprint(FD_VERIFY, "fill sha3-256 io_u %p, len %u\n",
1248 						io_u, hdr->len);
1249 		fill_sha3_256(hdr, data, data_len);
1250 		break;
1251 	case VERIFY_SHA3_384:
1252 		dprint(FD_VERIFY, "fill sha3-384 io_u %p, len %u\n",
1253 						io_u, hdr->len);
1254 		fill_sha3_384(hdr, data, data_len);
1255 		break;
1256 	case VERIFY_SHA3_512:
1257 		dprint(FD_VERIFY, "fill sha3-512 io_u %p, len %u\n",
1258 						io_u, hdr->len);
1259 		fill_sha3_512(hdr, data, data_len);
1260 		break;
1261 	case VERIFY_XXHASH:
1262 		dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
1263 						io_u, hdr->len);
1264 		fill_xxhash(hdr, data, data_len);
1265 		break;
1266 	case VERIFY_SHA1:
1267 		dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
1268 						io_u, hdr->len);
1269 		fill_sha1(hdr, data, data_len);
1270 		break;
1271 	case VERIFY_HDR_ONLY:
1272 	case VERIFY_PATTERN:
1273 	case VERIFY_PATTERN_NO_HDR:
1274 		/* nothing to do here */
1275 		break;
1276 	default:
1277 		log_err("fio: bad verify type: %d\n", td->o.verify);
1278 		assert(0);
1279 	}
1280 
1281 	if (td->o.verify_offset && hdr_size(td, hdr))
1282 		memswp(p, p + td->o.verify_offset, hdr_size(td, hdr));
1283 }
1284 
1285 /*
1286  * fill body of io_u->buf with random data and add a header with the
1287  * checksum of choice
1288  */
1289 void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
1290 {
1291 	if (td->o.verify == VERIFY_NULL)
1292 		return;
1293 
1294 	io_u->numberio = td->io_issues[io_u->ddir];
1295 
1296 	fill_pattern_headers(td, io_u, 0, 0);
1297 }
1298 
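/*
 * Pull the next completed write off the IO history (either the rb-tree or
 * the FIFO list, whichever the write path logged it to) and turn it into
 * a READ io_u for verification. Entries still marked in flight are left
 * for later.
 */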
1299 int get_next_verify(struct thread_data *td, struct io_u *io_u)
1300 {
1301 	struct io_piece *ipo = NULL;
1302 
1303 	/*
1304 	 * this io_u is from a requeue, we already filled the offsets
1305 	 */
1306 	if (io_u->file)
1307 		return 0;
1308 
1309 	if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
1310 		struct rb_node *n = rb_first(&td->io_hist_tree);
1311 
1312 		ipo = rb_entry(n, struct io_piece, rb_node);
1313 
1314 		/*
1315 		 * Ensure that the associated IO has completed
1316 		 */
1317 		read_barrier();
1318 		if (ipo->flags & IP_F_IN_FLIGHT)
1319 			goto nothing;
1320 
1321 		rb_erase(n, &td->io_hist_tree);
1322 		assert(ipo->flags & IP_F_ONRB);
1323 		ipo->flags &= ~IP_F_ONRB;
1324 	} else if (!flist_empty(&td->io_hist_list)) {
1325 		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
1326 
1327 		/*
1328 		 * Ensure that the associated IO has completed
1329 		 */
1330 		read_barrier();
1331 		if (ipo->flags & IP_F_IN_FLIGHT)
1332 			goto nothing;
1333 
1334 		flist_del(&ipo->list);
1335 		assert(ipo->flags & IP_F_ONLIST);
1336 		ipo->flags &= ~IP_F_ONLIST;
1337 	}
1338 
1339 	if (ipo) {
1340 		td->io_hist_len--;
1341 
1342 		io_u->offset = ipo->offset;
1343 		io_u->buflen = ipo->len;
1344 		io_u->numberio = ipo->numberio;
1345 		io_u->file = ipo->file;
1346 		io_u_set(td, io_u, IO_U_F_VER_LIST);
1347 
1348 		if (ipo->flags & IP_F_TRIMMED)
1349 			io_u_set(td, io_u, IO_U_F_TRIMMED);
1350 
1351 		if (!fio_file_open(io_u->file)) {
1352 			int r = td_io_open_file(td, io_u->file);
1353 
1354 			if (r) {
1355 				dprint(FD_VERIFY, "failed file %s open\n",
1356 						io_u->file->file_name);
1357 				return 1;
1358 			}
1359 		}
1360 
1361 		get_file(ipo->file);
1362 		assert(fio_file_open(io_u->file));
1363 		io_u->ddir = DDIR_READ;
1364 		io_u->xfer_buf = io_u->buf;
1365 		io_u->xfer_buflen = io_u->buflen;
1366 
1367 		remove_trim_entry(td, ipo);
1368 		free(ipo);
1369 		dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);
1370 
1371 		if (!td->o.verify_pattern_bytes) {
1372 			io_u->rand_seed = __rand(&td->verify_state);
1373 			if (sizeof(int) != sizeof(long *))
1374 				io_u->rand_seed *= __rand(&td->verify_state);
1375 		}
1376 		return 0;
1377 	}
1378 
1379 nothing:
1380 	dprint(FD_VERIFY, "get_next_verify: empty\n");
1381 	return 1;
1382 }
1383 
1384 void fio_verify_init(struct thread_data *td)
1385 {
1386 	if (td->o.verify == VERIFY_CRC32C_INTEL ||
1387 	    td->o.verify == VERIFY_CRC32C) {
1388 		crc32c_arm64_probe();
1389 		crc32c_intel_probe();
1390 	}
1391 }
1392 
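/*
 * Worker for verify_async: wait on verify_cond for io_us queued by
 * verify_io_u_async(), splice the list locally, verify each entry and
 * swallow errors that are configured as non-fatal verify errors.
 */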
1393 static void *verify_async_thread(void *data)
1394 {
1395 	struct thread_data *td = data;
1396 	struct io_u *io_u;
1397 	int ret = 0;
1398 
1399 	if (fio_option_is_set(&td->o, verify_cpumask) &&
1400 	    fio_setaffinity(td->pid, td->o.verify_cpumask)) {
1401 		log_err("fio: failed setting verify thread affinity\n");
1402 		goto done;
1403 	}
1404 
1405 	do {
1406 		FLIST_HEAD(list);
1407 
1408 		read_barrier();
1409 		if (td->verify_thread_exit)
1410 			break;
1411 
1412 		pthread_mutex_lock(&td->io_u_lock);
1413 
1414 		while (flist_empty(&td->verify_list) &&
1415 		       !td->verify_thread_exit) {
1416 			ret = pthread_cond_wait(&td->verify_cond,
1417 							&td->io_u_lock);
1418 			if (ret) {
1419 				pthread_mutex_unlock(&td->io_u_lock);
1420 				break;
1421 			}
1422 		}
1423 
1424 		flist_splice_init(&td->verify_list, &list);
1425 		pthread_mutex_unlock(&td->io_u_lock);
1426 
1427 		if (flist_empty(&list))
1428 			continue;
1429 
1430 		while (!flist_empty(&list)) {
1431 			io_u = flist_first_entry(&list, struct io_u, verify_list);
1432 			flist_del_init(&io_u->verify_list);
1433 
1434 			io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
1435 			ret = verify_io_u(td, &io_u);
1436 
1437 			put_io_u(td, io_u);
1438 			if (!ret)
1439 				continue;
1440 			if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
1441 				update_error_count(td, ret);
1442 				td_clear_error(td);
1443 				ret = 0;
1444 			}
1445 		}
1446 	} while (!ret);
1447 
1448 	if (ret) {
1449 		td_verror(td, ret, "async_verify");
1450 		if (td->o.verify_fatal)
1451 			fio_mark_td_terminate(td);
1452 	}
1453 
1454 done:
1455 	pthread_mutex_lock(&td->io_u_lock);
1456 	td->nr_verify_threads--;
1457 	pthread_mutex_unlock(&td->io_u_lock);
1458 
1459 	pthread_cond_signal(&td->free_cond);
1460 	return NULL;
1461 }
1462 
1463 int verify_async_init(struct thread_data *td)
1464 {
1465 	int i, ret;
1466 	pthread_attr_t attr;
1467 
1468 	pthread_attr_init(&attr);
1469 	pthread_attr_setstacksize(&attr, 2 * PTHREAD_STACK_MIN);
1470 
1471 	td->verify_thread_exit = 0;
1472 
1473 	td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
1474 	for (i = 0; i < td->o.verify_async; i++) {
1475 		ret = pthread_create(&td->verify_threads[i], &attr,
1476 					verify_async_thread, td);
1477 		if (ret) {
1478 			log_err("fio: async verify creation failed: %s\n",
1479 					strerror(ret));
1480 			break;
1481 		}
1482 		ret = pthread_detach(td->verify_threads[i]);
1483 		if (ret) {
1484 			log_err("fio: async verify thread detach failed: %s\n",
1485 					strerror(ret));
1486 			break;
1487 		}
1488 		td->nr_verify_threads++;
1489 	}
1490 
1491 	pthread_attr_destroy(&attr);
1492 
1493 	if (i != td->o.verify_async) {
1494 		log_err("fio: only %d verify threads started, exiting\n", i);
1495 		td->verify_thread_exit = 1;
1496 		write_barrier();
1497 		pthread_cond_broadcast(&td->verify_cond);
1498 		return 1;
1499 	}
1500 
1501 	return 0;
1502 }
1503 
1504 void verify_async_exit(struct thread_data *td)
1505 {
1506 	td->verify_thread_exit = 1;
1507 	write_barrier();
1508 	pthread_cond_broadcast(&td->verify_cond);
1509 
1510 	pthread_mutex_lock(&td->io_u_lock);
1511 
1512 	while (td->nr_verify_threads)
1513 		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1514 
1515 	pthread_mutex_unlock(&td->io_u_lock);
1516 	free(td->verify_threads);
1517 	td->verify_threads = NULL;
1518 }
1519 
1520 int paste_blockoff(char *buf, unsigned int len, void *priv)
1521 {
1522 	struct io_u *io = priv;
1523 	unsigned long long off;
1524 
1525 	typecheck(typeof(off), io->offset);
1526 	off = cpu_to_le64((uint64_t)io->offset);
1527 	len = min(len, (unsigned int)sizeof(off));
1528 	memcpy(buf, &off, len);
1529 	return 0;
1530 }
1531 
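/*
 * Record the most recent write completions for one file. last_write_comp
 * is treated as a ring buffer of up to iodepth offsets, walked backwards
 * from last_write_idx, so the saved verify state knows which recent
 * writes actually completed.
 */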
1532 static int __fill_file_completions(struct thread_data *td,
1533 				   struct thread_io_list *s,
1534 				   struct fio_file *f, unsigned int *index)
1535 {
1536 	unsigned int comps;
1537 	int i, j;
1538 
1539 	if (!f->last_write_comp)
1540 		return 0;
1541 
1542 	if (td->io_blocks[DDIR_WRITE] < td->o.iodepth)
1543 		comps = td->io_blocks[DDIR_WRITE];
1544 	else
1545 		comps = td->o.iodepth;
1546 
1547 	j = f->last_write_idx - 1;
1548 	for (i = 0; i < comps; i++) {
1549 		if (j == -1)
1550 			j = td->o.iodepth - 1;
1551 		s->comps[*index].fileno = __cpu_to_le64(f->fileno);
1552 		s->comps[*index].offset = cpu_to_le64(f->last_write_comp[j]);
1553 		(*index)++;
1554 		j--;
1555 	}
1556 
1557 	return comps;
1558 }
1559 
1560 static int fill_file_completions(struct thread_data *td,
1561 				 struct thread_io_list *s, unsigned int *index)
1562 {
1563 	struct fio_file *f;
1564 	unsigned int i;
1565 	int comps = 0;
1566 
1567 	for_each_file(td, f, i)
1568 		comps += __fill_file_completions(td, s, f, index);
1569 
1570 	return comps;
1571 }
1572 
1573 struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
1574 {
1575 	struct all_io_list *rep;
1576 	struct thread_data *td;
1577 	size_t depth;
1578 	void *next;
1579 	int i, nr;
1580 
1581 	compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");
1582 
1583 	/*
1584 	 * Calculate reply space needed. We need one 'io_state' per thread,
1585 	 * and the size will vary depending on depth.
1586 	 */
1587 	depth = 0;
1588 	nr = 0;
1589 	for_each_td(td, i) {
1590 		if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
1591 			continue;
1592 		td->stop_io = 1;
1593 		td->flags |= TD_F_VSTATE_SAVED;
1594 		depth += (td->o.iodepth * td->o.nr_files);
1595 		nr++;
1596 	}
1597 
1598 	if (!nr)
1599 		return NULL;
1600 
1601 	*sz = sizeof(*rep);
1602 	*sz += nr * sizeof(struct thread_io_list);
1603 	*sz += depth * sizeof(struct file_comp);
1604 	rep = malloc(*sz);
1605 	memset(rep, 0, *sz);
1606 
1607 	rep->threads = cpu_to_le64((uint64_t) nr);
1608 
1609 	next = &rep->state[0];
1610 	for_each_td(td, i) {
1611 		struct thread_io_list *s = next;
1612 		unsigned int comps, index = 0;
1613 
1614 		if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
1615 			continue;
1616 
1617 		comps = fill_file_completions(td, s, &index);
1618 
1619 		s->no_comps = cpu_to_le64((uint64_t) comps);
1620 		s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
1621 		s->nofiles = cpu_to_le64((uint64_t) td->o.nr_files);
1622 		s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
1623 		s->index = cpu_to_le64((uint64_t) i);
1624 		if (td->random_state.use64) {
1625 			s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
1626 			s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
1627 			s->rand.state64.s[2] = cpu_to_le64(td->random_state.state64.s3);
1628 			s->rand.state64.s[3] = cpu_to_le64(td->random_state.state64.s4);
1629 			s->rand.state64.s[4] = cpu_to_le64(td->random_state.state64.s5);
1630 			s->rand.state64.s[5] = 0;
1631 			s->rand.use64 = cpu_to_le64((uint64_t)1);
1632 		} else {
1633 			s->rand.state32.s[0] = cpu_to_le32(td->random_state.state32.s1);
1634 			s->rand.state32.s[1] = cpu_to_le32(td->random_state.state32.s2);
1635 			s->rand.state32.s[2] = cpu_to_le32(td->random_state.state32.s3);
1636 			s->rand.state32.s[3] = 0;
1637 			s->rand.use64 = 0;
1638 		}
1639 		s->name[sizeof(s->name) - 1] = '\0';
1640 		strncpy((char *) s->name, td->o.name, sizeof(s->name) - 1);
1641 		next = io_list_next(s);
1642 	}
1643 
1644 	return rep;
1645 }
1646 
1647 static int open_state_file(const char *name, const char *prefix, int num,
1648 			   int for_write)
1649 {
1650 	char out[PATH_MAX];
1651 	int flags;
1652 	int fd;
1653 
1654 	if (for_write)
1655 		flags = O_CREAT | O_TRUNC | O_WRONLY | O_SYNC;
1656 	else
1657 		flags = O_RDONLY;
1658 
1659 	verify_state_gen_name(out, sizeof(out), name, prefix, num);
1660 
1661 	fd = open(out, flags, 0644);
1662 	if (fd == -1) {
1663 		perror("fio: open state file");
1664 		log_err("fio: state file: %s (for_write=%d)\n", out, for_write);
1665 		return -1;
1666 	}
1667 
1668 	return fd;
1669 }
1670 
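/*
 * On-disk format of a verify state file: a verify_state_hdr (version,
 * payload size, crc32c of the payload) followed by the serialized
 * thread_io_list itself.
 */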
1671 static int write_thread_list_state(struct thread_io_list *s,
1672 				   const char *prefix)
1673 {
1674 	struct verify_state_hdr hdr;
1675 	uint64_t crc;
1676 	ssize_t ret;
1677 	int fd;
1678 
1679 	fd = open_state_file((const char *) s->name, prefix, s->index, 1);
1680 	if (fd == -1)
1681 		return 1;
1682 
1683 	crc = fio_crc32c((void *)s, thread_io_list_sz(s));
1684 
1685 	hdr.version = cpu_to_le64((uint64_t) VSTATE_HDR_VERSION);
1686 	hdr.size = cpu_to_le64((uint64_t) thread_io_list_sz(s));
1687 	hdr.crc = cpu_to_le64(crc);
1688 	ret = write(fd, &hdr, sizeof(hdr));
1689 	if (ret != sizeof(hdr))
1690 		goto write_fail;
1691 
1692 	ret = write(fd, s, thread_io_list_sz(s));
1693 	if (ret != thread_io_list_sz(s)) {
1694 write_fail:
1695 		if (ret < 0)
1696 			perror("fio: write state file");
1697 		log_err("fio: failed to write state file\n");
1698 		ret = 1;
1699 	} else
1700 		ret = 0;
1701 
1702 	close(fd);
1703 	return ret;
1704 }
1705 
1706 void __verify_save_state(struct all_io_list *state, const char *prefix)
1707 {
1708 	struct thread_io_list *s = &state->state[0];
1709 	unsigned int i;
1710 
1711 	for (i = 0; i < le64_to_cpu(state->threads); i++) {
1712 		write_thread_list_state(s,  prefix);
1713 		s = io_list_next(s);
1714 	}
1715 }
1716 
1717 void verify_save_state(int mask)
1718 {
1719 	struct all_io_list *state;
1720 	size_t sz;
1721 
1722 	state = get_all_io_list(mask, &sz);
1723 	if (state) {
1724 		char prefix[PATH_MAX];
1725 
1726 		if (aux_path)
1727 			sprintf(prefix, "%s%slocal", aux_path, FIO_OS_PATH_SEPARATOR);
1728 		else
1729 			strcpy(prefix, "local");
1730 
1731 		__verify_save_state(state, prefix);
1732 		free(state);
1733 	}
1734 }
1735 
1736 void verify_free_state(struct thread_data *td)
1737 {
1738 	if (td->vstate)
1739 		free(td->vstate);
1740 }
1741 
1742 void verify_assign_state(struct thread_data *td, void *p)
1743 {
1744 	struct thread_io_list *s = p;
1745 	int i;
1746 
1747 	s->no_comps = le64_to_cpu(s->no_comps);
1748 	s->depth = le32_to_cpu(s->depth);
1749 	s->nofiles = le32_to_cpu(s->nofiles);
1750 	s->numberio = le64_to_cpu(s->numberio);
1751 	s->rand.use64 = le64_to_cpu(s->rand.use64);
1752 
1753 	if (s->rand.use64) {
1754 		for (i = 0; i < 6; i++)
1755 			s->rand.state64.s[i] = le64_to_cpu(s->rand.state64.s[i]);
1756 	} else {
1757 		for (i = 0; i < 4; i++)
1758 			s->rand.state32.s[i] = le32_to_cpu(s->rand.state32.s[i]);
1759 	}
1760 
1761 	for (i = 0; i < s->no_comps; i++) {
1762 		s->comps[i].fileno = le64_to_cpu(s->comps[i].fileno);
1763 		s->comps[i].offset = le64_to_cpu(s->comps[i].offset);
1764 	}
1765 
1766 	td->vstate = p;
1767 }
1768 
1769 int verify_state_hdr(struct verify_state_hdr *hdr, struct thread_io_list *s)
1770 {
1771 	uint64_t crc;
1772 
1773 	hdr->version = le64_to_cpu(hdr->version);
1774 	hdr->size = le64_to_cpu(hdr->size);
1775 	hdr->crc = le64_to_cpu(hdr->crc);
1776 
1777 	if (hdr->version != VSTATE_HDR_VERSION)
1778 		return 1;
1779 
1780 	crc = fio_crc32c((void *)s, hdr->size);
1781 	if (crc != hdr->crc)
1782 		return 1;
1783 
1784 	return 0;
1785 }
1786 
1787 int verify_load_state(struct thread_data *td, const char *prefix)
1788 {
1789 	struct verify_state_hdr hdr;
1790 	void *s = NULL;
1791 	uint64_t crc;
1792 	ssize_t ret;
1793 	int fd;
1794 
1795 	if (!td->o.verify_state)
1796 		return 0;
1797 
1798 	fd = open_state_file(td->o.name, prefix, td->thread_number - 1, 0);
1799 	if (fd == -1)
1800 		return 1;
1801 
1802 	ret = read(fd, &hdr, sizeof(hdr));
1803 	if (ret != sizeof(hdr)) {
1804 		if (ret < 0)
1805 			td_verror(td, errno, "read verify state hdr");
1806 		log_err("fio: failed reading verify state header\n");
1807 		goto err;
1808 	}
1809 
1810 	hdr.version = le64_to_cpu(hdr.version);
1811 	hdr.size = le64_to_cpu(hdr.size);
1812 	hdr.crc = le64_to_cpu(hdr.crc);
1813 
1814 	if (hdr.version != VSTATE_HDR_VERSION) {
1815 		log_err("fio: unsupported (%d) version in verify state header\n",
1816 				(unsigned int) hdr.version);
1817 		goto err;
1818 	}
1819 
1820 	s = malloc(hdr.size);
1821 	ret = read(fd, s, hdr.size);
1822 	if (ret != hdr.size) {
1823 		if (ret < 0)
1824 			td_verror(td, errno, "read verify state");
1825 		log_err("fio: failed reading verify state\n");
1826 		goto err;
1827 	}
1828 
1829 	crc = fio_crc32c(s, hdr.size);
1830 	if (crc != hdr.crc) {
1831 		log_err("fio: verify state is corrupt\n");
1832 		goto err;
1833 	}
1834 
1835 	close(fd);
1836 
1837 	verify_assign_state(td, s);
1838 	return 0;
1839 err:
1840 	if (s)
1841 		free(s);
1842 	close(fd);
1843 	return 1;
1844 }
1845 
1846 /*
1847  * Use the loaded verify state to know when to stop doing verification
1848  */
1849 int verify_state_should_stop(struct thread_data *td, struct io_u *io_u)
1850 {
1851 	struct thread_io_list *s = td->vstate;
1852 	struct fio_file *f = io_u->file;
1853 	int i;
1854 
1855 	if (!s || !f)
1856 		return 0;
1857 
1858 	/*
1859 	 * If we're not yet into the window of (issues - depth), continue.
1860 	 * If the issue count is smaller than the depth, do the check.
1861 	 */
1862 	if ((td->io_blocks[DDIR_READ] < s->depth ||
1863 	    s->numberio - td->io_blocks[DDIR_READ] > s->depth) &&
1864 	    s->numberio > s->depth)
1865 		return 0;
1866 
1867 	/*
1868 	 * We're in the window of having to check if this io was
1869 	 * completed or not. If the IO was seen as completed, then
1870 	 * lets verify it.
1871 	 */
1872 	for (i = 0; i < s->no_comps; i++) {
1873 		if (s->comps[i].fileno != f->fileno)
1874 			continue;
1875 		if (io_u->offset == s->comps[i].offset)
1876 			return 0;
1877 	}
1878 
1879 	/*
1880 	 * Not found, we have to stop
1881 	 */
1882 	return 1;
1883 }
1884