/*
 * sg engine
 *
 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
 *
 */
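
/*
 * Illustrative job file section for this engine (not part of the original
 * source; the device path is a placeholder). Note that bs must be a
 * multiple of the device sector size, see fio_sgio_prep().
 *
 *	[sg-read]
 *	ioengine=sg
 *	filename=/dev/sg0
 *	rw=read
 *	bs=4k
 */
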
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/poll.h>

#include "../fio.h"

#ifdef FIO_HAVE_SGIO

struct sgio_cmd {
	unsigned char cdb[10];
	int nr;
};

struct sgio_data {
	struct sgio_cmd *cmds;
	struct io_u **events;
	struct pollfd *pfds;
	int *fd_flags;
	void *sgbuf;
	unsigned int bs;
	int type_checked;
};

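/*
 * Fill in an SG v3 header for one io_u: point it at the per-io_u CDB
 * storage, tag it with the io_u index as the pack_id, and stash the io_u
 * itself in usr_ptr so it can be recovered when the reply is reaped.
 * 'fs' selects whether a data transfer buffer is attached.
 */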
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;

	if (fs) {
		hdr->dxferp = io_u->xfer_buf;
		hdr->dxfer_len = io_u->xfer_buflen;
	}
}

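/*
 * Return whether any of the polled file descriptors has data to read.
 */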
static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}

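/*
 * Reap completed commands. If min is zero, the fds are set non-blocking
 * and whatever happens to be ready is returned; otherwise poll() waits
 * for POLLIN, completed sg_io_hdr replies are read() back from each
 * device, and the originating io_u is recovered from usr_ptr.
 */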
static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
			      unsigned int max, struct timespec fio_unused *t)
{
	struct sgio_data *sd = td->io_ops->data;
	int left = max, ret, r = 0;
	void *buf = sd->sgbuf;
	unsigned int i, events;
	struct fio_file *f;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min)
			sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
		else
			sd->fd_flags[i] = -1;

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		do {
			if (!min)
				break;

			ret = poll(sd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
			if (ret < 0) {
				if (errno == EAGAIN)
					continue;
				r = -errno;
				td_verror(td, errno, "read");
				break;
			} else if (ret) {
				p += ret;
				events += ret / sizeof(struct sg_io_hdr);
			}
		}

		if (r < 0)
			break;
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

			sd->events[i] = hdr->usr_ptr;
		}
	}

	if (!min) {
		for_each_file(td, f, i) {
			if (sd->fd_flags[i] == -1)
				continue;

			if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
				log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
		}
	}

	return r;
}

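/*
 * Issue a command through the SG_IO ioctl, which is always synchronous:
 * the command has completed by the time the ioctl returns.
 */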
static int fio_sgio_ioctl_doio(struct thread_data *td,
			       struct fio_file *f, struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	return FIO_Q_COMPLETED;
}

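/*
 * Issue a command to an sg character device: write() submits the
 * sg_io_hdr, and for sync IO a blocking read() immediately reaps the
 * reply. Otherwise the command is left in flight for getevents().
 */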
static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int do_sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (do_sync) {
		ret = read(f->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return ret;
		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}

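/*
 * Block devices are driven through the SG_IO ioctl, sg character
 * devices through read/write on the file descriptor.
 */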
static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int do_sync)
{
	struct fio_file *f = io_u->file;

	if (f->filetype == FIO_TYPE_BD)
		return fio_sgio_ioctl_doio(td, f, io_u);

	return fio_sgio_rw_doio(f, io_u, do_sync);
}

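/*
 * Build the 10-byte CDB for this io_u: READ(10), WRITE(10) or
 * SYNCHRONIZE CACHE(10). The 10-byte variants carry a 32-bit LBA in
 * bytes 2-5 and a 16-bit block count in bytes 7-8, both big-endian.
 */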
static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops->data;
	int nr_blocks, lba;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		hdr->cmdp[0] = 0x28;	/* READ(10) */
	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		hdr->cmdp[0] = 0x2a;	/* WRITE(10) */
	} else {
		sgio_hdr_init(sd, hdr, io_u, 0);

		hdr->dxfer_direction = SG_DXFER_NONE;
		hdr->cmdp[0] = 0x35;	/* SYNCHRONIZE CACHE(10) */
	}

	if (hdr->dxfer_direction != SG_DXFER_NONE) {
		nr_blocks = io_u->xfer_buflen / sd->bs;
		lba = io_u->offset / sd->bs;
		hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
		hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
		hdr->cmdp[4] = (unsigned char) ((lba >>  8) & 0xff);
		hdr->cmdp[5] = (unsigned char) (lba & 0xff);
		hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
		hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
	}

	return 0;
}

static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (td->o.sync_io || td->o.odirect || ddir_sync(io_u->ddir))
		do_sync = 1;

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}

static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
	struct sgio_data *sd = td->io_ops->data;

	return sd->events[event];
}

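/*
 * Issue READ CAPACITY(10) (opcode 0x25) to find the device block size.
 * The 8-byte reply carries the last LBA in bytes 0-3 and the block size
 * in bytes 4-7, big-endian.
 */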
static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
{
	struct sgio_data *sd = td->io_ops->data;
	struct io_u io_u;
	struct sg_io_hdr *hdr;
	unsigned char buf[8];
	int ret;

	memset(&io_u, 0, sizeof(io_u));
	io_u.file = td->files[0];

	hdr = &io_u.hdr;
	sgio_hdr_init(sd, hdr, &io_u, 0);
	memset(buf, 0, sizeof(buf));

	hdr->cmdp[0] = 0x25;
	hdr->dxfer_direction = SG_DXFER_FROM_DEV;
	hdr->dxferp = buf;
	hdr->dxfer_len = sizeof(buf);

	ret = fio_sgio_doio(td, &io_u, 1);
	if (ret)
		return ret;

	*bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
	return 0;
}

static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops->data;

	if (sd) {
		free(sd->events);
		free(sd->cmds);
		free(sd->fd_flags);
		free(sd->pfds);
		free(sd->sgbuf);
		free(sd);
	}
}

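/*
 * Allocate the per-thread engine state: one CDB slot and one event slot
 * per queued io_u (iodepth), and one pollfd/fd_flags slot per file.
 */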
static int fio_sgio_init(struct thread_data *td)
{
	struct sgio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
	memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
	sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
	sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
	sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
	sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
	memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);

	td->io_ops->data = sd;

	/*
	 * we want sync IO here, regardless of whether odirect is set or not
	 */
	td->o.override_sync = 1;
	return 0;
}

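/*
 * Validate the file on first open and record the device block size:
 * BLKSSZGET for block devices, READ CAPACITY for sg character devices
 * (after checking that the SG driver answers SG_GET_VERSION_NUM).
 */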
static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops->data;
	unsigned int bs;

	if (f->filetype == FIO_TYPE_BD) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_get_bs(td, &bs);
		if (ret)
			return 1;
	} else {
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;

	if (f->filetype == FIO_TYPE_BD) {
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
	}

	sd->type_checked = 1;
	return 0;
}

static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops->data;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return ret;

	if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}

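/*
 * Engine hooks. Note that fio_sgio_type_check() clears .getevents and
 * .event at open time for block devices, where the SG_IO ioctl path
 * always completes synchronously.
 */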
static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
};

#else /* FIO_HAVE_SGIO */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this IO engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine sg not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};

#endif /* FIO_HAVE_SGIO */

static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}