/*
 * Copyright (c) International Business Machines Corp., 2001
 * Copyright (c) 2013 Oracle and/or its affiliates. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * This module tests the block I/O layer.
 *
 * module: tbio
 *
 * FILE        : tbio.c
 * USAGE       : kernel_space: ./load_tbio.sh
 *               user_space  : ./test_bio
 *
 * DESCRIPTION : The module tests the block I/O layer for kernel 2.5.
 * REQUIREMENTS:
 *               1) glibc 2.1.91 or above.
 *
 * HISTORY     :
 *               11/19/2003 Kai Zhao (ltcd3@cn.ibm.com)
 *
 * CODE COVERAGE: 74.9% - fs/bio.c (Total Coverage)
 *
 */

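/*
 * For reference, a minimal user-space sketch of how the ioctl interface can
 * be driven (the real driver program is test_bio run via load_tbio.sh; the
 * "/dev/tbio" node path, buffer contents and the exact call order below are
 * illustrative assumptions, not the actual LTP test code):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include "tbio.h"
 *
 *	int fd = open("/dev/tbio", O_RDWR);	// node set up when the module is loaded
 *
 *	ioctl(fd, LTP_TBIO_ALLOC, 0);		// allocate the test bio (tbiop)
 *	ioctl(fd, LTP_TBIO_ADD_PAGE, 0);	// add pages to it
 *	ioctl(fd, LTP_TBIO_CLONE, 0);		// clone it and release the clone
 *	ioctl(fd, LTP_TBIO_GET_NR_VECS, 0);	// query max vecs for the device
 *	ioctl(fd, LTP_TBIO_PUT, 0);		// drop the test bio
 *
 *	// LTP_TBIO_DO_IO and LTP_TBIO_SPLIT take a tbio_interface_t that
 *	// describes a user buffer plus a null-terminated command string:
 *	char data[1024] = "tbio test data";
 *	char cmd[6] = "write";
 *	tbio_interface_t inter = {
 *		.data = data, .data_len = sizeof(data),
 *		.direction = TBIO_TO_DEV,
 *		.cmd = cmd, .cmd_len = sizeof(cmd),
 *	};
 *	ioctl(fd, LTP_TBIO_DO_IO, &inter);
 */
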
#include <linux/module.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>

#include "tbio.h"

MODULE_AUTHOR("Kai Zhao <ltcd3@cn.ibm.com>");
MODULE_AUTHOR("Alexey Kodanev <alexey.kodanev@oracle.com>");
MODULE_DESCRIPTION(TMOD_DRIVER_NAME);
MODULE_LICENSE("GPL");

#define prk_err(fmt, ...) \
	pr_err(TBIO_DEVICE_NAME ": " fmt "\n", ##__VA_ARGS__)
#define prk_info(fmt, ...) \
	pr_info(TBIO_DEVICE_NAME ": " fmt "\n", ##__VA_ARGS__)

static int nsectors = 4096;
module_param(nsectors, int, 0444);
MODULE_PARM_DESC(nsectors, "The number of sectors");

static struct bio *tbiop, *tbiop_dup;

static struct tbio_device {
	unsigned long size;
	spinlock_t lock;
	u8 *data;
	struct gendisk *gd;
	struct block_device *bdev;
	struct request_queue *q;
} tbio_dev;

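/*
 * Wrap the bio in a request, copy the user-supplied command bytes into
 * rq->cmd (they must be null-terminated and fit the request), position the
 * request at the bio's start sector and execute it synchronously with
 * blk_execute_rq().
 */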
static int send_request(struct request_queue *q, struct bio *bio,
			struct block_device *bdev, struct tbio_interface *inter,
			int writing)
{
	struct request *rq;
	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq)) {
		prk_err("failed to make request");
		return -EFAULT;
	}

	if ((!inter->cmd_len) || (inter->cmd_len > rq->cmd_len)) {
		prk_err("invalid inter->cmd_len");
		goto out_request;
	}

	rq->cmd_len = inter->cmd_len;

	if (copy_from_user(rq->cmd, inter->cmd, inter->cmd_len))
		goto out_request;

	if (*(rq->cmd + rq->cmd_len - 1)) {
		prk_err("rq->cmd is not null-terminated");
		goto out_request;
	}

	rq->__sector = bio->bi_sector;

	if (blk_execute_rq(q, bdev->bd_disk, rq, 0))
		goto out_request;

	blk_put_request(rq);

	return 0;

out_request:
	blk_put_request(rq);
	return -EFAULT;
}

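/*
 * LTP_TBIO_DO_IO: copy the tbio_interface descriptor from user space, map
 * the user data buffer into a bio with bio_map_user() (falling back to a
 * kernel bounce buffer if mapping fails) and submit it via send_request().
 */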
static int tbio_io(struct block_device *bdev, struct tbio_interface *uptr)
{
	int ret;
	tbio_interface_t inter;
	struct bio *bio = NULL;
	int reading = 0, writing = 0;
	void *buf = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (copy_from_user(&inter, uptr, sizeof(tbio_interface_t))) {
		prk_err("copy_from_user");
		return -EFAULT;
	}

	if (inter.data_len > (q->limits.max_sectors << 9)) {
		prk_err("inter.data_len > q->limits.max_sectors << 9");
		return -EIO;
	}

	if (inter.data_len) {

		switch (inter.direction) {
		default:
			return -EINVAL;
		case TBIO_TO_DEV:
			writing = 1;
			break;
		case TBIO_FROM_DEV:
			reading = 1;
			break;
		}

		bio = bio_map_user(q, bdev, (unsigned long)inter.data,
				   inter.data_len, reading, GFP_KERNEL);

		if (!bio) {
			prk_err("bio_map_user failed");
			buf = kmalloc(inter.data_len, q->bounce_gfp | GFP_USER);
			if (!buf) {
				prk_err("buffer no memory");
				return -1;
			}
			ret = copy_from_user(buf, inter.data, inter.data_len);
			if (ret)
				prk_err("copy_from_user() failed");

			prk_info("buffer %s, copy_from_user returned '%d'",
				 (char *)buf, ret);
		}

	}

	send_request(q, bio, bdev, &inter, writing);

	if (bio)
		bio_unmap_user(bio);

	kfree(buf);
	return 0;
}

static int test_bio_put(struct bio *biop)
{
	if (biop)
		bio_put(biop);

	return 0;
}

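/*
 * LTP_TBIO_CLONE: clone the bio allocated by LTP_TBIO_ALLOC with bio_clone()
 * and drop the clone again.
 */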
static int test_bio_clone(void)
{
	tbiop_dup = bio_clone(tbiop, GFP_NOIO);
	if (tbiop_dup == NULL) {
		prk_err("bio_clone failed");
		return -1;
	}

	test_bio_put(tbiop_dup);

	return 0;
}

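/*
 * LTP_TBIO_ADD_PAGE: append 128 zeroed pages to the test bio with
 * bio_add_page(), advancing the offset by the number of bytes added.
 */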
static int test_bio_add_page(void)
{
	int ret = 0, i = 0, offset = 0;
	unsigned long addr = 0;
	struct page *ppage = NULL;

	for (i = 0; i < 128; i++) {

		addr = get_zeroed_page(GFP_KERNEL);

		if (addr == 0) {
			prk_err("get free page failed %ld", addr);
			ret = -1;
			break;
		}

		ppage = virt_to_page(addr);
		if (ppage == NULL) {
			prk_err("convert virtual address to page struct failed");
			ret = -1;
			break;
		}

		ret = bio_add_page(tbiop, ppage, PAGE_SIZE, offset);
		if (ret == 0) {
			prk_err("bio_add_page failed");
			ret = -1;
			break;
		}
		offset += ret;
	}

	return ret;
}

static int test_do_bio_alloc(int num)
{
	tbiop = bio_alloc(GFP_KERNEL, num);
	if (tbiop == NULL) {
		prk_err("bio_alloc failed");
		return -1;
	}
	bio_put(tbiop);

	return 0;
}

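/*
 * LTP_TBIO_ALLOC: allocate and release bios of increasing vector counts,
 * then keep a BIO_MAX_PAGES bio in tbiop for the later add-page, clone and
 * put test cases.
 */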
static int test_bio_alloc(void)
{
	if (test_do_bio_alloc(2) < 0) {
		prk_err("can not alloc bio for %d", 2);
		return -1;
	}

	if (test_do_bio_alloc(8) < 0) {
		prk_err("can not alloc bio for %d", 8);
		return -1;
	}

	if (test_do_bio_alloc(32) < 0) {
		prk_err("can not alloc bio for %d", 32);
		return -1;
	}

	if (test_do_bio_alloc(96) < 0) {
		prk_err("can not alloc bio for %d", 96);
		return -1;
	}

	if (test_do_bio_alloc(BIO_MAX_PAGES) < 0) {
		prk_err("can not alloc bio for %d", BIO_MAX_PAGES);
		return -1;
	}

	tbiop = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
	if (tbiop == NULL) {
		prk_err("bio_alloc failed");
		return -1;
	}

	tbiop->bi_bdev = tbio_dev.bdev;
	tbiop->bi_sector = 0;

	return 0;
}

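/*
 * LTP_TBIO_SPLIT: map the user buffer into a bio, split it in two with
 * bio_split() and submit both halves of the resulting bio_pair.
 */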
static int test_bio_split(struct block_device *bdev,
			  struct tbio_interface *uptr)
{
	int ret;
	tbio_interface_t inter;
	struct bio *bio = NULL;
	struct bio_pair *bio_pairp = NULL;
	int reading = 0, writing = 0;
	void *buf = NULL;
	struct request_queue *q = bdev_get_queue(bdev);
	if (!q) {
		prk_err("bdev_get_queue() failed");
		return -EFAULT;
	}

	prk_info("test_bio_split");

	if (copy_from_user(&inter, uptr, sizeof(tbio_interface_t))) {
		prk_err("copy_from_user");
		return -EFAULT;
	}

	if (inter.data_len > (q->limits.max_sectors << 9)) {
		prk_err("inter.data_len > q->limits.max_sectors << 9");
		return -EIO;
	}

	prk_info("inter.data_len is %d", inter.data_len);
	if (inter.data_len) {

		switch (inter.direction) {
		default:
			return -EINVAL;
		case TBIO_TO_DEV:
			writing = 1;
			break;
		case TBIO_FROM_DEV:
			reading = 1;
			break;
		}

		bio = bio_map_user(q, bdev, (unsigned long)inter.data,
				   inter.data_len, reading, GFP_KERNEL);

		if (!bio) {
			prk_err("bio_map_user failed");
			buf = kmalloc(inter.data_len, q->bounce_gfp | GFP_USER);
			if (!buf) {
				prk_err("buffer no memory");
				return -1;
			}
			ret = copy_from_user(buf, inter.data, inter.data_len);
			if (ret)
				prk_err("copy_from_user() failed");

			prk_info("buffer %s", (char *)buf);
		} else {
			bio_pairp = bio_split(bio, 2);

			if (bio_pairp == NULL) {
				prk_err("bio_split failed");
				bio_unmap_user(bio);
				return -1;
			}
		}

	}

	if (bio_pairp) {
		send_request(q, &(bio_pairp->bio1), bdev, &inter, writing);
		send_request(q, &(bio_pairp->bio2), bdev, &inter, writing);
		bio_pair_release(bio_pairp);
	}

	if (bio)
		bio_unmap_user(bio);

	kfree(buf);

	return 0;
}

static int test_bio_get_nr_vecs(void)
{
	int number = 0;

	number = bio_get_nr_vecs(tbio_dev.bdev);

	if (number < 0) {
		prk_err("bio_get_nr_vecs failed");
		return -1;
	}

	prk_info("bio_get_nr_vecs: %d", number);
	return 0;
}

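/*
 * Dispatch the LTP_TBIO_* test cases issued by the user-space test_bio
 * program.
 */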
static int tbio_ioctl(struct block_device *blk, fmode_t mode,
		      unsigned cmd, unsigned long arg)
{
	int err = 0;

	switch (cmd) {
	case LTP_TBIO_DO_IO:
		prk_info("TEST-CASE: LTP_TBIO_DO_IO:");
		err = tbio_io(tbio_dev.bdev, (struct tbio_interface *)arg);
		break;
	case LTP_TBIO_CLONE:
		prk_info("TEST-CASE: LTP_TBIO_CLONE:");
		err = test_bio_clone();
		break;
	case LTP_TBIO_ADD_PAGE:
		prk_info("TEST-CASE: LTP_TBIO_ADD_PAGE:");
		err = test_bio_add_page();
		break;
	case LTP_TBIO_ALLOC:
		prk_info("TEST-CASE: LTP_TBIO_ALLOC:");
		err = test_bio_alloc();
		break;
	case LTP_TBIO_GET_NR_VECS:
		prk_info("TEST-CASE: LTP_TBIO_GET_NR_VECS:");
		err = test_bio_get_nr_vecs();
		break;
	case LTP_TBIO_PUT:
		prk_info("TEST-CASE: LTP_TBIO_PUT:");
		err = test_bio_put(tbiop);
		break;
	case LTP_TBIO_SPLIT:
		prk_info("TEST-CASE: LTP_TBIO_SPLIT:");
		err = test_bio_split(tbio_dev.bdev,
				     (struct tbio_interface *)arg);
		break;
	}

	prk_info("TEST-CASE DONE");
	return err;
}

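/*
 * Service one request against the vmalloc'ed backing store: walk the
 * request's segments and memcpy each one to or from dev->data, so the
 * module behaves like a tiny ramdisk.
 */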
static int tbio_transfer(struct request *req, struct tbio_device *dev)
{
	unsigned int i = 0, offset = 0;
	char *buf;
	unsigned long flags;
	size_t size;

	struct bio_vec *bv;
	struct req_iterator iter;

	size = blk_rq_cur_bytes(req);
	prk_info("bio req of size %zu:", size);
	offset = blk_rq_pos(req) * 512;

	rq_for_each_segment(bv, req, iter) {
		size = bv->bv_len;
		prk_info("%s bio(%u), segs(%u) sect(%u) pos(%lu) off(%u)",
			 (bio_data_dir(iter.bio) == READ) ? "READ" : "WRITE",
			 i, bio_segments(iter.bio), bio_sectors(iter.bio),
			 iter.bio->bi_sector, offset);

		if (get_capacity(req->rq_disk) * 512 < offset) {
			prk_info("Error, small capacity %zu, offset %u",
				 get_capacity(req->rq_disk) * 512,
				 offset);
			continue;
		}

		buf = bvec_kmap_irq(bv, &flags);
		if (bio_data_dir(iter.bio) == WRITE)
			memcpy(dev->data + offset, buf, size);
		else
			memcpy(buf, dev->data + offset, size);
		offset += size;
		flush_kernel_dcache_page(bv->bv_page);
		bvec_kunmap_irq(buf, &flags);
		++i;
	}

	return 0;
}

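/*
 * Request-queue callback: drain the queue and hand each request to
 * tbio_transfer(), completing it with blk_end_request_all().
 */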
static void tbio_request(struct request_queue *q)
{
	int ret = 0;
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {

		ret = tbio_transfer(req, &tbio_dev);

		spin_unlock_irq(q->queue_lock);
		blk_end_request_all(req, ret);
		spin_lock_irq(q->queue_lock);
	}
}

static int tbio_open(struct block_device *blk, fmode_t mode)
{
	tbio_dev.bdev = blk;

	return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
static int tbio_release(struct gendisk *gd, fmode_t mode)
#else
static void tbio_release(struct gendisk *gd, fmode_t mode)
#endif
{

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	return 0;
#endif
}

int tbio_media_changed(struct gendisk *gd)
{
	return 0;
}

int tbio_revalidate(struct gendisk *gd)
{
	return 0;
}

static const struct block_device_operations tbio_ops = {
	.owner = THIS_MODULE,
	.open = tbio_open,
	.ioctl = tbio_ioctl,
	.release = tbio_release,
	.media_changed = tbio_media_changed,
	.revalidate_disk = tbio_revalidate
};

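/*
 * Allocate the nsectors * 512 byte backing store, register the block device,
 * set up the request queue and gendisk, and add the "tbio" disk.
 */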
static int __init tbio_init(void)
{
	tbio_dev.size = nsectors * 512;

	tbio_dev.data = vmalloc(tbio_dev.size);
	if (tbio_dev.data == NULL)
		return -ENOMEM;
	strcpy(tbio_dev.data, "tbio data");
	tbio_dev.bdev = NULL;

	TBIO_MAJOR = register_blkdev(0, DEVICE_NAME);
	if (TBIO_MAJOR <= 0) {
		prk_err("unable to get major number");
		goto out;
	}
	prk_info("register_blkdev major %d", TBIO_MAJOR);

	spin_lock_init(&tbio_dev.lock);
	tbio_dev.q = blk_init_queue(tbio_request, &tbio_dev.lock);
	if (!tbio_dev.q) {
		prk_err("failed to init queue");
		goto out_unregister;
	}

	tbio_dev.gd = alloc_disk(1);
	if (!tbio_dev.gd)
		goto out_unregister;
	tbio_dev.gd->major = TBIO_MAJOR;
	tbio_dev.gd->first_minor = 0;
	tbio_dev.gd->fops = &tbio_ops;
	tbio_dev.gd->private_data = &tbio_dev;
	tbio_dev.gd->queue = tbio_dev.q;
	strcpy(tbio_dev.gd->disk_name, "tbio");
	set_capacity(tbio_dev.gd, nsectors);
	tbio_dev.gd->queue->queuedata = tbio_dev.gd;

	add_disk(tbio_dev.gd);

	return 0;

out_unregister:
	unregister_blkdev(TBIO_MAJOR, DEVICE_NAME);
out:
	vfree(tbio_dev.data);
	return -ENOMEM;
}
module_init(tbio_init);

static void tbio_exit(void)
{
	del_gendisk(tbio_dev.gd);
	blk_cleanup_queue(tbio_dev.q);
	put_disk(tbio_dev.gd);
	unregister_blkdev(TBIO_MAJOR, DEVICE_NAME);
	vfree(tbio_dev.data);
	prk_info("exit");
}
module_exit(tbio_exit);