Lines matching +full:sub +full:- +full:sampled
2 * fs/direct-io.c
15 * added support for non-aligned IO.
56 * is determined on a per-invocation basis. When talking to the filesystem
58 * down by dio->blkfactor. Similarly, fs-blocksize quantities are converted
73 finer. blkfactor=2 means 1/4-block
75 unsigned start_zero_done; /* flag: sub-blocksize zeroing has
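The fragments above describe the per-invocation block-size split: dio-block numbers shift down by blkfactor to become fs-block numbers, and fs-block quantities shift up to become dio blocks. A userspace sketch of that arithmetic follows (the variable names and numbers are invented for illustration; only the shifts mirror the listed code):

/* Userspace sketch (not kernel code): dio-block quantities are fs-block
 * quantities shifted up by blkfactor, and i_blkbits = blkbits + blkfactor. */
#include <stdio.h>

int main(void)
{
	unsigned blkbits = 9;                       /* dio works in 512-byte blocks */
	unsigned i_blkbits = 12;                    /* filesystem uses 4096-byte blocks */
	unsigned blkfactor = i_blkbits - blkbits;   /* 3: 8 dio blocks per fs block */

	unsigned long long block_in_file = 21;                     /* a dio-sized block number */
	unsigned long long fs_block = block_in_file >> blkfactor;  /* lands in fs block 2 */

	printf("blkfactor=%u: dio block %llu lies in fs block %llu\n",
	       blkfactor, block_in_file, fs_block);
	return 0;
}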
160 return sdio->tail - sdio->head; in dio_pages_present()
170 ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, in dio_refill_pages()
171 &sdio->from); in dio_refill_pages()
173 if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) { in dio_refill_pages()
180 if (dio->page_errors == 0) in dio_refill_pages()
181 dio->page_errors = ret; in dio_refill_pages()
183 dio->pages[0] = page; in dio_refill_pages()
184 sdio->head = 0; in dio_refill_pages()
185 sdio->tail = 1; in dio_refill_pages()
186 sdio->from = 0; in dio_refill_pages()
187 sdio->to = PAGE_SIZE; in dio_refill_pages()
192 iov_iter_advance(sdio->iter, ret); in dio_refill_pages()
193 ret += sdio->from; in dio_refill_pages()
194 sdio->head = 0; in dio_refill_pages()
195 sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE; in dio_refill_pages()
196 sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1; in dio_refill_pages()
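The two lines above derive the page bookkeeping from the byte count that iov_iter_get_pages() returned. A worked userspace sketch of that arithmetic (4096-byte pages assumed; the numbers are invented):

/* 'ret' bytes were mapped starting at offset 'from' in the first page;
 * compute how many page slots are occupied and where the data ends in
 * the last page, exactly as dio_refill_pages() does above. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long from = 100;   /* offset into the first page */
	unsigned long ret = 9000;   /* bytes the iterator handed back */

	ret += from;
	unsigned long tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;  /* 3 pages used */
	unsigned long to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;    /* last page valid to byte 908 */

	printf("pages used=%lu, last page filled to offset %lu\n", tail, to);
	return 0;
}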
219 return dio->pages[sdio->head]; in dio_get_page()
232 errseq_set(&inode->i_mapping->wb_err, -EIO); in dio_warn_stale_pagecache()
238 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, in dio_warn_stale_pagecache()
239 current->comm); in dio_warn_stale_pagecache()
244 * dio_complete() - called when all DIO BIO I/O has been completed
257 loff_t offset = dio->iocb->ki_pos; in dio_complete()
264 * In that case -EIOCBQUEUED is in fact not an error we want in dio_complete()
267 if (ret == -EIOCBQUEUED) in dio_complete()
270 if (dio->result) { in dio_complete()
271 transferred = dio->result; in dio_complete()
274 if ((dio->op == REQ_OP_READ) && in dio_complete()
275 ((offset + transferred) > dio->i_size)) in dio_complete()
276 transferred = dio->i_size - offset; in dio_complete()
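The clamp above keeps a direct read that crosses i_size from reporting bytes past EOF. A tiny numeric sketch (illustrative values only):

/* Sketch of the read clamp: a direct read that crosses i_size only
 * reports the bytes that lie within the file. */
#include <stdio.h>

int main(void)
{
	long long i_size = 10000, offset = 8192, transferred = 4096;

	if (offset + transferred > i_size)
		transferred = i_size - offset;           /* only 1808 bytes count */
	printf("transferred=%lld\n", transferred);
	return 0;
}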
278 if (unlikely(ret == -EFAULT) && transferred) in dio_complete()
283 ret = dio->page_errors; in dio_complete()
285 ret = dio->io_error; in dio_complete()
289 if (dio->end_io) { in dio_complete()
291 err = dio->end_io(dio->iocb, offset, ret, dio->private); in dio_complete()
298 * non-direct readahead, or faulted in by get_user_pages() if the source in dio_complete()
303 * And this page cache invalidation has to be after dio->end_io(), as in dio_complete()
309 ret > 0 && dio->op == REQ_OP_WRITE && in dio_complete()
310 dio->inode->i_mapping->nrpages) { in dio_complete()
311 err = invalidate_inode_pages2_range(dio->inode->i_mapping, in dio_complete()
313 (offset + ret - 1) >> PAGE_SHIFT); in dio_complete()
315 dio_warn_stale_pagecache(dio->iocb->ki_filp); in dio_complete()
318 inode_dio_end(dio->inode); in dio_complete()
326 dio->iocb->ki_pos += transferred; in dio_complete()
328 if (ret > 0 && dio->op == REQ_OP_WRITE) in dio_complete()
329 ret = generic_write_sync(dio->iocb, ret); in dio_complete()
330 dio->iocb->ki_complete(dio->iocb, ret, 0); in dio_complete()
351 struct dio *dio = bio->bi_private; in dio_bio_end_aio()
359 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_aio()
360 remaining = --dio->refcount; in dio_bio_end_aio()
361 if (remaining == 1 && dio->waiter) in dio_bio_end_aio()
362 wake_up_process(dio->waiter); in dio_bio_end_aio()
363 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_aio()
374 if (dio->result) in dio_bio_end_aio()
375 defer_completion = dio->defer_completion || in dio_bio_end_aio()
376 (dio->op == REQ_OP_WRITE && in dio_bio_end_aio()
377 dio->inode->i_mapping->nrpages); in dio_bio_end_aio()
379 INIT_WORK(&dio->complete_work, dio_aio_complete_work); in dio_bio_end_aio()
380 queue_work(dio->inode->i_sb->s_dio_done_wq, in dio_bio_end_aio()
381 &dio->complete_work); in dio_bio_end_aio()
389 * The BIO completion handler simply queues the BIO up for the process-context
393 * implement a singly-linked list of completed BIOs, at dio->bio_list.
397 struct dio *dio = bio->bi_private; in dio_bio_end_io()
400 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_io()
401 bio->bi_private = dio->bio_list; in dio_bio_end_io()
402 dio->bio_list = bio; in dio_bio_end_io()
403 if (--dio->refcount == 1 && dio->waiter) in dio_bio_end_io()
404 wake_up_process(dio->waiter); in dio_bio_end_io()
405 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_io()
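The completion handler above pushes finished bios onto an intrusive singly linked list (reusing bi_private as the link) and wakes the waiter once only the submitter's reference remains. Below is a userspace sketch of the same pattern, with a pthread mutex/condvar standing in for the spinlock and wake_up_process(); all names and counts are illustrative:

/* Userspace sketch of the dio_bio_end_io() pattern: completed items are
 * pushed onto a singly linked list under a lock, and the waiter is woken
 * when only the submitter's reference is left. */
#include <pthread.h>
#include <stdio.h>

struct item { struct item *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waiter    = PTHREAD_COND_INITIALIZER;
static struct item *completed;          /* head of the completed list */
static int refcount = 3;                /* submitter's ref + 2 in-flight items */

static void complete_item(struct item *it)
{
	pthread_mutex_lock(&list_lock);
	it->next = completed;               /* link through the item itself,  */
	completed = it;                     /* like bio->bi_private is reused */
	if (--refcount == 1)                /* only the submitter's ref remains */
		pthread_cond_signal(&waiter);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct item a = { .id = 1 }, b = { .id = 2 };

	complete_item(&a);
	complete_item(&b);

	pthread_mutex_lock(&list_lock);
	while (refcount > 1)
		pthread_cond_wait(&waiter, &list_lock);
	for (struct item *it = completed; it; it = it->next)
		printf("reaped item %d\n", it->id);
	pthread_mutex_unlock(&list_lock);
	return 0;
}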
409 * dio_end_io - handle the end io action for the given bio
418 struct dio *dio = bio->bi_private; in dio_end_io()
420 if (dio->is_async) in dio_end_io()
441 bio->bi_iter.bi_sector = first_sector; in dio_bio_alloc()
442 bio_set_op_attrs(bio, dio->op, dio->op_flags); in dio_bio_alloc()
443 if (dio->is_async) in dio_bio_alloc()
444 bio->bi_end_io = dio_bio_end_aio; in dio_bio_alloc()
446 bio->bi_end_io = dio_bio_end_io; in dio_bio_alloc()
448 bio->bi_write_hint = dio->iocb->ki_hint; in dio_bio_alloc()
450 sdio->bio = bio; in dio_bio_alloc()
451 sdio->logical_offset_in_bio = sdio->cur_page_fs_offset; in dio_bio_alloc()
459 * bios hold a dio reference between submit_bio and ->end_io.
463 struct bio *bio = sdio->bio; in dio_bio_submit()
466 bio->bi_private = dio; in dio_bio_submit()
468 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_submit()
469 dio->refcount++; in dio_bio_submit()
470 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_submit()
472 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) in dio_bio_submit()
475 dio->bio_disk = bio->bi_disk; in dio_bio_submit()
477 if (sdio->submit_io) { in dio_bio_submit()
478 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); in dio_bio_submit()
479 dio->bio_cookie = BLK_QC_T_NONE; in dio_bio_submit()
481 dio->bio_cookie = submit_bio(bio); in dio_bio_submit()
483 sdio->bio = NULL; in dio_bio_submit()
484 sdio->boundary = 0; in dio_bio_submit()
485 sdio->logical_offset_in_bio = 0; in dio_bio_submit()
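dio_bio_submit() bumps the dio refcount under bio_lock before handing the bio to the block layer, so the completion side can always drop its reference safely. A minimal userspace sketch of that ordering, using C11 atomics in place of the spinlock (names and the wake message are illustrative):

/* Sketch: take a reference before submitting, drop it on completion; when
 * the count falls back to the submitter's single reference, wake it. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount = 1;          /* the submitter's own reference */

static void submit_one(void)
{
	atomic_fetch_add(&refcount, 1);      /* dio->refcount++ before submit_bio() */
	/* ... hand the bio to the device here ... */
}

static void complete_one(void)
{
	if (atomic_fetch_sub(&refcount, 1) == 2)
		printf("last bio done, wake the waiter\n");
}

int main(void)
{
	submit_one();
	submit_one();
	complete_one();
	complete_one();                      /* refcount back to 1: submitter may finish */
	return 0;
}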
493 while (sdio->head < sdio->tail) in dio_cleanup()
494 put_page(dio->pages[sdio->head++]); in dio_cleanup()
500 * all bios have been issued so that dio->refcount can only decrease. This
508 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
516 while (dio->refcount > 1 && dio->bio_list == NULL) { in dio_await_one()
518 dio->waiter = current; in dio_await_one()
519 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
520 if (!(dio->iocb->ki_flags & IOCB_HIPRI) || in dio_await_one()
521 !blk_poll(dio->bio_disk->queue, dio->bio_cookie)) in dio_await_one()
524 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
525 dio->waiter = NULL; in dio_await_one()
527 if (dio->bio_list) { in dio_await_one()
528 bio = dio->bio_list; in dio_await_one()
529 dio->bio_list = bio->bi_private; in dio_await_one()
531 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
542 blk_status_t err = bio->bi_status; in dio_bio_complete()
545 if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT)) in dio_bio_complete()
546 dio->io_error = -EAGAIN; in dio_bio_complete()
548 dio->io_error = -EIO; in dio_bio_complete()
551 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) { in dio_bio_complete()
555 struct page *page = bvec->bv_page; in dio_bio_complete()
557 if (dio->op == REQ_OP_READ && !PageCompound(page) && in dio_bio_complete()
558 dio->should_dirty) in dio_bio_complete()
568 * Wait on and process all in-flight BIOs. This must only be called once
571 * errors are propagated through dio->io_error and should be propagated via
595 if (sdio->reap_counter++ >= 64) { in dio_bio_reap()
596 while (dio->bio_list) { in dio_bio_reap()
601 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_reap()
602 bio = dio->bio_list; in dio_bio_reap()
603 dio->bio_list = bio->bi_private; in dio_bio_reap()
604 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_reap()
609 sdio->reap_counter = 0; in dio_bio_reap()
625 sb->s_id); in sb_init_dio_done_wq()
627 return -ENOMEM; in sb_init_dio_done_wq()
631 old = cmpxchg(&sb->s_dio_done_wq, NULL, wq); in sb_init_dio_done_wq()
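The cmpxchg above lets several racing submitters try to create the per-superblock completion workqueue while only the first installation wins. A userspace sketch of the same idiom, with C11 atomics standing in for the kernel's cmpxchg and a malloc() standing in for alloc_workqueue() (names are illustrative):

/* Lazily install a shared resource exactly once; losers free their copy. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(void *) done_wq;          /* plays the role of sb->s_dio_done_wq */

static int init_done_wq(void)
{
	void *wq = malloc(64);               /* stand-in for alloc_workqueue() */
	if (!wq)
		return -1;                       /* -ENOMEM in the kernel */

	void *expected = NULL;
	if (!atomic_compare_exchange_strong(&done_wq, &expected, wq))
		free(wq);                        /* lost the race: another wq is already installed */
	return 0;
}

int main(void)
{
	init_done_wq();
	init_done_wq();                      /* second call only loses the race */
	printf("workqueue pointer %p\n", atomic_load(&done_wq));
	return 0;
}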
640 struct super_block *sb = dio->inode->i_sb; in dio_set_defer_completion()
642 if (dio->defer_completion) in dio_set_defer_completion()
644 dio->defer_completion = true; in dio_set_defer_completion()
645 if (!sb->s_dio_done_wq) in dio_set_defer_completion()
652 * of available blocks at sdio->blocks_available. These are in units of the
656 * it uses the passed inode-relative block number as the file offset, as usual.
658 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
661 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
663 * bh->b_blocknr.
668 * In the case of filesystem holes: the fs may return an arbitrarily-large
670 * buffer_mapped(). However the direct-io code will only process holes one
671 * block at a time - it will repeatedly call get_block() as it walks the hole.
677 sector_t fs_startblk; /* Into file, in filesystem-sized blocks */ in get_more_blocks()
678 sector_t fs_endblk; /* Into file, in filesystem-sized blocks */ in get_more_blocks()
679 unsigned long fs_count; /* Number of filesystem-sized blocks */ in get_more_blocks()
681 unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; in get_more_blocks()
688 ret = dio->page_errors; in get_more_blocks()
690 BUG_ON(sdio->block_in_file >= sdio->final_block_in_request); in get_more_blocks()
691 fs_startblk = sdio->block_in_file >> sdio->blkfactor; in get_more_blocks()
692 fs_endblk = (sdio->final_block_in_request - 1) >> in get_more_blocks()
693 sdio->blkfactor; in get_more_blocks()
694 fs_count = fs_endblk - fs_startblk + 1; in get_more_blocks()
696 map_bh->b_state = 0; in get_more_blocks()
697 map_bh->b_size = fs_count << i_blkbits; in get_more_blocks()
710 create = dio->op == REQ_OP_WRITE; in get_more_blocks()
711 if (dio->flags & DIO_SKIP_HOLES) { in get_more_blocks()
712 i_size = i_size_read(dio->inode); in get_more_blocks()
713 if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits) in get_more_blocks()
717 ret = (*sdio->get_block)(dio->inode, fs_startblk, in get_more_blocks()
721 dio->private = map_bh->b_private; in get_more_blocks()
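The lines above size the mapping request handed to get_block(): the remaining dio-block range is converted to whole fs blocks and b_size asks for fs_count << i_blkbits bytes. A worked userspace sketch with invented numbers (only the shifts and the +1 mirror the code):

/* How many bytes to ask the filesystem to map, starting at which fs block. */
#include <stdio.h>

int main(void)
{
	unsigned blkbits = 9, blkfactor = 3, i_blkbits = blkbits + blkfactor;
	unsigned long long block_in_file = 10;            /* first dio block still to map */
	unsigned long long final_block_in_request = 50;   /* one past the last dio block */

	unsigned long long fs_startblk = block_in_file >> blkfactor;                /* 1 */
	unsigned long long fs_endblk   = (final_block_in_request - 1) >> blkfactor; /* 6 */
	unsigned long long fs_count    = fs_endblk - fs_startblk + 1;               /* 6 fs blocks */

	printf("ask get_block() to map %llu bytes starting at fs block %llu\n",
	       fs_count << i_blkbits, fs_startblk);
	return 0;
}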
741 sector = start_sector << (sdio->blkbits - 9); in dio_new_bio()
742 nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES); in dio_new_bio()
744 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); in dio_new_bio()
745 sdio->boundary = 0; in dio_new_bio()
753 * the just-added page.
755 * Return zero on success. Non-zero means the caller needs to start a new BIO.
761 ret = bio_add_page(sdio->bio, sdio->cur_page, in dio_bio_add_page()
762 sdio->cur_page_len, sdio->cur_page_offset); in dio_bio_add_page()
763 if (ret == sdio->cur_page_len) { in dio_bio_add_page()
767 if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE) in dio_bio_add_page()
768 sdio->pages_in_io--; in dio_bio_add_page()
769 get_page(sdio->cur_page); in dio_bio_add_page()
770 sdio->final_block_in_bio = sdio->cur_page_block + in dio_bio_add_page()
771 (sdio->cur_page_len >> sdio->blkbits); in dio_bio_add_page()
782 * starts on-disk at cur_page_block.
794 if (sdio->bio) { in dio_send_cur_page()
795 loff_t cur_offset = sdio->cur_page_fs_offset; in dio_send_cur_page()
796 loff_t bio_next_offset = sdio->logical_offset_in_bio + in dio_send_cur_page()
797 sdio->bio->bi_iter.bi_size; in dio_send_cur_page()
802 * Btrfs cannot handle having logically non-contiguous requests in dio_send_cur_page()
805 * Logical: [0-4095][HOLE][8192-12287] in dio_send_cur_page()
806 * Physical: [0-4095] [4096-8191] in dio_send_cur_page()
813 if (sdio->final_block_in_bio != sdio->cur_page_block || in dio_send_cur_page()
818 if (sdio->bio == NULL) { in dio_send_cur_page()
819 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
826 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
844 * The chunk of page starts on-disk at blocknr.
846 * We perform deferred IO, by recording the last-submitted page inside our
860 if (dio->op == REQ_OP_WRITE) { in submit_page_section()
870 if (sdio->cur_page == page && in submit_page_section()
871 sdio->cur_page_offset + sdio->cur_page_len == offset && in submit_page_section()
872 sdio->cur_page_block + in submit_page_section()
873 (sdio->cur_page_len >> sdio->blkbits) == blocknr) { in submit_page_section()
874 sdio->cur_page_len += len; in submit_page_section()
881 if (sdio->cur_page) { in submit_page_section()
883 put_page(sdio->cur_page); in submit_page_section()
884 sdio->cur_page = NULL; in submit_page_section()
890 sdio->cur_page = page; in submit_page_section()
891 sdio->cur_page_offset = offset; in submit_page_section()
892 sdio->cur_page_len = len; in submit_page_section()
893 sdio->cur_page_block = blocknr; in submit_page_section()
894 sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits; in submit_page_section()
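The test a few lines above decides whether a new chunk can simply extend the cached cur_page fragment: it must land on the same page, immediately after the cached bytes, and on the next disk block. A userspace sketch of that predicate (the struct and helper below are invented; the three conditions mirror the listed code):

#include <stdbool.h>
#include <stdio.h>

struct cur {
	const void *page;
	unsigned offset, len;
	unsigned long long block;
	unsigned blkbits;
};

/* Can (page, offset, blocknr) be merged into the cached fragment? */
static bool can_merge(const struct cur *c, const void *page,
		      unsigned offset, unsigned long long blocknr)
{
	return c->page == page &&
	       c->offset + c->len == offset &&
	       c->block + (c->len >> c->blkbits) == blocknr;
}

int main(void)
{
	char pg[4096];
	struct cur c = { .page = pg, .offset = 0, .len = 1024, .block = 8, .blkbits = 9 };

	/* next chunk starts right after the cached one: offset 1024, block 10 */
	printf("mergeable: %d\n", can_merge(&c, pg, 1024, 10));
	return 0;
}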
897 * If sdio->boundary then we want to schedule the IO now to in submit_page_section()
900 if (sdio->boundary) { in submit_page_section()
902 if (sdio->bio) in submit_page_section()
904 put_page(sdio->cur_page); in submit_page_section()
905 sdio->cur_page = NULL; in submit_page_section()
912 * the block for us, we need to fill in the unused portion of the
913 * block with zeros. This happens only if the user buffer, file offset or
914 * IO length is not a multiple of the filesystem block size.
927 sdio->start_zero_done = 1; in dio_zero_block()
928 if (!sdio->blkfactor || !buffer_new(map_bh)) in dio_zero_block()
931 dio_blocks_per_fs_block = 1 << sdio->blkfactor; in dio_zero_block()
932 this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1); in dio_zero_block()
942 this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks; in dio_zero_block()
944 this_chunk_bytes = this_chunk_blocks << sdio->blkbits; in dio_zero_block()
948 sdio->next_block_for_io, map_bh)) in dio_zero_block()
951 sdio->next_block_for_io += this_chunk_blocks; in dio_zero_block()
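The zeroing arithmetic above works in dio blocks within one fs block. A worked userspace sketch with invented values: with blkfactor=3 a fs block holds 8 dio blocks, so an IO starting at dio block 21 leaves 5 leading dio blocks of that fs block to be zeroed (the end case would zero the complementary count instead):

#include <stdio.h>

int main(void)
{
	unsigned blkbits = 9, blkfactor = 3;
	unsigned dio_blocks_per_fs_block = 1u << blkfactor;        /* 8 */
	unsigned long long block_in_file = 21;

	unsigned this_chunk_blocks = block_in_file & (dio_blocks_per_fs_block - 1); /* 5 */
	/* dio_zero_block(..., end=1, ...) would instead zero
	 * dio_blocks_per_fs_block - this_chunk_blocks blocks at the tail. */
	printf("zero %u bytes at the start of the fs block\n",
	       this_chunk_blocks << blkbits);                      /* 2560 */
	return 0;
}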
960 * happily perform page-sized but 512-byte aligned IOs. It is important that
963 * So what we do is to permit the ->get_block function to populate bh.b_size
966 * For best results, the blockdev should be set up with 512-byte i_blkbits and
973 const unsigned blkbits = sdio->blkbits; in do_direct_IO()
974 const unsigned i_blkbits = blkbits + sdio->blkfactor; in do_direct_IO()
977 while (sdio->block_in_file < sdio->final_block_in_request) { in do_direct_IO()
986 from = sdio->head ? 0 : sdio->from; in do_direct_IO()
987 to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE; in do_direct_IO()
988 sdio->head++; in do_direct_IO()
995 if (sdio->blocks_available == 0) { in do_direct_IO()
1010 sdio->blocks_available = in do_direct_IO()
1011 map_bh->b_size >> blkbits; in do_direct_IO()
1012 sdio->next_block_for_io = in do_direct_IO()
1013 map_bh->b_blocknr << sdio->blkfactor; in do_direct_IO()
1016 map_bh->b_bdev, in do_direct_IO()
1017 map_bh->b_blocknr, in do_direct_IO()
1018 map_bh->b_size >> i_blkbits); in do_direct_IO()
1021 if (!sdio->blkfactor) in do_direct_IO()
1024 blkmask = (1 << sdio->blkfactor) - 1; in do_direct_IO()
1025 dio_remainder = (sdio->block_in_file & blkmask); in do_direct_IO()
1029 * starts partway into a fs-block, in do_direct_IO()
1030 * dio_remainder will be non-zero. If the IO in do_direct_IO()
1036 * on-disk in do_direct_IO()
1039 sdio->next_block_for_io += dio_remainder; in do_direct_IO()
1040 sdio->blocks_available -= dio_remainder; in do_direct_IO()
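The adjustment above handles an IO that starts partway into an already-allocated fs block: both the on-disk position and the remaining block budget are advanced past the dio blocks that will not be transferred. A userspace sketch with invented values:

#include <stdio.h>

int main(void)
{
	unsigned blkfactor = 3;
	unsigned blkmask = (1u << blkfactor) - 1;                /* 7 */
	unsigned long long block_in_file = 21;
	unsigned long long next_block_for_io = 8 << blkfactor;   /* fs block 8 -> dio block 64 */
	unsigned long long blocks_available = 8;                 /* one whole fs block */

	unsigned dio_remainder = block_in_file & blkmask;        /* 5 */
	next_block_for_io += dio_remainder;                      /* 69: skip the first 5 dio blocks */
	blocks_available  -= dio_remainder;                      /* 3 dio blocks usable */

	printf("io starts at dio block %llu, %llu blocks usable\n",
	       next_block_for_io, blocks_available);
	return 0;
}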
1047 /* AKPM: eargh, -ENOTBLK is a hack */ in do_direct_IO()
1048 if (dio->op == REQ_OP_WRITE) { in do_direct_IO()
1050 return -ENOTBLK; in do_direct_IO()
1057 i_size_aligned = ALIGN(i_size_read(dio->inode), in do_direct_IO()
1059 if (sdio->block_in_file >= in do_direct_IO()
1066 sdio->block_in_file++; in do_direct_IO()
1068 dio->result += 1 << blkbits; in do_direct_IO()
1077 if (unlikely(sdio->blkfactor && !sdio->start_zero_done)) in do_direct_IO()
1084 this_chunk_blocks = sdio->blocks_available; in do_direct_IO()
1085 u = (to - from) >> blkbits; in do_direct_IO()
1088 u = sdio->final_block_in_request - sdio->block_in_file; in do_direct_IO()
1094 if (this_chunk_blocks == sdio->blocks_available) in do_direct_IO()
1095 sdio->boundary = buffer_boundary(map_bh); in do_direct_IO()
1099 sdio->next_block_for_io, in do_direct_IO()
1105 sdio->next_block_for_io += this_chunk_blocks; in do_direct_IO()
1107 sdio->block_in_file += this_chunk_blocks; in do_direct_IO()
1109 dio->result += this_chunk_bytes; in do_direct_IO()
1110 sdio->blocks_available -= this_chunk_blocks; in do_direct_IO()
1112 BUG_ON(sdio->block_in_file > sdio->final_block_in_request); in do_direct_IO()
1113 if (sdio->block_in_file == sdio->final_block_in_request) in do_direct_IO()
1134 * return code that the caller will hand to ->complete(). in drop_refcount()
1140 spin_lock_irqsave(&dio->bio_lock, flags); in drop_refcount()
1141 ret2 = --dio->refcount; in drop_refcount()
1142 spin_unlock_irqrestore(&dio->bio_lock, flags); in drop_refcount()
1150 * - if the flags value contains DIO_LOCKING we use a fancy locking
1155 * - if the flags value does NOT contain DIO_LOCKING we don't use any
1177 unsigned i_blkbits = READ_ONCE(inode->i_blkbits); in do_blockdev_direct_IO()
1179 unsigned blocksize_mask = (1 << blkbits) - 1; in do_blockdev_direct_IO()
1180 ssize_t retval = -EINVAL; in do_blockdev_direct_IO()
1182 loff_t offset = iocb->ki_pos; in do_blockdev_direct_IO()
1198 blocksize_mask = (1 << blkbits) - 1; in do_blockdev_direct_IO()
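The masks computed above support the alignment test in do_blockdev_direct_IO(). The actual check is not among the matched lines, so the following is only an illustration of the idiom: with a power-of-two block size, "value & (blocksize - 1)" is non-zero exactly when value is not a multiple of the block size.

#include <stdio.h>

int main(void)
{
	unsigned blkbits = 9;
	unsigned blocksize_mask = (1u << blkbits) - 1;      /* 0x1ff for 512-byte blocks */
	unsigned long long offset = 1536, count = 4096, addr = 0x7f0000001000ULL;

	if ((offset | count | addr) & blocksize_mask)
		printf("misaligned: direct IO would be refused (-EINVAL) or retried with the bdev blocksize\n");
	else
		printf("aligned for direct IO\n");
	return 0;
}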
1208 retval = -ENOMEM; in do_blockdev_direct_IO()
1218 dio->flags = flags; in do_blockdev_direct_IO()
1219 if (dio->flags & DIO_LOCKING) { in do_blockdev_direct_IO()
1222 iocb->ki_filp->f_mapping; in do_blockdev_direct_IO()
1228 end - 1); in do_blockdev_direct_IO()
1237 /* Once we have sampled i_size, check for reads beyond EOF */ in do_blockdev_direct_IO()
1238 dio->i_size = i_size_read(inode); in do_blockdev_direct_IO()
1239 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { in do_blockdev_direct_IO()
1240 if (dio->flags & DIO_LOCKING) in do_blockdev_direct_IO()
1254 dio->is_async = false; in do_blockdev_direct_IO()
1256 dio->is_async = false; in do_blockdev_direct_IO()
1258 dio->is_async = true; in do_blockdev_direct_IO()
1260 dio->inode = inode; in do_blockdev_direct_IO()
1262 dio->op = REQ_OP_WRITE; in do_blockdev_direct_IO()
1263 dio->op_flags = REQ_SYNC | REQ_IDLE; in do_blockdev_direct_IO()
1264 if (iocb->ki_flags & IOCB_NOWAIT) in do_blockdev_direct_IO()
1265 dio->op_flags |= REQ_NOWAIT; in do_blockdev_direct_IO()
1267 dio->op = REQ_OP_READ; in do_blockdev_direct_IO()
1272 * so that we can call ->fsync. in do_blockdev_direct_IO()
1274 if (dio->is_async && iov_iter_rw(iter) == WRITE) { in do_blockdev_direct_IO()
1276 if (iocb->ki_flags & IOCB_DSYNC) in do_blockdev_direct_IO()
1278 else if (!dio->inode->i_sb->s_dio_done_wq) { in do_blockdev_direct_IO()
1284 retval = sb_init_dio_done_wq(dio->inode->i_sb); in do_blockdev_direct_IO()
1303 sdio.blkfactor = i_blkbits - blkbits; in do_blockdev_direct_IO()
1307 dio->end_io = end_io; in do_blockdev_direct_IO()
1309 sdio.final_block_in_bio = -1; in do_blockdev_direct_IO()
1310 sdio.next_block_for_io = -1; in do_blockdev_direct_IO()
1312 dio->iocb = iocb; in do_blockdev_direct_IO()
1314 spin_lock_init(&dio->bio_lock); in do_blockdev_direct_IO()
1315 dio->refcount = 1; in do_blockdev_direct_IO()
1317 dio->should_dirty = (iter->type == ITER_IOVEC); in do_blockdev_direct_IO()
1322 * In case of non-aligned buffers, we may need 2 more in do_blockdev_direct_IO()
1336 if (retval == -ENOTBLK) { in do_blockdev_direct_IO()
1344 * There may be some unwritten disk at the end of a part-written in do_blockdev_direct_IO()
1345 * fs-block-sized block. Go zero that now. in do_blockdev_direct_IO()
1374 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) in do_blockdev_direct_IO()
1375 inode_unlock(dio->inode); in do_blockdev_direct_IO()
1381 * call aio_complete is when we return -EIOCBQUEUED, so we key on that. in do_blockdev_direct_IO()
1382 * This had *better* be the only place that raises -EIOCBQUEUED. in do_blockdev_direct_IO()
1384 BUG_ON(retval == -EIOCBQUEUED); in do_blockdev_direct_IO()
1385 if (dio->is_async && retval == 0 && dio->result && in do_blockdev_direct_IO()
1386 (iov_iter_rw(iter) == READ || dio->result == count)) in do_blockdev_direct_IO()
1387 retval = -EIOCBQUEUED; in do_blockdev_direct_IO()
1394 BUG_ON(retval != -EIOCBQUEUED); in do_blockdev_direct_IO()
1414 prefetch(&bdev->bd_disk->part_tbl); in __blockdev_direct_IO()
1415 prefetch(bdev->bd_queue); in __blockdev_direct_IO()
1416 prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); in __blockdev_direct_IO()