Lines matching refs:dio (fs/direct-io.c)

Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark entries where dio is a function parameter or a local variable.
109 struct dio { struct
159 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) in dio_refill_pages() argument
163 ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, in dio_refill_pages()
166 if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { in dio_refill_pages()
173 if (dio->page_errors == 0) in dio_refill_pages()
174 dio->page_errors = ret; in dio_refill_pages()
176 dio->pages[0] = page; in dio_refill_pages()
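
dio_refill_pages() pins the next batch of user pages with iov_iter_get_pages() into dio->pages[]. When that fails partway through a write that still has mapped blocks available, the lines above show it substituting a page of zeroes and recording only the first fault in dio->page_errors, so the write can run to completion and the fault is reported later by dio_complete(). A minimal userspace sketch of that fallback, with illustrative names (fake_dio, refill_pages) standing in for the kernel types:

    #include <stdio.h>

    #define PAGE_SIZE 4096
    static const char zero_page[PAGE_SIZE];   /* stand-in for ZERO_PAGE(0) */

    struct fake_dio {
        const char *pages[64];
        int page_errors;                      /* only the first error is kept */
    };

    static int refill_pages(struct fake_dio *dio, int is_write,
                            int blocks_available, int get_pages_ret)
    {
        if (get_pages_ret < 0 && blocks_available && is_write) {
            if (dio->page_errors == 0)
                dio->page_errors = get_pages_ret;
            dio->pages[0] = zero_page;        /* keep the write moving */
            return 0;
        }
        return get_pages_ret;
    }

    int main(void)
    {
        struct fake_dio dio = { { 0 }, 0 };
        refill_pages(&dio, 1, 1, -14);        /* simulated -EFAULT */
        refill_pages(&dio, 1, 1, -5);         /* a later error is dropped */
        printf("first recorded error: %d\n", dio.page_errors);   /* -14 */
        return 0;
    }
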
201 static inline struct page *dio_get_page(struct dio *dio, in dio_get_page() argument
207 ret = dio_refill_pages(dio, sdio); in dio_get_page()
212 return dio->pages[sdio->head]; in dio_get_page()
227 static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, in dio_complete() argument
241 if (dio->result) { in dio_complete()
242 transferred = dio->result; in dio_complete()
245 if ((dio->rw == READ) && ((offset + transferred) > dio->i_size)) in dio_complete()
246 transferred = dio->i_size - offset; in dio_complete()
250 ret = dio->page_errors; in dio_complete()
252 ret = dio->io_error; in dio_complete()
256 if (dio->end_io && dio->result) in dio_complete()
257 dio->end_io(dio->iocb, offset, transferred, dio->private); in dio_complete()
259 if (!(dio->flags & DIO_SKIP_DIO_COUNT)) in dio_complete()
260 inode_dio_end(dio->inode); in dio_complete()
263 if (dio->rw & WRITE) { in dio_complete()
266 err = generic_write_sync(dio->iocb->ki_filp, offset, in dio_complete()
272 dio->iocb->ki_complete(dio->iocb, ret, 0); in dio_complete()
275 kmem_cache_free(dio_cache, dio); in dio_complete()
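
dio_complete() settles the final byte count and return value. The lines above show two rules: a read that ran past end-of-file is clamped so at most dio->i_size - offset bytes count as transferred, and when no earlier error is pending, dio->page_errors takes precedence over dio->io_error before the transferred count is returned. A small sketch of that precedence logic (stand-in names, not the kernel function):

    #include <stdio.h>

    static long long complete_result(long long result, long long offset,
                                     long long i_size, int is_read,
                                     int page_errors, int io_error,
                                     long long ret)
    {
        long long transferred = result;

        /* A read that crossed EOF only transferred up to i_size. */
        if (is_read && offset + transferred > i_size)
            transferred = i_size - offset;

        if (ret == 0)
            ret = page_errors;   /* user-page faults win... */
        if (ret == 0)
            ret = io_error;      /* ...then device errors */
        if (ret == 0)
            ret = transferred;
        return ret;
    }

    int main(void)
    {
        /* An 8 KiB read starting 4 KiB before EOF counts 4096 bytes. */
        printf("%lld\n", complete_result(8192, 4096, 8192, 1, 0, 0, 0));
        /* A page fault outranks a later I/O error. */
        printf("%lld\n", complete_result(8192, 0, 8192, 1, -14, -5, 0));
        return 0;
    }
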
281 struct dio *dio = container_of(work, struct dio, complete_work); in dio_aio_complete_work() local
283 dio_complete(dio, dio->iocb->ki_pos, 0, true); in dio_aio_complete_work()
286 static int dio_bio_complete(struct dio *dio, struct bio *bio);
293 struct dio *dio = bio->bi_private; in dio_bio_end_aio() local
298 dio_bio_complete(dio, bio); in dio_bio_end_aio()
300 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_aio()
301 remaining = --dio->refcount; in dio_bio_end_aio()
302 if (remaining == 1 && dio->waiter) in dio_bio_end_aio()
303 wake_up_process(dio->waiter); in dio_bio_end_aio()
304 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_aio()
307 if (dio->result && dio->defer_completion) { in dio_bio_end_aio()
308 INIT_WORK(&dio->complete_work, dio_aio_complete_work); in dio_bio_end_aio()
309 queue_work(dio->inode->i_sb->s_dio_done_wq, in dio_bio_end_aio()
310 &dio->complete_work); in dio_bio_end_aio()
312 dio_complete(dio, dio->iocb->ki_pos, 0, true); in dio_bio_end_aio()
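
dio_bio_end_aio() is the bio completion handler for async I/O: it completes the bio, drops dio->refcount under bio_lock (waking a waiter while one reference remains), and once the last reference is gone it either queues dio_aio_complete_work() on the superblock's s_dio_done_wq or calls dio_complete() inline. A sketch of that last-reference decision; the remaining == 0 guard carries no dio reference so it does not appear in the matching lines above and is assumed here:

    #include <stdio.h>
    #include <stdbool.h>

    struct dio_ {
        int  refcount;
        long result;
        bool defer_completion;   /* filesystem asked for process context */
    };

    static void complete_inline(struct dio_ *dio)
    {
        (void)dio;
        printf("dio_complete() runs from the end_io handler\n");
    }

    static void queue_complete_work(struct dio_ *dio)
    {
        (void)dio;
        printf("complete_work queued on s_dio_done_wq\n");
    }

    static void bio_end_aio(struct dio_ *dio)
    {
        int remaining = --dio->refcount;   /* under bio_lock in the kernel */

        if (remaining == 0) {
            if (dio->result && dio->defer_completion)
                queue_complete_work(dio);  /* INIT_WORK + queue_work above */
            else
                complete_inline(dio);
        }
    }

    int main(void)
    {
        struct dio_ a = { 1, 4096, true  };
        struct dio_ b = { 1, 4096, false };

        bio_end_aio(&a);   /* deferred to the workqueue */
        bio_end_aio(&b);   /* completed inline */
        return 0;
    }

Deferring matters because dio_complete() may call end_io and generic_write_sync(), which want process context rather than the interrupt context a bio completion runs in.
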
326 struct dio *dio = bio->bi_private; in dio_bio_end_io() local
329 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_io()
330 bio->bi_private = dio->bio_list; in dio_bio_end_io()
331 dio->bio_list = bio; in dio_bio_end_io()
332 if (--dio->refcount == 1 && dio->waiter) in dio_bio_end_io()
333 wake_up_process(dio->waiter); in dio_bio_end_io()
334 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_io()
348 struct dio *dio = bio->bi_private; in dio_end_io() local
350 if (dio->is_async) in dio_end_io()
358 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, in dio_bio_alloc() argument
372 if (dio->is_async) in dio_bio_alloc()
388 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) in dio_bio_submit() argument
393 bio->bi_private = dio; in dio_bio_submit()
395 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_submit()
396 dio->refcount++; in dio_bio_submit()
397 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_submit()
399 if (dio->is_async && dio->rw == READ && dio->should_dirty) in dio_bio_submit()
402 dio->bio_bdev = bio->bi_bdev; in dio_bio_submit()
405 sdio->submit_io(dio->rw, bio, dio->inode, in dio_bio_submit()
407 dio->bio_cookie = BLK_QC_T_NONE; in dio_bio_submit()
409 dio->bio_cookie = submit_bio(dio->rw, bio); in dio_bio_submit()
419 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) in dio_cleanup() argument
422 page_cache_release(dio->pages[sdio->head++]); in dio_cleanup()
431 static struct bio *dio_await_one(struct dio *dio) in dio_await_one() argument
436 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
444 while (dio->refcount > 1 && dio->bio_list == NULL) { in dio_await_one()
446 dio->waiter = current; in dio_await_one()
447 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
448 if (!blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie)) in dio_await_one()
451 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
452 dio->waiter = NULL; in dio_await_one()
454 if (dio->bio_list) { in dio_await_one()
455 bio = dio->bio_list; in dio_await_one()
456 dio->bio_list = bio->bi_private; in dio_await_one()
458 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
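
dio_bio_end_io() (the synchronous handler) and dio_await_one() form a producer/consumer pair: the handler chains the finished bio onto dio->bio_list, reusing bi_private as the link, and wakes the submitter once only its own reference remains; dio_await_one() sleeps, or busy-polls the device via blk_poll(), while dio->refcount > 1 and the list is empty, then pops one bio. A userspace analog of that handshake, with a pthread mutex and condition variable standing in for bio_lock, the waiter pointer, and wake_up_process():

    #include <pthread.h>
    #include <stdio.h>

    struct bio_ { struct bio_ *next; int id; };

    struct dio_ {
        pthread_mutex_t lock;      /* stands in for dio->bio_lock */
        pthread_cond_t  wait;      /* stands in for dio->waiter */
        struct bio_ *bio_list;
        int refcount;
    };

    static struct bio_ *await_one(struct dio_ *dio)
    {
        struct bio_ *bio = NULL;

        pthread_mutex_lock(&dio->lock);
        /* Same guard as the listing: wait while other references are
         * live and nothing has completed yet. */
        while (dio->refcount > 1 && dio->bio_list == NULL)
            pthread_cond_wait(&dio->wait, &dio->lock);
        if (dio->bio_list) {
            bio = dio->bio_list;
            dio->bio_list = bio->next;
        }
        pthread_mutex_unlock(&dio->lock);
        return bio;
    }

    static void *bio_end_io(void *arg)     /* the producer side */
    {
        struct dio_ *dio = arg;
        static struct bio_ done = { NULL, 42 };

        pthread_mutex_lock(&dio->lock);
        done.next = dio->bio_list;         /* bi_private reused as link */
        dio->bio_list = &done;
        if (--dio->refcount == 1)          /* submitter holds the last ref */
            pthread_cond_signal(&dio->wait);
        pthread_mutex_unlock(&dio->lock);
        return NULL;
    }

    int main(void)
    {
        struct dio_ dio = { PTHREAD_MUTEX_INITIALIZER,
                            PTHREAD_COND_INITIALIZER, NULL, 2 };
        pthread_t t;

        pthread_create(&t, NULL, bio_end_io, &dio);
        printf("reaped bio %d\n", await_one(&dio)->id);
        pthread_join(&t, NULL);
        return 0;
    }
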
465 static int dio_bio_complete(struct dio *dio, struct bio *bio) in dio_bio_complete() argument
472 dio->io_error = -EIO; in dio_bio_complete()
474 if (dio->is_async && dio->rw == READ && dio->should_dirty) { in dio_bio_complete()
481 if (dio->rw == READ && !PageCompound(page) && in dio_bio_complete()
482 dio->should_dirty) in dio_bio_complete()
499 static void dio_await_completion(struct dio *dio) in dio_await_completion() argument
503 bio = dio_await_one(dio); in dio_await_completion()
505 dio_bio_complete(dio, bio); in dio_await_completion()
516 static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio) in dio_bio_reap() argument
521 while (dio->bio_list) { in dio_bio_reap()
526 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_reap()
527 bio = dio->bio_list; in dio_bio_reap()
528 dio->bio_list = bio->bi_private; in dio_bio_reap()
529 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_reap()
530 ret2 = dio_bio_complete(dio, bio); in dio_bio_reap()
563 static int dio_set_defer_completion(struct dio *dio) in dio_set_defer_completion() argument
565 struct super_block *sb = dio->inode->i_sb; in dio_set_defer_completion()
567 if (dio->defer_completion) in dio_set_defer_completion()
569 dio->defer_completion = true; in dio_set_defer_completion()
598 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, in get_more_blocks() argument
612 ret = dio->page_errors; in get_more_blocks()
634 create = dio->rw & WRITE; in get_more_blocks()
635 if (dio->flags & DIO_SKIP_HOLES) { in get_more_blocks()
636 if (sdio->block_in_file < (i_size_read(dio->inode) >> in get_more_blocks()
641 ret = (*sdio->get_block)(dio->inode, fs_startblk, in get_more_blocks()
645 dio->private = map_bh->b_private; in get_more_blocks()
648 ret = dio_set_defer_completion(dio); in get_more_blocks()
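
get_more_blocks() asks the filesystem's get_block callback to map, and for writes allocate, the next run of blocks. The create flag comes from dio->rw & WRITE, but under DIO_SKIP_HOLES it is cleared for writes that land inside i_size, so a write over a hole maps nothing and the caller falls back to buffered I/O instead of allocating here. A sketch of that decision; the flag value and parameter names are illustrative:

    #include <stdio.h>

    #define DIO_SKIP_HOLES 0x02   /* stand-in value */

    static int create_flag(int is_write, unsigned flags,
                           long long block_in_file,
                           long long i_size, unsigned blkbits)
    {
        int create = is_write;

        if (create && (flags & DIO_SKIP_HOLES)) {
            /* Inside i_size, a missing mapping is a real hole:
             * do not allocate, let the caller fall back. */
            if (block_in_file < (i_size >> blkbits))
                create = 0;
        }
        return create;
    }

    int main(void)
    {
        /* Write at block 1 of an 8 KiB file, 4 KiB blocks: no create. */
        printf("%d\n", create_flag(1, DIO_SKIP_HOLES, 1, 8192, 12));
        /* Write extending the file past EOF: create. */
        printf("%d\n", create_flag(1, DIO_SKIP_HOLES, 4, 8192, 12));
        return 0;
    }
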
656 static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio, in dio_new_bio() argument
662 ret = dio_bio_reap(dio, sdio); in dio_new_bio()
668 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); in dio_new_bio()
713 static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, in dio_send_cur_page() argument
739 dio_bio_submit(dio, sdio); in dio_send_cur_page()
743 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
749 dio_bio_submit(dio, sdio); in dio_send_cur_page()
750 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
778 submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, in submit_page_section() argument
785 if (dio->rw & WRITE) { in submit_page_section()
807 ret = dio_send_cur_page(dio, sdio, map_bh); in submit_page_section()
826 ret = dio_send_cur_page(dio, sdio, map_bh); in submit_page_section()
828 dio_bio_submit(dio, sdio); in submit_page_section()
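
submit_page_section() batches I/O one page-sized chunk at a time: a new chunk is merged into the pending sdio->cur_page when it is contiguous both in page bytes and in disk blocks; otherwise dio_send_cur_page() first flushes the pending page into the in-flight bio, and when that bio cannot be extended it is submitted and a fresh one opened (the dio_bio_submit() / dio_new_bio() pairs above). The matching lines show only the send and submit calls, so the contiguity test below is a hedged reconstruction:

    #include <stdio.h>
    #include <stdbool.h>

    struct sdio_ {
        const void *cur_page;               /* pending page, if any */
        unsigned cur_page_offset;
        unsigned cur_page_len;
        unsigned long long cur_page_block;  /* first disk block of chunk */
        unsigned blkbits;                   /* log2 of the block size */
    };

    static bool can_merge(struct sdio_ *sdio, const void *page,
                          unsigned offset, unsigned len,
                          unsigned long long blocknr)
    {
        (void)len;
        if (sdio->cur_page != page)
            return false;
        if (offset != sdio->cur_page_offset + sdio->cur_page_len)
            return false;
        /* The disk blocks must continue where the pending chunk ends. */
        return blocknr == sdio->cur_page_block +
                          (sdio->cur_page_len >> sdio->blkbits);
    }

    int main(void)
    {
        char page[4096];
        struct sdio_ s = { page, 0, 1024, 100, 9 };  /* 512-byte blocks */

        printf("%d\n", can_merge(&s, page, 1024, 512, 102)); /* 1: merge */
        printf("%d\n", can_merge(&s, page, 1024, 512, 200)); /* 0: send */
        return 0;
    }
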
840 static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh) in clean_blockdev_aliases() argument
845 nblocks = map_bh->b_size >> dio->inode->i_blkbits; in clean_blockdev_aliases()
862 static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio, in dio_zero_block() argument
890 if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes, in dio_zero_block()
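
dio_zero_block() covers sub-filesystem-block alignment: when the I/O starts or ends partway through a newly allocated fs block, the uncovered leading (end == 0) or trailing (end == 1) part of that block is written from the zero page via submit_page_section(), which is the call visible above. A sketch of the alignment arithmetic, reconstructed from the calling convention; blkfactor and blkbits are assumed names for the fs-block/dio-block ratio and the dio block size:

    #include <stdio.h>

    static unsigned zero_bytes(unsigned long long block_in_file,
                               unsigned blkfactor, /* fs block = 2^blkfactor dio blocks */
                               unsigned blkbits,   /* dio block = 2^blkbits bytes */
                               int end)
    {
        unsigned per_fs_block = 1u << blkfactor;
        unsigned chunk = block_in_file & (per_fs_block - 1);

        if (chunk == 0)
            return 0;                      /* already fs-block aligned */
        if (end)
            chunk = per_fs_block - chunk;  /* zero to the end of the block */
        return chunk << blkbits;
    }

    int main(void)
    {
        /* 4 KiB fs blocks over 512-byte dio blocks (blkfactor 3): an I/O
         * starting at dio block 3 needs 1536 bytes zeroed in front of it,
         * one ending there needs 2560 zeroed behind it. */
        printf("front: %u\n", zero_bytes(3, 3, 9, 0));
        printf("back:  %u\n", zero_bytes(3, 3, 9, 1));
        return 0;
    }
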
913 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, in do_direct_IO() argument
923 page = dio_get_page(dio, sdio); in do_direct_IO()
944 ret = get_more_blocks(dio, sdio, map_bh); in do_direct_IO()
957 clean_blockdev_aliases(dio, map_bh); in do_direct_IO()
986 if (dio->rw & WRITE) { in do_direct_IO()
995 i_size_aligned = ALIGN(i_size_read(dio->inode), in do_direct_IO()
1006 dio->result += 1 << blkbits; in do_direct_IO()
1016 dio_zero_block(dio, sdio, 0, map_bh); in do_direct_IO()
1034 ret = submit_page_section(dio, sdio, page, in do_direct_IO()
1047 dio->result += this_chunk_bytes; in do_direct_IO()
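
do_direct_IO() is the main loop: it takes pages from dio_get_page(), calls get_more_blocks() whenever the mapped run is exhausted, zero-pads via dio_zero_block(), and hands each mapped chunk to submit_page_section(), accumulating dio->result. The lines above also show the split behavior for unmapped blocks: a write bails out (its caller falls back to buffered I/O) while a read zero-fills the user page up to the block-aligned i_size. A simplified sketch of that unmapped-block branch; the -ENOTBLK value and ALIGN follow kernel conventions, but the function around them is illustrative:

    #include <stdio.h>
    #include <string.h>

    #define ENOTBLK_ 15
    #define ALIGN_(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

    static int handle_hole(int is_write, unsigned long long block_in_file,
                           unsigned long long i_size, unsigned blkbits,
                           char *dst, long long *result)
    {
        unsigned long long i_size_aligned = ALIGN_(i_size, 1ULL << blkbits);

        if (is_write)
            return -ENOTBLK_;              /* no block: punt to buffered I/O */
        if (block_in_file >= (i_size_aligned >> blkbits))
            return 0;                      /* read hit EOF: done */
        memset(dst, 0, 1u << blkbits);     /* reads of holes see zeroes */
        *result += 1 << blkbits;           /* dio->result += 1 << blkbits */
        return 1;                          /* block consumed, keep going */
    }

    int main(void)
    {
        char buf[512];
        long long result = 0;

        printf("%d\n", handle_hole(0, 2, 4096, 9, buf, &result));  /* 1 */
        printf("%d\n", handle_hole(1, 2, 4096, 9, buf, &result));  /* -15 */
        return 0;
    }
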
1062 static inline int drop_refcount(struct dio *dio) in drop_refcount() argument
1078 spin_lock_irqsave(&dio->bio_lock, flags); in drop_refcount()
1079 ret2 = --dio->refcount; in drop_refcount()
1080 spin_unlock_irqrestore(&dio->bio_lock, flags); in drop_refcount()
1121 struct dio *dio; in do_blockdev_direct_IO() local
1144 dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); in do_blockdev_direct_IO()
1146 if (!dio) in do_blockdev_direct_IO()
1153 memset(dio, 0, offsetof(struct dio, pages)); in do_blockdev_direct_IO()
1155 dio->flags = flags; in do_blockdev_direct_IO()
1156 if (dio->flags & DIO_LOCKING) { in do_blockdev_direct_IO()
1168 kmem_cache_free(dio_cache, dio); in do_blockdev_direct_IO()
1175 dio->i_size = i_size_read(inode); in do_blockdev_direct_IO()
1176 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { in do_blockdev_direct_IO()
1177 if (dio->flags & DIO_LOCKING) in do_blockdev_direct_IO()
1179 kmem_cache_free(dio_cache, dio); in do_blockdev_direct_IO()
1191 dio->is_async = false; in do_blockdev_direct_IO()
1192 else if (!(dio->flags & DIO_ASYNC_EXTEND) && in do_blockdev_direct_IO()
1194 dio->is_async = false; in do_blockdev_direct_IO()
1196 dio->is_async = true; in do_blockdev_direct_IO()
1198 dio->inode = inode; in do_blockdev_direct_IO()
1199 dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ; in do_blockdev_direct_IO()
1205 if (dio->is_async && iov_iter_rw(iter) == WRITE && in do_blockdev_direct_IO()
1208 retval = dio_set_defer_completion(dio); in do_blockdev_direct_IO()
1214 kmem_cache_free(dio_cache, dio); in do_blockdev_direct_IO()
1222 if (!(dio->flags & DIO_SKIP_DIO_COUNT)) in do_blockdev_direct_IO()
1231 dio->end_io = end_io; in do_blockdev_direct_IO()
1236 dio->iocb = iocb; in do_blockdev_direct_IO()
1238 spin_lock_init(&dio->bio_lock); in do_blockdev_direct_IO()
1239 dio->refcount = 1; in do_blockdev_direct_IO()
1241 dio->should_dirty = (iter->type == ITER_IOVEC); in do_blockdev_direct_IO()
1257 retval = do_direct_IO(dio, &sdio, &map_bh); in do_blockdev_direct_IO()
1259 dio_cleanup(dio, &sdio); in do_blockdev_direct_IO()
1272 dio_zero_block(dio, &sdio, 1, &map_bh); in do_blockdev_direct_IO()
1277 ret2 = dio_send_cur_page(dio, &sdio, &map_bh); in do_blockdev_direct_IO()
1284 dio_bio_submit(dio, &sdio); in do_blockdev_direct_IO()
1292 dio_cleanup(dio, &sdio); in do_blockdev_direct_IO()
1299 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) in do_blockdev_direct_IO()
1300 mutex_unlock(&dio->inode->i_mutex); in do_blockdev_direct_IO()
1310 if (dio->is_async && retval == 0 && dio->result && in do_blockdev_direct_IO()
1311 (iov_iter_rw(iter) == READ || dio->result == count)) in do_blockdev_direct_IO()
1314 dio_await_completion(dio); in do_blockdev_direct_IO()
1316 if (drop_refcount(dio) == 0) { in do_blockdev_direct_IO()
1317 retval = dio_complete(dio, offset, retval, false); in do_blockdev_direct_IO()
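
do_blockdev_direct_IO() orchestrates the whole path: allocate the dio from dio_cache, clear only the fields ahead of the large pages[] array, take i_mutex and the inode's DIO count as dio->flags dictate, pick is_async, drive do_direct_IO(), flush the pending page and submit the last bio, then either return with I/O still in flight (async) or reap everything with dio_await_completion(); whichever side drops the final reference in drop_refcount() runs dio_complete(). The offsetof-bounded memset at line 1153 is a small trick worth showing with a stand-in struct:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct dio_like {
        int flags;
        int rw;
        long long result;   /* ...more scalar fields in the real struct */
        void *pages[64];    /* large array, deliberately left dirty */
    };

    int main(void)
    {
        struct dio_like d;

        memset(&d, 0x5a, sizeof(d));   /* simulate stale slab memory */
        /* Zero only the header: pages[] is refilled before use by
         * dio_refill_pages(), so clearing it on every I/O is wasted work. */
        memset(&d, 0, offsetof(struct dio_like, pages));

        printf("flags=%d result=%lld pages[] untouched=%d\n",
               d.flags, d.result, d.pages[0] != NULL);
        return 0;
    }
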
1351 dio_cache = KMEM_CACHE(dio, SLAB_PANIC); in dio_init()