Lines matching refs: bdev
592 struct block_device *bdev) in __submit_flush_wait() argument
594 int ret = blkdev_issue_flush(bdev, GFP_NOFS); in __submit_flush_wait()
596 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), in __submit_flush_wait()
612 ret = __submit_flush_wait(sbi, FDEV(i).bdev); in submit_flush_wait()
790 ret = __submit_flush_wait(sbi, FDEV(i).bdev); in f2fs_flush_device_cache()
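
The three hits above are the flush path: __submit_flush_wait() wraps blkdev_issue_flush() and traces it, while submit_flush_wait() and f2fs_flush_device_cache() walk the device array and flush FDEV(i).bdev in turn. Below is a minimal sketch of that per-device loop, assuming the two-argument blkdev_issue_flush() this kernel generation uses and the f2fs sbi->s_ndevs / FDEV() conventions from fs/f2fs/f2fs.h; the real code also honours the NOBARRIER mount option and keeps per-device dirty state.

#include <linux/blkdev.h>
#include "f2fs.h"        /* struct f2fs_sb_info, FDEV(), s_ndevs */

/* Sketch: flush the write cache of every device backing the filesystem. */
static int flush_all_devices(struct f2fs_sb_info *sbi)
{
        int i, ret = 0;

        for (i = 0; i < sbi->s_ndevs; i++) {
                int err = blkdev_issue_flush(FDEV(i).bdev, GFP_NOFS);

                if (err && !ret)
                        ret = err;      /* report the first failure */
        }
        return ret;
}
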
994 struct block_device *bdev, block_t lstart, in __create_discard_cmd() argument
1007 dc->bdev = bdev; in __create_discard_cmd()
1026 struct block_device *bdev, block_t lstart, in __attach_discard_cmd() argument
1034 dc = __create_discard_cmd(sbi, bdev, lstart, start, len); in __attach_discard_cmd()
1063 trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len); in __remove_discard_cmd()
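
The entries at __create_discard_cmd(), __attach_discard_cmd(), and __remove_discard_cmd() are the bookkeeping side: every pending discard is carried by a struct discard_cmd that records which bdev it targets together with its logical start, physical start, and length, allocated by __create_discard_cmd() and linked into the discard tree by __attach_discard_cmd(). A rough sketch of the fields those call sites touch follows; the rb_node member is an assumption, and the real structure carries additional state (reference counts, issue state, error) not shown here.

#include <linux/rbtree.h>
#include "f2fs.h"        /* block_t, struct block_device */

/* Sketch of the per-discard record implied by the call sites above. */
struct discard_cmd_sketch {
        struct rb_node rb_node;         /* assumption: node in the discard tree */
        struct block_device *bdev;      /* device the discard will go to */
        block_t lstart;                 /* logical start address in the fs */
        block_t start;                  /* physical start block on bdev */
        block_t len;                    /* length in blocks */
};
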
1174 struct block_device *bdev, block_t lstart,
1182 struct block_device *bdev = dc->bdev; in __submit_discard_cmd() local
1183 struct request_queue *q = bdev_get_queue(bdev); in __submit_discard_cmd()
1199 trace_f2fs_issue_discard(bdev, dc->start, dc->len); in __submit_discard_cmd()
1229 err = __blkdev_issue_discard(bdev, in __submit_discard_cmd()
1281 __update_discard_tree_range(sbi, bdev, lstart, start, len); in __submit_discard_cmd()
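
__submit_discard_cmd() is where a queued command reaches the block layer: it reads limits off bdev_get_queue(), traces the request, and hands the range to __blkdev_issue_discard(), which builds a bio chain that is then submitted. A hedged sketch of that central call is below, using the six-argument __blkdev_issue_discard() of this kernel generation; the real function splits by the queue's discard limits, attaches its own end_io handler, and afterwards updates the discard tree (the __update_discard_tree_range() hit above).

#include <linux/blkdev.h>

/* Sketch: build and submit the bio chain for one discard range. */
static int issue_one_discard(struct block_device *bdev,
                             sector_t sector, sector_t nr_sects)
{
        struct bio *bio = NULL;
        int err;

        err = __blkdev_issue_discard(bdev, sector, nr_sects,
                                     GFP_NOFS, 0, &bio);
        if (!err && bio) {
                /* the real code sets bi_end_io/bi_private before this */
                submit_bio(bio);
        }
        return err;
}
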
1287 struct block_device *bdev, block_t lstart, in __insert_discard_tree() argument
1306 __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, in __insert_discard_tree()
1339 __insert_discard_tree(sbi, dc->bdev, blkaddr + 1, in __punch_discard_cmd()
1354 struct block_device *bdev, block_t lstart, in __update_discard_tree_range() argument
1362 struct request_queue *q = bdev_get_queue(bdev); in __update_discard_tree_range()
1405 prev_dc->bdev == bdev && in __update_discard_tree_range()
1417 next_dc->bdev == bdev && in __update_discard_tree_range()
1431 __insert_discard_tree(sbi, bdev, di.lstart, di.start, in __update_discard_tree_range()
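
__insert_discard_tree() and __update_discard_tree_range() keep that tree coherent: a new range is merged into a neighbouring command only when the neighbour lives on the same bdev (the prev_dc->bdev == bdev and next_dc->bdev == bdev checks above) and is contiguous both logically and physically; otherwise a fresh node is inserted, and __punch_discard_cmd() re-inserts the tail when a block is carved out of the middle. A compressed sketch of the back-merge test follows; the helper name is illustrative and the real code also caps the merged length against the queue's discard limits.

#include "f2fs.h"        /* struct discard_cmd, block_t */

/* Sketch: may a new range starting at @lstart/@start be appended to @prev? */
static bool can_back_merge(struct discard_cmd *prev,
                           struct block_device *bdev,
                           block_t lstart, block_t start)
{
        return prev && prev->bdev == bdev &&
               prev->lstart + prev->len == lstart &&    /* logically adjacent */
               prev->start + prev->len == start;        /* physically adjacent */
}
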
1445 struct block_device *bdev, block_t blkstart, block_t blklen) in __queue_discard_cmd() argument
1449 if (!f2fs_bdev_support_discard(bdev)) in __queue_discard_cmd()
1452 trace_f2fs_queue_discard(bdev, blkstart, blklen); in __queue_discard_cmd()
1460 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen); in __queue_discard_cmd()
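
__queue_discard_cmd() is the gate in front of that tree: it returns early when the device cannot service a discard at all, translates the filesystem block address into a per-device one on multi-device setups, and then calls __update_discard_tree_range() under the discard lock. One plausible reading of the f2fs_bdev_support_discard() check it relies on is sketched below, treating a zoned device as discard-capable because a zone reset serves the same purpose; the body shown is an assumption, not copied from the source, and blk_queue_discard() is the capability test as this kernel generation spells it.

#include <linux/blkdev.h>

/* Assumed sketch: a device is "discardable" if it advertises discard
 * support or is zoned (zone reset stands in for discard). */
static bool bdev_can_discard(struct block_device *bdev)
{
        return blk_queue_discard(bdev_get_queue(bdev)) ||
               bdev_is_zoned(bdev);
}
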
1829 struct block_device *bdev, block_t blkstart, block_t blklen) in __f2fs_issue_discard_zone() argument
1850 if (sector & (bdev_zone_sectors(bdev) - 1) || in __f2fs_issue_discard_zone()
1851 nr_sects != bdev_zone_sectors(bdev)) { in __f2fs_issue_discard_zone()
1857 trace_f2fs_issue_reset_zone(bdev, blkstart); in __f2fs_issue_discard_zone()
1858 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, in __f2fs_issue_discard_zone()
1863 return __queue_discard_cmd(sbi, bdev, lblkstart, blklen); in __f2fs_issue_discard_zone()
1868 struct block_device *bdev, block_t blkstart, block_t blklen) in __issue_discard_async() argument
1871 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) in __issue_discard_async()
1872 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen); in __issue_discard_async()
1874 return __queue_discard_cmd(sbi, bdev, blkstart, blklen); in __issue_discard_async()
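
__issue_discard_async() picks the mechanism per device: on a zoned device (f2fs_sb_has_blkzoned() plus bdev_is_zoned()) it goes through __f2fs_issue_discard_zone(), which resets a zone only when the range is exactly one zone-aligned zone (the bdev_zone_sectors() checks above) and otherwise falls back to the regular __queue_discard_cmd() path. A simplified sketch of that decision follows, assuming the five-argument blkdev_zone_mgmt() of this kernel generation; the real code additionally distinguishes conventional from sequential zones rather than returning a single fallback error.

#include <linux/blkdev.h>

/* Sketch: whole, aligned zone -> reset it; anything else -> let the
 * caller queue an ordinary discard (simplified). */
static int discard_or_reset_zone(struct block_device *bdev,
                                 sector_t sector, sector_t nr_sects)
{
        sector_t zone_sectors = bdev_zone_sectors(bdev);

        if ((sector & (zone_sectors - 1)) || nr_sects != zone_sectors)
                return -EOPNOTSUPP;

        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
                                sector, nr_sects, GFP_NOFS);
}
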
1881 struct block_device *bdev; in f2fs_issue_discard() local
1887 bdev = f2fs_target_device(sbi, blkstart, NULL); in f2fs_issue_discard()
1894 if (bdev2 != bdev) { in f2fs_issue_discard()
1895 err = __issue_discard_async(sbi, bdev, in f2fs_issue_discard()
1899 bdev = bdev2; in f2fs_issue_discard()
1913 err = __issue_discard_async(sbi, bdev, start, len); in f2fs_issue_discard()
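
f2fs_issue_discard() handles ranges that span the multi-device array: it resolves the first block's backing device with f2fs_target_device(), and whenever the next block resolves to a different bdev (the bdev2 != bdev test above) it issues the span accumulated so far before starting a new one on the new device. A simplified sketch of that split loop is below; issue_range() is a hypothetical stand-in for __issue_discard_async(), and the real function also does per-block discard bookkeeping as it walks.

#include "f2fs.h"        /* struct f2fs_sb_info, block_t, f2fs_target_device() */

/* Sketch: walk a block range and issue one discard per backing device. */
static int issue_discard_split(struct f2fs_sb_info *sbi,
                               block_t blkstart, block_t blklen)
{
        struct block_device *bdev;
        block_t i, start = blkstart, len = 0;
        int err = 0;

        bdev = f2fs_target_device(sbi, blkstart, NULL);
        for (i = blkstart; i < blkstart + blklen; i++, len++) {
                struct block_device *bdev2 =
                        f2fs_target_device(sbi, i, NULL);

                if (bdev2 != bdev) {
                        /* issue_range() is hypothetical: stands in for
                         * __issue_discard_async() on the previous device */
                        err = issue_range(sbi, bdev, start, len);
                        if (err)
                                return err;
                        bdev = bdev2;
                        start = i;
                        len = 0;
                }
        }
        if (len)
                err = issue_range(sbi, bdev, start, len);
        return err;
}
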
4881 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block, in check_zone_write_pointer()
4899 if (!bdev_is_zoned(FDEV(i).bdev)) in get_target_zoned_dev()
4937 err = blkdev_report_zones(zbd->bdev, zone_sector, 1, in fix_curseg_write_pointer()
4980 err = blkdev_report_zones(zbd->bdev, zone_sector, 1, in fix_curseg_write_pointer()
4996 err = __f2fs_issue_discard_zone(sbi, zbd->bdev, in fix_curseg_write_pointer()
5043 if (!bdev_is_zoned(FDEV(i).bdev)) in f2fs_check_write_pointer()
5048 ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES, in f2fs_check_write_pointer()
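
The last group is the zoned write-pointer consistency code: check_zone_write_pointer(), fix_curseg_write_pointer(), and f2fs_check_write_pointer() query the device with blkdev_report_zones(), skip non-zoned members of the device array (the bdev_is_zoned() checks above), and repair a zone whose write pointer disagrees with the filesystem by resetting it through __f2fs_issue_discard_zone(). A minimal sketch of the single-zone report pattern those callers use follows, with the callback-based blkdev_report_zones() of this kernel generation; the callback and helper names are illustrative.

#include <linux/blkdev.h>

/* Callback: copy the reported zone descriptor out to the caller. */
static int copy_one_zone_cb(struct blk_zone *zone, unsigned int idx,
                            void *data)
{
        *(struct blk_zone *)data = *zone;
        return 0;
}

/* Sketch: fetch the zone descriptor that contains @sector. */
static int read_zone_at(struct block_device *bdev, sector_t sector,
                        struct blk_zone *zone)
{
        int ret = blkdev_report_zones(bdev, sector, 1,
                                      copy_one_zone_cb, zone);

        if (ret < 0)
                return ret;
        return ret == 1 ? 0 : -EIO;     /* expect exactly one zone back */
}
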