Lines Matching refs:queue (a sketch of how these calls fit together follows the listing)
168 struct request_queue *queue; member
825 blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long)); in ubd_open_dev()
849 ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE; in ubd_open_dev()
850 ubd_dev->queue->limits.discard_alignment = SECTOR_SIZE; in ubd_open_dev()
851 blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST); in ubd_open_dev()
852 blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST); in ubd_open_dev()
853 blk_queue_flag_set(QUEUE_FLAG_DISCARD, ubd_dev->queue); in ubd_open_dev()
855 blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue); in ubd_open_dev()
866 blk_cleanup_queue(ubd_dev->queue); in ubd_device_release()
901 disk->queue = ubd_devs[unit].queue; in ubd_disk_register()
941 ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set); in ubd_add()
942 if (IS_ERR(ubd_dev->queue)) { in ubd_add()
943 err = PTR_ERR(ubd_dev->queue); in ubd_add()
947 ubd_dev->queue->queuedata = ubd_dev; in ubd_add()
948 blk_queue_write_cache(ubd_dev->queue, true, false); in ubd_add()
950 blk_queue_max_segments(ubd_dev->queue, MAX_SG); in ubd_add()
951 blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1); in ubd_add()
975 if (!(IS_ERR(ubd_dev->queue))) in ubd_add()
976 blk_cleanup_queue(ubd_dev->queue); in ubd_add()
1418 struct ubd *ubd_dev = hctx->queue->queuedata; in ubd_queue_rq()
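
The excerpts above appear to come from the UML ubd block driver (arch/um/drivers/ubd_kern.c) and together cover the life cycle of its request queue: allocation with blk_mq_init_queue(), queuedata and limit/flag setup, attachment to the gendisk, per-request lookup in ubd_queue_rq(), and teardown with blk_cleanup_queue(). Below is a minimal sketch of that sequence, not the verbatim driver: it assumes the same pre-5.19 block API shown in the listing (blk_cleanup_queue, QUEUE_FLAG_DISCARD), and the struct ubd layout, the MAX_SG and UBD_MAX_REQUEST values, the no_trim gate around the discard setup, and the error handling are simplified assumptions for illustration.

/*
 * Sketch only: condensed from the listed lines, with assumed glue code.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

#define UBD_MAX_REQUEST (8 * sizeof(long))  /* value assumed; cf. line 825 */
#define MAX_SG          64                  /* value assumed for illustration */

struct ubd {
	struct request_queue *queue;        /* member from line 168 */
	struct blk_mq_tag_set tag_set;
	bool no_trim;                       /* assumed flag gating discard setup */
};

static int ubd_setup_queue(struct ubd *ubd_dev)
{
	ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
	if (IS_ERR(ubd_dev->queue))
		return PTR_ERR(ubd_dev->queue);

	/* Let the request handler find the device from the queue (see ubd_queue_rq). */
	ubd_dev->queue->queuedata = ubd_dev;

	/* Backing file behaves as a write-back cache; no FUA support. */
	blk_queue_write_cache(ubd_dev->queue, true, false);

	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
	blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
	blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));

	if (!ubd_dev->no_trim) {
		/* Advertise discard / write-zeroes in SECTOR_SIZE granules. */
		ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE;
		ubd_dev->queue->limits.discard_alignment = SECTOR_SIZE;
		blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
		blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, ubd_dev->queue);
	}
	/* A file-backed device has no rotational seek penalty. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue);

	return 0;
}

static void ubd_teardown_queue(struct ubd *ubd_dev)
{
	/* Mirrors ubd_device_release() and the ubd_add() error path. */
	if (!IS_ERR_OR_NULL(ubd_dev->queue))
		blk_cleanup_queue(ubd_dev->queue);
}

/* Request handler entry: recover the device from the queue, as at line 1418. */
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct ubd *ubd_dev = hctx->queue->queuedata;

	blk_mq_start_request(bd->rq);
	/* ... submit bd->rq against ubd_dev's backing file (omitted) ... */
	return BLK_STS_OK;
}

Setting queue->queuedata at init time is what lets ubd_queue_rq() recover the per-device state directly from hctx->queue->queuedata, with no global lookup table; the same queue pointer is also what ubd_disk_register() hands to disk->queue so the gendisk and the tag set share one request queue.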