Lines matching refs: io_req
481 struct io_thread_req *io_req = (*irq_req_buffer)[count]; in ubd_handler() local
483 if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) { in ubd_handler()
484 blk_queue_max_discard_sectors(io_req->req->q, 0); in ubd_handler()
485 blk_queue_max_write_zeroes_sectors(io_req->req->q, 0); in ubd_handler()
486 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q); in ubd_handler()
488 blk_mq_end_request(io_req->req, io_req->error); in ubd_handler()
489 kfree(io_req); in ubd_handler()
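The ubd_handler() hits above are the completion path: the UML I/O helper thread writes finished io_thread_req pointers back over thread_fd, and ubd_handler() walks the batch in irq_req_buffer. A discard that came back as BLK_STS_NOTSUPP disables discard and write-zeroes on the queue so the block layer stops issuing them; every request is then completed with its reported status and the descriptor freed. Below is a minimal sketch of the per-entry loop these lines sit in; how n and irq_req_buffer get filled from thread_fd is assumed, only the per-entry handling comes from the listed lines.

	for (count = 0; count < n / sizeof(struct io_thread_req *); count++) {
		struct io_thread_req *io_req = (*irq_req_buffer)[count];

		/* A rejected discard means the backing file cannot punch holes:
		 * turn off discard and write-zeroes for the whole queue so the
		 * block layer stops sending them. */
		if ((io_req->error == BLK_STS_NOTSUPP) &&
		    (req_op(io_req->req) == REQ_OP_DISCARD)) {
			blk_queue_max_discard_sectors(io_req->req->q, 0);
			blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
		}

		/* Complete the blk-mq request with the status the I/O thread
		 * reported, then drop the per-request descriptor that was
		 * allocated in ubd_alloc_req(). */
		blk_mq_end_request(io_req->req, io_req->error);
		kfree(io_req);
	}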
1256 static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req, in ubd_map_req() argument
1262 unsigned long byte_offset = io_req->offset; in ubd_map_req()
1266 io_req->io_desc[0].buffer = NULL; in ubd_map_req()
1267 io_req->io_desc[0].length = blk_rq_bytes(req); in ubd_map_req()
1270 BUG_ON(i >= io_req->desc_cnt); in ubd_map_req()
1272 io_req->io_desc[i].buffer = bvec_virt(&bvec); in ubd_map_req()
1273 io_req->io_desc[i].length = bvec.bv_len; in ubd_map_req()
1279 for (i = 0; i < io_req->desc_cnt; i++) { in ubd_map_req()
1280 cowify_req(io_req, &io_req->io_desc[i], byte_offset, in ubd_map_req()
1283 byte_offset += io_req->io_desc[i].length; in ubd_map_req()
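The ubd_map_req() hits turn the block request into the io_desc[] array the I/O thread works from: a request with no data payload gets a single descriptor with a NULL buffer and the request's byte length, otherwise each bio segment is mapped to a kernel virtual address with bvec_virt(). When the device is backed by a COW file, every descriptor is additionally run through cowify_req(), with byte_offset advancing past each descriptor. A hedged reconstruction of how the listed lines fit together follows; the no-payload branch condition, the dev->cow.file check, and the trailing cowify_req() arguments are assumptions beyond what the listing shows.

	static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
				struct request *req)
	{
		struct bio_vec bvec;
		struct req_iterator iter;
		int i = 0;
		unsigned long byte_offset = io_req->offset;

		if (req_op(req) == REQ_OP_FLUSH) {
			/* No data to transfer: one descriptor covering the range. */
			io_req->io_desc[0].buffer = NULL;
			io_req->io_desc[0].length = blk_rq_bytes(req);
		} else {
			/* One descriptor per bio segment, mapped to a kernel
			 * virtual address the helper thread can read or write. */
			rq_for_each_segment(bvec, req, iter) {
				BUG_ON(i >= io_req->desc_cnt);
				io_req->io_desc[i].buffer = bvec_virt(&bvec);
				io_req->io_desc[i].length = bvec.bv_len;
				i++;
			}
		}

		if (dev->cow.file) {
			/* Adjust each descriptor for the COW layout; the bitmap
			 * arguments here are assumed, not shown in the listing. */
			for (i = 0; i < io_req->desc_cnt; i++) {
				cowify_req(io_req, &io_req->io_desc[i], byte_offset,
					   dev->cow.bitmap, dev->cow.bitmap_offset,
					   dev->cow.bitmap_len);
				byte_offset += io_req->io_desc[i].length;
			}
		}
	}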
1292 struct io_thread_req *io_req; in ubd_alloc_req() local
1295 io_req = kmalloc(sizeof(*io_req) + in ubd_alloc_req()
1298 if (!io_req) in ubd_alloc_req()
1301 io_req->req = req; in ubd_alloc_req()
1303 io_req->fds[0] = dev->cow.fd; in ubd_alloc_req()
1305 io_req->fds[0] = dev->fd; in ubd_alloc_req()
1306 io_req->error = 0; in ubd_alloc_req()
1307 io_req->sectorsize = SECTOR_SIZE; in ubd_alloc_req()
1308 io_req->fds[1] = dev->fd; in ubd_alloc_req()
1309 io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT; in ubd_alloc_req()
1310 io_req->offsets[0] = 0; in ubd_alloc_req()
1311 io_req->offsets[1] = dev->cow.data_offset; in ubd_alloc_req()
1314 io_req->io_desc[i].sector_mask = 0; in ubd_alloc_req()
1315 io_req->io_desc[i].cow_offset = -1; in ubd_alloc_req()
1318 return io_req; in ubd_alloc_req()
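The ubd_alloc_req() hits allocate the io_thread_req header and its trailing array of desc_cnt io_desc entries in a single kmalloc(), then fill in the plumbing the I/O thread needs: fds[0] is the COW file when one is configured (so reads consult it first) and the backing file otherwise, fds[1] is always the backing file, offset is the request's starting byte, and each descriptor starts with sector_mask = 0 and cow_offset = -1, meaning no COW bitmap update is pending. A sketch assembled from the listed lines follows; the GFP_ATOMIC flag, the exact allocation-size expression, and the loop bounds are assumptions.

	static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
						   int desc_cnt)
	{
		struct io_thread_req *io_req;
		int i;

		/* Header plus flexible io_desc[] tail in one allocation;
		 * GFP_ATOMIC is assumed since this runs from the submission path. */
		io_req = kmalloc(sizeof(*io_req) +
				 (desc_cnt * sizeof(struct io_desc)),
				 GFP_ATOMIC);
		if (!io_req)
			return NULL;

		io_req->req = req;
		if (dev->cow.file)
			io_req->fds[0] = dev->cow.fd;	/* COW file consulted first */
		else
			io_req->fds[0] = dev->fd;
		io_req->error = 0;
		io_req->sectorsize = SECTOR_SIZE;
		io_req->fds[1] = dev->fd;		/* backing file */
		io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
		io_req->offsets[0] = 0;
		io_req->offsets[1] = dev->cow.data_offset;

		for (i = 0; i < desc_cnt; i++) {
			io_req->io_desc[i].sector_mask = 0;
			io_req->io_desc[i].cow_offset = -1;	/* no bitmap update yet */
		}

		return io_req;
	}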
1324 struct io_thread_req *io_req; in ubd_submit_request() local
1335 io_req = ubd_alloc_req(dev, req, segs); in ubd_submit_request()
1336 if (!io_req) in ubd_submit_request()
1339 io_req->desc_cnt = segs; in ubd_submit_request()
1341 ubd_map_req(dev, io_req, req); in ubd_submit_request()
1343 ret = os_write_file(thread_fd, &io_req, sizeof(io_req)); in ubd_submit_request()
1344 if (ret != sizeof(io_req)) { in ubd_submit_request()
1347 kfree(io_req); in ubd_submit_request()
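The ubd_submit_request() hits tie the pieces together: the function works out how many descriptors the request needs, allocates the io_thread_req, maps the data, and then hands the whole thing to the I/O helper thread by writing the pointer itself down thread_fd (the thread later writes the same pointer back, which is what ubd_handler() above picks up). A short write frees the descriptor so nothing leaks. The sketch below is hedged; the segment-count logic, the if (segs) guard, and the -EAGAIN handling are assumptions beyond the listed lines.

	static int ubd_submit_request(struct ubd *dev, struct request *req)
	{
		int segs = 0;
		struct io_thread_req *io_req;
		int ret;

		if (req_op(req) == REQ_OP_FLUSH)
			segs = 0;			/* no data descriptors needed */
		else
			segs = blk_rq_nr_phys_segments(req);

		io_req = ubd_alloc_req(dev, req, segs);
		if (!io_req)
			return -ENOMEM;

		io_req->desc_cnt = segs;
		if (segs)
			ubd_map_req(dev, io_req, req);

		/* The kernel side and the I/O helper thread share an address
		 * space, so passing the bare pointer over the pipe is enough. */
		ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
		if (ret != sizeof(io_req)) {
			if (ret != -EAGAIN)
				pr_err("write to io thread failed: %d\n", -ret);
			kfree(io_req);
		}
		return ret;
	}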