Lines matching refs:rq

These are the cross-reference hits for the identifier rq in the Linux bsg driver (block/bsg.c, the SG_IO v4 passthrough interface). The leading numbers are source line numbers in that file, and each hit names its enclosing function.
83 struct request *rq; member
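The rq member at line 83 lives in the per-command tracking structure. A minimal sketch of that struct, reconstructed only from the fields these matches touch (the real layout in bsg.c has additional bookkeeping fields; this is an assumption, not the verbatim definition):

    struct bsg_command {
            struct bsg_device *bd;             /* owning bsg device */
            struct list_head list;             /* linkage on the done list */
            struct request *rq;                /* line 83: in-flight request */
            struct bio *bio;                   /* data bio, saved at submit */
            struct bio *bidi_bio;              /* rq->next_rq's bio (bidi) */
            int err;
            struct sg_io_v4 hdr;               /* user-visible SG v4 header */
            char sense[SCSI_SENSE_BUFFERSIZE]; /* per-command sense buffer */
    };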
175 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, in blk_fill_sgv4_hdr_rq() argument
180 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); in blk_fill_sgv4_hdr_rq()
181 if (!rq->cmd) in blk_fill_sgv4_hdr_rq()
185 if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, in blk_fill_sgv4_hdr_rq()
190 if (blk_verify_command(rq->cmd, has_write_perm)) in blk_fill_sgv4_hdr_rq()
198 rq->cmd_len = hdr->request_len; in blk_fill_sgv4_hdr_rq()
200 rq->timeout = msecs_to_jiffies(hdr->timeout); in blk_fill_sgv4_hdr_rq()
201 if (!rq->timeout) in blk_fill_sgv4_hdr_rq()
202 rq->timeout = q->sg_timeout; in blk_fill_sgv4_hdr_rq()
203 if (!rq->timeout) in blk_fill_sgv4_hdr_rq()
204 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; in blk_fill_sgv4_hdr_rq()
205 if (rq->timeout < BLK_MIN_SG_TIMEOUT) in blk_fill_sgv4_hdr_rq()
206 rq->timeout = BLK_MIN_SG_TIMEOUT; in blk_fill_sgv4_hdr_rq()
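Taken together, lines 175-206 populate the request from the sg_io_v4 header: the CDB is copied from user space and permission-checked, then the timeout falls through three defaults. A hedged reconstruction of that flow (the BLK_MAX_CDB guard around the kzalloc() is an assumption, inferred from the rq->__cmd checks in the teardown paths below):

    static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
                                    struct sg_io_v4 *hdr, struct bsg_device *bd,
                                    fmode_t has_write_perm)
    {
            /* a CDB longer than the inline buffer needs its own allocation */
            if (hdr->request_len > BLK_MAX_CDB) {
                    rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); /* 180 */
                    if (!rq->cmd)
                            return -ENOMEM;
            }

            if (copy_from_user(rq->cmd,
                               (void __user *)(unsigned long)hdr->request,
                               hdr->request_len))                    /* 185 */
                    return -EFAULT;

            if (blk_verify_command(rq->cmd, has_write_perm))         /* 190 */
                    return -EPERM;

            rq->cmd_len = hdr->request_len;

            /* 200-206: header timeout, else queue default, else global
             * default, finally clamped to the minimum */
            rq->timeout = msecs_to_jiffies(hdr->timeout);
            if (!rq->timeout)
                    rq->timeout = q->sg_timeout;
            if (!rq->timeout)
                    rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
            if (rq->timeout < BLK_MIN_SG_TIMEOUT)
                    rq->timeout = BLK_MIN_SG_TIMEOUT;

            return 0;
    }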
248 struct request *rq, *next_rq = NULL; in bsg_map_hdr() local
272 rq = blk_get_request(q, rw, GFP_KERNEL); in bsg_map_hdr()
273 if (IS_ERR(rq)) in bsg_map_hdr()
274 return rq; in bsg_map_hdr()
275 blk_rq_set_block_pc(rq); in bsg_map_hdr()
277 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); in bsg_map_hdr()
293 rq->next_rq = next_rq; in bsg_map_hdr()
294 next_rq->cmd_type = rq->cmd_type; in bsg_map_hdr()
313 ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len, in bsg_map_hdr()
319 rq->sense = sense; in bsg_map_hdr()
320 rq->sense_len = 0; in bsg_map_hdr()
322 return rq; in bsg_map_hdr()
324 if (rq->cmd != rq->__cmd) in bsg_map_hdr()
325 kfree(rq->cmd); in bsg_map_hdr()
326 blk_put_request(rq); in bsg_map_hdr()
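Lines 248-326 cover bsg_map_hdr() from allocation to its unwind path. A condensed sketch of the control flow these matches imply; the rw/dxferp derivation from the header is an assumption, and the bidi read-side mapping and zero-length-transfer case are abbreviated:

    static struct request *
    bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr,
                fmode_t has_write_perm, u8 *sense)
    {
            struct request_queue *q = bd->queue;
            struct request *rq, *next_rq = NULL;
            int ret, rw = hdr->dout_xfer_len ? WRITE : READ;

            rq = blk_get_request(q, rw, GFP_KERNEL);        /* 272 */
            if (IS_ERR(rq))
                    return rq;
            blk_rq_set_block_pc(rq);    /* mark as SCSI passthrough */

            ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
            if (ret)
                    goto out;

            if (hdr->dout_xfer_len && hdr->din_xfer_len) {
                    /* bidi: a second request carries the read side */
                    next_rq = blk_get_request(q, READ, GFP_KERNEL);
                    if (IS_ERR(next_rq)) {
                            ret = PTR_ERR(next_rq);
                            goto out;
                    }
                    rq->next_rq = next_rq;                  /* 293 */
                    next_rq->cmd_type = rq->cmd_type;
                    /* din buffer is mapped onto next_rq (omitted) */
            }

            if (hdr->dout_xfer_len)
                    ret = blk_rq_map_user(q, rq, NULL,
                            (void __user *)(unsigned long)hdr->dout_xferp,
                            hdr->dout_xfer_len, GFP_KERNEL); /* 313 */
            else if (hdr->din_xfer_len)
                    ret = blk_rq_map_user(q, rq, NULL,
                            (void __user *)(unsigned long)hdr->din_xferp,
                            hdr->din_xfer_len, GFP_KERNEL);
            if (ret)
                    goto out;

            rq->sense = sense;          /* caller-supplied sense buffer */
            rq->sense_len = 0;
            return rq;
    out:
            if (rq->cmd != rq->__cmd)   /* CDB was kzalloc'd in the fill step */
                    kfree(rq->cmd);
            blk_put_request(rq);
            return ERR_PTR(ret);
    }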
338 static void bsg_rq_end_io(struct request *rq, int uptodate) in bsg_rq_end_io() argument
340 struct bsg_command *bc = rq->end_io_data; in bsg_rq_end_io()
345 bd->name, rq, bc, bc->bio, uptodate); in bsg_rq_end_io()
362 struct bsg_command *bc, struct request *rq) in bsg_add_command() argument
369 bc->rq = rq; in bsg_add_command()
370 bc->bio = rq->bio; in bsg_add_command()
371 if (rq->next_rq) in bsg_add_command()
372 bc->bidi_bio = rq->next_rq->bio; in bsg_add_command()
378 dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); in bsg_add_command()
380 rq->end_io_data = bc; in bsg_add_command()
381 blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io); in bsg_add_command()
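Lines 338-381 are the asynchronous submit side and its completion hook: bsg_add_command() records the request and its bios in the bsg_command, stashes that in rq->end_io_data, and fires the request with bsg_rq_end_io() as the callback. A hedged sketch of the pairing (the done-list bookkeeping and locking inside the callback are summarized in comments rather than shown):

    static void bsg_rq_end_io(struct request *rq, int uptodate)
    {
            struct bsg_command *bc = rq->end_io_data;       /* 340 */
            struct bsg_device *bd = bc->bd;

            dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
                    bd->name, rq, bc, bc->bio, uptodate);   /* 345 */
            /* the real callback records the duration, moves bc onto the
             * device's done list under the lock, and wakes readers */
    }

    static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
                                struct bsg_command *bc, struct request *rq)
    {
            int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

            bc->rq = rq;                        /* 369 */
            bc->bio = rq->bio;                  /* keep bios for unmapping */
            if (rq->next_rq)
                    bc->bidi_bio = rq->next_rq->bio;

            dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

            rq->end_io_data = bc;               /* 380: callback context */
            blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
    }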
429 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, in blk_complete_sgv4_hdr_rq() argument
434 dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); in blk_complete_sgv4_hdr_rq()
438 hdr->device_status = rq->errors & 0xff; in blk_complete_sgv4_hdr_rq()
439 hdr->transport_status = host_byte(rq->errors); in blk_complete_sgv4_hdr_rq()
440 hdr->driver_status = driver_byte(rq->errors); in blk_complete_sgv4_hdr_rq()
446 if (rq->sense_len && hdr->response) { in blk_complete_sgv4_hdr_rq()
448 rq->sense_len); in blk_complete_sgv4_hdr_rq()
451 rq->sense, len); in blk_complete_sgv4_hdr_rq()
458 if (rq->next_rq) { in blk_complete_sgv4_hdr_rq()
459 hdr->dout_resid = rq->resid_len; in blk_complete_sgv4_hdr_rq()
460 hdr->din_resid = rq->next_rq->resid_len; in blk_complete_sgv4_hdr_rq()
462 blk_put_request(rq->next_rq); in blk_complete_sgv4_hdr_rq()
463 } else if (rq_data_dir(rq) == READ) in blk_complete_sgv4_hdr_rq()
464 hdr->din_resid = rq->resid_len; in blk_complete_sgv4_hdr_rq()
466 hdr->dout_resid = rq->resid_len; in blk_complete_sgv4_hdr_rq()
474 if (!ret && rq->errors < 0) in blk_complete_sgv4_hdr_rq()
475 ret = rq->errors; in blk_complete_sgv4_hdr_rq()
478 if (rq->cmd != rq->__cmd) in blk_complete_sgv4_hdr_rq()
479 kfree(rq->cmd); in blk_complete_sgv4_hdr_rq()
480 blk_put_request(rq); in blk_complete_sgv4_hdr_rq()
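Lines 429-480 are the completion path: rq->errors is split into the three SG v4 status bytes, sense data is copied out when the caller asked for it, and residuals are reported differently for bidi versus unidirectional requests before the request (and any kzalloc'd CDB) is released. A condensed sketch; the hdr->info computation and some error-code merging in the real function are omitted:

    static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
                                        struct bio *bio, struct bio *bidi_bio)
    {
            int ret = 0;

            /* 438-440: decompose rq->errors into the status bytes */
            hdr->device_status = rq->errors & 0xff;
            hdr->transport_status = host_byte(rq->errors);
            hdr->driver_status = driver_byte(rq->errors);

            if (rq->sense_len && hdr->response) {           /* 446 */
                    int len = min_t(unsigned int, hdr->max_response_len,
                                    rq->sense_len);
                    if (copy_to_user((void __user *)(unsigned long)hdr->response,
                                     rq->sense, len))
                            ret = -EFAULT;
                    else
                            hdr->response_len = len;
            }

            if (rq->next_rq) {                  /* bidi: two residuals */
                    hdr->dout_resid = rq->resid_len;
                    hdr->din_resid = rq->next_rq->resid_len;
                    blk_rq_unmap_user(bidi_bio);
                    blk_put_request(rq->next_rq);           /* 462 */
            } else if (rq_data_dir(rq) == READ)
                    hdr->din_resid = rq->resid_len;
            else
                    hdr->dout_resid = rq->resid_len;

            blk_rq_unmap_user(bio);
            if (!ret && rq->errors < 0)         /* 474-475: negative errno */
                    ret = rq->errors;

            if (rq->cmd != rq->__cmd)           /* 478: free a long CDB */
                    kfree(rq->cmd);
            blk_put_request(rq);
            return ret;
    }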
523 tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, in bsg_complete_all_commands()
558 ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, in __bsg_read()
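Lines 523 and 558 are the two read-side consumers of blk_complete_sgv4_hdr_rq(). A brief, hedged fragment of the __bsg_read() loop around line 558 (bsg_get_done_cmd() and bsg_free_command() are bsg.c helpers; their exact semantics are assumed here):

    /* __bsg_read(), condensed: drain completed commands to user space */
    while (nr_commands) {
            struct bsg_command *bc = bsg_get_done_cmd(bd);
            if (IS_ERR(bc))
                    break;

            ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                           bc->bidi_bio);   /* 558 */
            if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
                    ret = -EFAULT;

            bsg_free_command(bc);
            buf += sizeof(struct sg_io_v4);
            nr_commands--;
    }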
622 struct request *rq; in __bsg_write() local
629 rq = NULL; in __bsg_write()
650 rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense); in __bsg_write()
651 if (IS_ERR(rq)) { in __bsg_write()
652 ret = PTR_ERR(rq); in __bsg_write()
653 rq = NULL; in __bsg_write()
657 bsg_add_command(bd, q, bc, rq); in __bsg_write()
659 rq = NULL; in __bsg_write()
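Lines 622-659 are the write-side loop: each sg_io_v4 header copied in from user space becomes a request that is queued asynchronously, and rq is nulled after the handoff so the exit path cannot free a request that bsg_add_command() now owns. A hedged sketch of the loop body:

    while (nr_commands) {
            struct bsg_command *bc = bsg_alloc_command(bd);
            if (IS_ERR(bc)) {
                    ret = PTR_ERR(bc);
                    break;
            }

            if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
                    ret = -EFAULT;
                    bsg_free_command(bc);
                    break;
            }

            rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
            if (IS_ERR(rq)) {
                    ret = PTR_ERR(rq);          /* 652 */
                    rq = NULL;                  /* 653: nothing to put later */
                    bsg_free_command(bc);
                    break;
            }

            bsg_add_command(bd, q, bc, rq);     /* ownership moves here */
            rq = NULL;                          /* 659 */
            buf += sizeof(struct sg_io_v4);
            nr_commands--;
    }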
773 struct request_queue *rq, in bsg_add_device() argument
780 if (!blk_get_queue(rq)) in bsg_add_device()
785 blk_put_queue(rq); in bsg_add_device()
789 bd->queue = rq; in bsg_add_device()
797 strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); in bsg_add_device()
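A naming trap in lines 773-797: here rq is a struct request_queue *, not a struct request * as everywhere else in this file. The function pins the queue for the lifetime of the bsg_device. A short sketch (bsg_alloc_device() and the exact unwind order are assumptions consistent with the blk_put_queue() hit at line 785):

    /* note: rq is the request_queue here, despite the name */
    if (!blk_get_queue(rq))                     /* 780: take a queue ref */
            return ERR_PTR(-ENXIO);

    bd = bsg_alloc_device();
    if (!bd) {
            blk_put_queue(rq);                  /* 785: drop ref on failure */
            return ERR_PTR(-ENOMEM);
    }

    bd->queue = rq;                             /* 789 */
    strncpy(bd->name, dev_name(rq->bsg_dev.class_dev),
            sizeof(bd->name) - 1);              /* 797 */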
932 struct request *rq; in bsg_ioctl() local
941 rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense); in bsg_ioctl()
942 if (IS_ERR(rq)) in bsg_ioctl()
943 return PTR_ERR(rq); in bsg_ioctl()
945 bio = rq->bio; in bsg_ioctl()
946 if (rq->next_rq) in bsg_ioctl()
947 bidi_bio = rq->next_rq->bio; in bsg_ioctl()
950 blk_execute_rq(bd->queue, NULL, rq, at_head); in bsg_ioctl()
951 ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); in bsg_ioctl()
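Lines 932-951 are the synchronous SG_IO ioctl path: unlike __bsg_write(), it runs the request inline with blk_execute_rq() and completes it immediately, with no bsg_command bookkeeping. A hedged sketch of the case body (uarg and the at_head derivation are assumptions consistent with the async path above):

    case SG_IO: {
            struct request *rq;
            struct bio *bio, *bidi_bio = NULL;
            struct sg_io_v4 hdr;
            u8 sense[SCSI_SENSE_BUFFERSIZE];
            int at_head, ret;

            if (copy_from_user(&hdr, uarg, sizeof(hdr)))
                    return -EFAULT;

            rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
            if (IS_ERR(rq))                     /* 942 */
                    return PTR_ERR(rq);

            bio = rq->bio;                      /* save before execution */
            if (rq->next_rq)
                    bidi_bio = rq->next_rq->bio;

            at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
            blk_execute_rq(bd->queue, NULL, rq, at_head);   /* 950: sync */
            ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

            if (copy_to_user(uarg, &hdr, sizeof(hdr)))
                    return -EFAULT;
            return ret;
    }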