
Searched refs:rq (Results 1 – 25 of 284) sorted by relevance


/drivers/scsi/fnic/
vnic_rq.c
27 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
31 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
34 vdev = rq->vdev; in vnic_rq_alloc_bufs()
37 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
38 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
45 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
48 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
49 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
51 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
54 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
108 return rq->ring.desc_avail; in vnic_rq_desc_avail()
111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
119 return rq->to_use->desc; in vnic_rq_next_desc()
122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
124 return rq->to_use->index; in vnic_rq_next_index()
127 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
129 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
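
The vnic_rq.h helpers above encode a common ring-buffer convention: one descriptor is kept permanently unused so that a full ring stays distinguishable from an empty one, which is why vnic_rq_desc_used() returns desc_count - desc_avail - 1. Below is a minimal userspace sketch of that invariant; ring_model and ring_used are illustrative names, not driver symbols.

    #include <assert.h>
    #include <stdio.h>

    /* Model of the vnic_rq accounting: one slot is reserved, so the
     * ring holds at most desc_count - 1 buffers and "used" is
     * desc_count - desc_avail - 1. */
    struct ring_model {
            unsigned int desc_count;  /* total descriptors in the ring */
            unsigned int desc_avail;  /* descriptors free for posting */
    };

    static unsigned int ring_used(const struct ring_model *r)
    {
            return r->desc_count - r->desc_avail - 1;
    }

    int main(void)
    {
            struct ring_model r = { .desc_count = 64, .desc_avail = 63 };

            assert(ring_used(&r) == 0);   /* empty: all but the reserved slot free */
            r.desc_avail = 0;
            assert(ring_used(&r) == 63);  /* full: occupancy tops out at desc_count - 1 */
            printf("used when full: %u\n", ring_used(&r));
            return 0;
    }
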
/drivers/net/ethernet/cisco/enic/
vnic_rq.c
30 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
34 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
37 vdev = rq->vdev; in vnic_rq_alloc_bufs()
40 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); in vnic_rq_alloc_bufs()
41 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
46 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
49 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
50 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
52 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
55 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
89 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
92 return rq->ring.desc_avail; in vnic_rq_desc_avail()
95 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
98 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
101 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
103 return rq->to_use->desc; in vnic_rq_next_desc()
106 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
108 return rq->to_use->index; in vnic_rq_next_index()
111 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
115 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
/drivers/ide/
ide-io.c
57 int ide_end_rq(ide_drive_t *drive, struct request *rq, int error, in ide_end_rq() argument
70 return blk_end_request(rq, error, nr_bytes); in ide_end_rq()
78 struct request *rq = cmd->rq; in ide_complete_cmd() local
105 if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { in ide_complete_cmd()
106 struct ide_cmd *orig_cmd = rq->special; in ide_complete_cmd()
118 struct request *rq = hwif->rq; in ide_complete_rq() local
125 if (blk_noretry_request(rq) && error <= 0) in ide_complete_rq()
126 nr_bytes = blk_rq_sectors(rq) << 9; in ide_complete_rq()
128 rc = ide_end_rq(drive, rq, error, nr_bytes); in ide_complete_rq()
130 hwif->rq = NULL; in ide_complete_rq()
[all …]
ide-cd.c
96 static int cdrom_log_sense(ide_drive_t *drive, struct request *rq) in cdrom_log_sense() argument
101 if (!sense || !rq || (rq->cmd_flags & REQ_QUIET)) in cdrom_log_sense()
124 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) in cdrom_log_sense()
210 static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) in ide_cd_complete_failed_rq() argument
218 struct request *failed = (struct request *)rq->special; in ide_cd_complete_failed_rq()
219 void *sense = bio_data(rq->bio); in ide_cd_complete_failed_rq()
229 failed->sense_len = rq->sense_len; in ide_cd_complete_failed_rq()
247 static int ide_cd_breathe(ide_drive_t *drive, struct request *rq) in ide_cd_breathe() argument
252 if (!rq->errors) in ide_cd_breathe()
255 rq->errors = 1; in ide_cd_breathe()
[all …]
ide-eh.c
7 static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, in ide_ata_error() argument
15 rq->errors |= ERROR_RESET; in ide_ata_error()
28 rq->errors = ERROR_MAX; in ide_ata_error()
31 rq->errors |= ERROR_RECAL; in ide_ata_error()
35 if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ && in ide_ata_error()
42 if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { in ide_ata_error()
43 ide_kill_rq(drive, rq); in ide_ata_error()
48 rq->errors |= ERROR_RESET; in ide_ata_error()
50 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { in ide_ata_error()
51 ++rq->errors; in ide_ata_error()
[all …]
ide-floppy.c
66 struct request *rq = pc->rq; in ide_floppy_callback() local
75 rq->cmd_type == REQ_TYPE_BLOCK_PC) in ide_floppy_callback()
79 u8 *buf = bio_data(rq->bio); in ide_floppy_callback()
100 if (rq->cmd_type == REQ_TYPE_SPECIAL) in ide_floppy_callback()
101 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; in ide_floppy_callback()
136 unsigned int done = blk_rq_bytes(drive->hwif->rq); in ide_floppy_issue_pc()
191 struct ide_atapi_pc *pc, struct request *rq, in idefloppy_create_rw_cmd() argument
196 int blocks = blk_rq_sectors(rq) / floppy->bs_factor; in idefloppy_create_rw_cmd()
197 int cmd = rq_data_dir(rq); in idefloppy_create_rw_cmd()
206 memcpy(rq->cmd, pc->c, 12); in idefloppy_create_rw_cmd()
[all …]
ide-pm.c
10 struct request *rq; in generic_ide_suspend() local
21 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_ide_suspend()
22 rq->cmd_type = REQ_TYPE_PM_SUSPEND; in generic_ide_suspend()
23 rq->special = &rqpm; in generic_ide_suspend()
29 ret = blk_execute_rq(drive->queue, NULL, rq, 0); in generic_ide_suspend()
30 blk_put_request(rq); in generic_ide_suspend()
46 struct request *rq; in generic_ide_resume() local
61 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_ide_resume()
62 rq->cmd_type = REQ_TYPE_PM_RESUME; in generic_ide_resume()
63 rq->cmd_flags |= REQ_PREEMPT; in generic_ide_resume()
[all …]
ide-atapi.c
92 struct request *rq; in ide_queue_pc_tail() local
95 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_queue_pc_tail()
96 rq->cmd_type = REQ_TYPE_SPECIAL; in ide_queue_pc_tail()
97 rq->special = (char *)pc; in ide_queue_pc_tail()
100 error = blk_rq_map_kern(drive->queue, rq, buf, bufflen, in ide_queue_pc_tail()
106 memcpy(rq->cmd, pc->c, 12); in ide_queue_pc_tail()
108 rq->cmd[13] = REQ_IDETAPE_PC1; in ide_queue_pc_tail()
109 error = blk_execute_rq(drive->queue, disk, rq, 0); in ide_queue_pc_tail()
111 blk_put_request(rq); in ide_queue_pc_tail()
171 void ide_prep_sense(ide_drive_t *drive, struct request *rq) in ide_prep_sense() argument
[all …]
ide-park.c
13 struct request *rq; in issue_park_cmd() local
34 rq = blk_get_request(q, READ, __GFP_WAIT); in issue_park_cmd()
35 rq->cmd[0] = REQ_PARK_HEADS; in issue_park_cmd()
36 rq->cmd_len = 1; in issue_park_cmd()
37 rq->cmd_type = REQ_TYPE_SPECIAL; in issue_park_cmd()
38 rq->special = &timeout; in issue_park_cmd()
39 rc = blk_execute_rq(q, NULL, rq, 1); in issue_park_cmd()
40 blk_put_request(rq); in issue_park_cmd()
48 rq = blk_get_request(q, READ, GFP_NOWAIT); in issue_park_cmd()
49 if (unlikely(!rq)) in issue_park_cmd()
[all …]
ide-devsets.c
162 struct request *rq; in ide_devset_execute() local
168 rq = blk_get_request(q, READ, __GFP_WAIT); in ide_devset_execute()
169 rq->cmd_type = REQ_TYPE_SPECIAL; in ide_devset_execute()
170 rq->cmd_len = 5; in ide_devset_execute()
171 rq->cmd[0] = REQ_DEVSET_EXEC; in ide_devset_execute()
172 *(int *)&rq->cmd[1] = arg; in ide_devset_execute()
173 rq->special = setting->set; in ide_devset_execute()
175 if (blk_execute_rq(q, NULL, rq, 0)) in ide_devset_execute()
176 ret = rq->errors; in ide_devset_execute()
177 blk_put_request(rq); in ide_devset_execute()
[all …]
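
The IDE hits above repeat a single pre-blk-mq request lifecycle: blk_get_request() allocates a request, the driver marks it REQ_TYPE_SPECIAL and fills rq->cmd, blk_execute_rq() runs it synchronously, and blk_put_request() releases it. A hedged sketch of that pattern, modeled on the ide_devset_execute() excerpt (issue_special_cmd is an illustrative name, and error handling is kept minimal):

    #include <linux/blkdev.h>

    /* Sketch of the synchronous special-command pattern used throughout
     * the drivers/ide/ excerpts above; this targets the legacy request
     * API those files use, not modern blk-mq. */
    static int issue_special_cmd(struct request_queue *q, u8 opcode, int arg)
    {
            struct request *rq;
            int ret = 0;

            rq = blk_get_request(q, READ, __GFP_WAIT);  /* may sleep for a free request */
            rq->cmd_type = REQ_TYPE_SPECIAL;            /* driver-private command */
            rq->cmd[0] = opcode;
            *(int *)&rq->cmd[1] = arg;
            rq->cmd_len = 5;

            if (blk_execute_rq(q, NULL, rq, 0))         /* blocks until completion */
                    ret = rq->errors;

            blk_put_request(rq);
            return ret;
    }
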
/drivers/s390/char/
raw3270.c
136 struct raw3270_request *rq; in raw3270_request_alloc() local
139 rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA); in raw3270_request_alloc()
140 if (!rq) in raw3270_request_alloc()
145 rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA); in raw3270_request_alloc()
146 if (!rq->buffer) { in raw3270_request_alloc()
147 kfree(rq); in raw3270_request_alloc()
151 rq->size = size; in raw3270_request_alloc()
152 INIT_LIST_HEAD(&rq->list); in raw3270_request_alloc()
157 rq->ccw.cda = __pa(rq->buffer); in raw3270_request_alloc()
158 rq->ccw.flags = CCW_FLAG_SLI; in raw3270_request_alloc()
[all …]
fs3270.c
47 fs3270_wake_up(struct raw3270_request *rq, void *data) in fs3270_wake_up() argument
63 fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq) in fs3270_do_io() argument
69 rq->callback = fs3270_wake_up; in fs3270_do_io()
70 rq->callback_data = &fp->wait; in fs3270_do_io()
80 rc = raw3270_start(view, rq); in fs3270_do_io()
83 wait_event(fp->wait, raw3270_request_final(rq)); in fs3270_do_io()
93 fs3270_reset_callback(struct raw3270_request *rq, void *data) in fs3270_reset_callback() argument
97 fp = (struct fs3270 *) rq->view; in fs3270_reset_callback()
98 raw3270_request_reset(rq); in fs3270_reset_callback()
103 fs3270_restore_callback(struct raw3270_request *rq, void *data) in fs3270_restore_callback() argument
[all …]
/drivers/scsi/device_handler/
scsi_dh_alua.c
113 struct request *rq; in get_alua_req() local
116 rq = blk_get_request(q, rw, GFP_NOIO); in get_alua_req()
118 if (!rq) { in get_alua_req()
124 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { in get_alua_req()
125 blk_put_request(rq); in get_alua_req()
131 rq->cmd_type = REQ_TYPE_BLOCK_PC; in get_alua_req()
132 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | in get_alua_req()
134 rq->retries = ALUA_FAILOVER_RETRIES; in get_alua_req()
135 rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ; in get_alua_req()
137 return rq; in get_alua_req()
[all …]
scsi_dh_emc.c
273 struct request *rq; in get_req() local
276 rq = blk_get_request(sdev->request_queue, in get_req()
278 if (!rq) { in get_req()
283 rq->cmd_len = COMMAND_SIZE(cmd); in get_req()
284 rq->cmd[0] = cmd; in get_req()
289 rq->cmd[1] = 0x10; in get_req()
290 rq->cmd[4] = len; in get_req()
294 rq->cmd[1] = 0x10; in get_req()
295 rq->cmd[8] = len; in get_req()
299 rq->cmd[4] = len; in get_req()
[all …]
scsi_dh_rdac.c
272 struct request *rq; in get_rdac_req() local
275 rq = blk_get_request(q, rw, GFP_NOIO); in get_rdac_req()
277 if (!rq) { in get_rdac_req()
283 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { in get_rdac_req()
284 blk_put_request(rq); in get_rdac_req()
290 rq->cmd_type = REQ_TYPE_BLOCK_PC; in get_rdac_req()
291 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | in get_rdac_req()
293 rq->retries = RDAC_RETRIES; in get_rdac_req()
294 rq->timeout = RDAC_TIMEOUT; in get_rdac_req()
296 return rq; in get_rdac_req()
[all …]
/drivers/infiniband/hw/qib/
qib_srq.c
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in qib_post_srq_receive()
67 spin_lock_irqsave(&srq->rq.lock, flags); in qib_post_srq_receive()
68 wq = srq->rq.wq; in qib_post_srq_receive()
70 if (next >= srq->rq.size) in qib_post_srq_receive()
73 spin_unlock_irqrestore(&srq->rq.lock, flags); in qib_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in qib_post_srq_receive()
87 spin_unlock_irqrestore(&srq->rq.lock, flags); in qib_post_srq_receive()
132 srq->rq.size = srq_init_attr->attr.max_wr + 1; in qib_create_srq()
133 srq->rq.max_sge = srq_init_attr->attr.max_sge; in qib_create_srq()
134 sz = sizeof(struct ib_sge) * srq->rq.max_sge + in qib_create_srq()
[all …]
/drivers/infiniband/hw/ipath/
ipath_srq.c
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in ipath_post_srq_receive()
67 spin_lock_irqsave(&srq->rq.lock, flags); in ipath_post_srq_receive()
68 wq = srq->rq.wq; in ipath_post_srq_receive()
70 if (next >= srq->rq.size) in ipath_post_srq_receive()
73 spin_unlock_irqrestore(&srq->rq.lock, flags); in ipath_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in ipath_post_srq_receive()
87 spin_unlock_irqrestore(&srq->rq.lock, flags); in ipath_post_srq_receive()
135 srq->rq.size = srq_init_attr->attr.max_wr + 1; in ipath_create_srq()
136 srq->rq.max_sge = srq_init_attr->attr.max_sge; in ipath_create_srq()
137 sz = sizeof(struct ib_sge) * srq->rq.max_sge + in ipath_create_srq()
[all …]
/drivers/net/
virtio_net.c
91 struct receive_queue *rq; member
183 static void give_pages(struct receive_queue *rq, struct page *page) in give_pages() argument
189 end->private = (unsigned long)rq->pages; in give_pages()
190 rq->pages = page; in give_pages()
193 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) in get_a_page() argument
195 struct page *p = rq->pages; in get_a_page()
198 rq->pages = (struct page *)p->private; in get_a_page()
234 static struct sk_buff *page_to_skb(struct receive_queue *rq, in page_to_skb() argument
237 struct virtnet_info *vi = rq->vq->vdev->priv; in page_to_skb()
292 give_pages(rq, page); in page_to_skb()
[all …]
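
give_pages() and get_a_page() above keep each receive queue's spare pages on an intrusive LIFO list threaded through page->private, so the queue needs only one head pointer and no extra allocation. A simplified userspace model of that push/pop discipline follows; it pushes one node at a time, whereas the driver's give_pages() can splice a whole chain, and struct page_model is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for struct page: the private field stores the
     * next pointer, which is how virtio_net chains idle pages. */
    struct page_model {
            uintptr_t private_data;
    };

    static struct page_model *pages;  /* list head, like rq->pages */

    static void give_page(struct page_model *p)
    {
            p->private_data = (uintptr_t)pages;  /* chain old head behind p */
            pages = p;
    }

    static struct page_model *get_a_page(void)
    {
            struct page_model *p = pages;

            if (p)
                    pages = (struct page_model *)p->private_data;
            return p;
    }

    int main(void)
    {
            struct page_model a, b;

            give_page(&a);
            give_page(&b);
            printf("%s\n", get_a_page() == &b ? "LIFO pop ok" : "bug");
            return 0;
    }
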
/drivers/usb/misc/
uss720.c
95 struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count); in destroy_async() local
96 struct parport_uss720_private *priv = rq->priv; in destroy_async()
99 if (likely(rq->urb)) in destroy_async()
100 usb_free_urb(rq->urb); in destroy_async()
102 list_del_init(&rq->asynclist); in destroy_async()
104 kfree(rq); in destroy_async()
112 struct uss720_async_request *rq; in async_complete() local
117 rq = urb->context; in async_complete()
118 priv = rq->priv; in async_complete()
123 } else if (rq->dr.bRequest == 3) { in async_complete()
[all …]
/drivers/char/
raw.c
212 struct raw_config_request rq; in raw_ctl_ioctl() local
218 if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) in raw_ctl_ioctl()
221 return bind_set(rq.raw_minor, rq.block_major, rq.block_minor); in raw_ctl_ioctl()
224 if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) in raw_ctl_ioctl()
227 err = bind_get(rq.raw_minor, &dev); in raw_ctl_ioctl()
231 rq.block_major = MAJOR(dev); in raw_ctl_ioctl()
232 rq.block_minor = MINOR(dev); in raw_ctl_ioctl()
234 if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) in raw_ctl_ioctl()
254 struct raw32_config_request rq; in raw_ctl_compat_ioctl() local
260 if (copy_from_user(&rq, user_req, sizeof(rq))) in raw_ctl_compat_ioctl()
[all …]
/drivers/scsi/
scsi_tgt_lib.c
50 struct request *rq; member
82 struct request *rq; in scsi_host_get_command() local
99 rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask); in scsi_host_get_command()
100 if (!rq) in scsi_host_get_command()
109 cmd->request = rq; in scsi_host_get_command()
111 cmd->cmnd = rq->cmd; in scsi_host_get_command()
113 rq->special = cmd; in scsi_host_get_command()
114 rq->cmd_type = REQ_TYPE_SPECIAL; in scsi_host_get_command()
115 rq->cmd_flags |= REQ_TYPE_BLOCK_PC; in scsi_host_get_command()
116 rq->end_io_data = tcmd; in scsi_host_get_command()
[all …]
/drivers/infiniband/hw/cxgb4/
t4.h
329 struct t4_rq rq; member
337 return wq->rq.in_use; in t4_rqes_posted()
342 return wq->rq.in_use == 0; in t4_rq_empty()
347 return wq->rq.in_use == (wq->rq.size - 1); in t4_rq_full()
352 return wq->rq.size - 1 - wq->rq.in_use; in t4_rq_avail()
357 wq->rq.in_use++; in t4_rq_produce()
358 if (++wq->rq.pidx == wq->rq.size) in t4_rq_produce()
359 wq->rq.pidx = 0; in t4_rq_produce()
360 wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); in t4_rq_produce()
361 if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) in t4_rq_produce()
[all …]
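
The t4.h excerpt shows the same reserved-slot occupancy rule as the vnic helpers earlier (t4_rq_avail() is size - 1 - in_use), plus a producer index that wraps with a compare-and-reset rather than a modulo. A small model of t4_rq_produce()'s index handling, with illustrative struct and function names:

    #include <assert.h>

    /* Producer-side bookkeeping as in t4_rq_produce(): bump the in-use
     * count, advance pidx, and wrap at the ring size without division. */
    struct t4_rq_model {
            unsigned int size;    /* ring slots */
            unsigned int in_use;  /* posted but not yet consumed */
            unsigned int pidx;    /* producer index */
    };

    static void rq_produce(struct t4_rq_model *rq)
    {
            rq->in_use++;
            if (++rq->pidx == rq->size)  /* cheap wrap instead of '%' */
                    rq->pidx = 0;
    }

    int main(void)
    {
            struct t4_rq_model rq = { .size = 4, .in_use = 0, .pidx = 3 };

            rq_produce(&rq);
            assert(rq.pidx == 0 && rq.in_use == 1);
            return 0;
    }
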
/drivers/isdn/mISDN/
stack.c
429 struct channel_req rq; in connect_layer1() local
445 rq.protocol = protocol; in connect_layer1()
446 rq.adr.channel = adr->channel; in connect_layer1()
447 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); in connect_layer1()
466 struct channel_req rq, rq2; in connect_Bstack() local
478 rq.protocol = protocol; in connect_Bstack()
479 rq.adr = *adr; in connect_Bstack()
480 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); in connect_Bstack()
483 ch->recv = rq.ch->send; in connect_Bstack()
484 ch->peer = rq.ch; in connect_Bstack()
[all …]
