/drivers/pci/
D | iov.c |
    44 struct pci_sriov *iov = dev->sriov; in pci_iov_set_numvfs() local
    46 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn); in pci_iov_set_numvfs()
    47 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset); in pci_iov_set_numvfs()
    48 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride); in pci_iov_set_numvfs()
    60 struct pci_sriov *iov = dev->sriov; in compute_max_vf_buses() local
    63 for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) { in compute_max_vf_buses()
    65 if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) { in compute_max_vf_buses()
    71 if (busnr > iov->max_VF_buses) in compute_max_vf_buses()
    72 iov->max_VF_buses = busnr; in compute_max_vf_buses()
    145 struct pci_sriov *iov = dev->sriov; in pci_iov_add_virtfn() local
    [all …]
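The iov.c hits show the PF-side NumVFs sequence: program the requested VF count, then read back the first-VF offset and stride the device derives from it (both may change with NumVFs). A minimal sketch of that sequence, assuming `pos` was found earlier with pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); the helper name is illustrative:

    #include <linux/pci.h>

    /* Program NumVFs, then fetch the routing parameters derived from it. */
    static void sriov_set_numvfs_sketch(struct pci_dev *dev, int pos,
                                        u16 nr_virtfn, u16 *offset, u16 *stride)
    {
            pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, nr_virtfn);
            pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, offset);
            pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, stride);
    }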
|
/drivers/usb/usbip/ |
D | vhci_tx.c |
    59 struct kvec *iov; in vhci_send_cmd_submit() local
    75 memset(&iov, 0, sizeof(iov)); in vhci_send_cmd_submit()
    85 iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL); in vhci_send_cmd_submit()
    86 if (!iov) { in vhci_send_cmd_submit()
    99 iov[iovnum].iov_base = &pdu_header; in vhci_send_cmd_submit()
    100 iov[iovnum].iov_len = sizeof(pdu_header); in vhci_send_cmd_submit()
    109 iov[iovnum].iov_base = sg_virt(sg); in vhci_send_cmd_submit()
    110 iov[iovnum].iov_len = sg->length; in vhci_send_cmd_submit()
    114 iov[iovnum].iov_base = urb->transfer_buffer; in vhci_send_cmd_submit()
    115 iov[iovnum].iov_len = in vhci_send_cmd_submit()
    [all …]
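The three usbip TX paths in this directory (vhci_tx.c, stub_tx.c, vudc_tx.c) share one recipe: kcalloc() a kvec array sized for the PDU header plus one entry per data buffer, fill it in order, and push the whole message to the socket with a single kernel_sendmsg() call. A reduced sketch of that recipe, assuming a two-segment message; names are illustrative and error handling is trimmed to the allocation check:

    #include <linux/net.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    static int send_pdu_sketch(struct socket *sock, void *hdr, size_t hdr_len,
                               void *data, size_t data_len)
    {
            struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
            struct kvec *iov;
            int ret;

            /* one kvec for the header, one for the payload */
            iov = kcalloc(2, sizeof(*iov), GFP_KERNEL);
            if (!iov)
                    return -ENOMEM;

            iov[0].iov_base = hdr;
            iov[0].iov_len  = hdr_len;
            iov[1].iov_base = data;
            iov[1].iov_len  = data_len;

            ret = kernel_sendmsg(sock, &msg, iov, 2, hdr_len + data_len);
            kfree(iov);
            return ret;
    }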
|
D | stub_tx.c |
    164 struct kvec *iov = NULL; in stub_send_ret_submit() local
    193 iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL); in stub_send_ret_submit()
    195 if (!iov) { in stub_send_ret_submit()
    217 iov[iovnum].iov_base = &pdu_header; in stub_send_ret_submit()
    218 iov[iovnum].iov_len = sizeof(pdu_header); in stub_send_ret_submit()
    230 iov[iovnum].iov_base = in stub_send_ret_submit()
    232 iov[iovnum].iov_len = in stub_send_ret_submit()
    253 iov[iovnum].iov_base = sg_virt(sg); in stub_send_ret_submit()
    254 iov[iovnum].iov_len = size; in stub_send_ret_submit()
    260 iov[iovnum].iov_base = urb->transfer_buffer; in stub_send_ret_submit()
    [all …]
|
D | vudc_tx.c |
    41 struct kvec iov[1]; in v_send_ret_unlink() local
    50 memset(&iov, 0, sizeof(iov)); in v_send_ret_unlink()
    56 iov[0].iov_base = &pdu_header; in v_send_ret_unlink()
    57 iov[0].iov_len = sizeof(pdu_header); in v_send_ret_unlink()
    60 ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov, in v_send_ret_unlink()
    78 struct kvec *iov = NULL; in v_send_ret_submit() local
    100 iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL); in v_send_ret_submit()
    101 if (!iov) { in v_send_ret_submit()
    114 iov[iovnum].iov_base = &pdu_header; in v_send_ret_submit()
    115 iov[iovnum].iov_len = sizeof(pdu_header); in v_send_ret_submit()
    [all …]
|
/drivers/vhost/ |
D | vringh.c |
    74 static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov, in vringh_iov_xfer() argument
    81 while (len && iov->i < iov->used) { in vringh_iov_xfer()
    84 partlen = min(iov->iov[iov->i].iov_len, len); in vringh_iov_xfer()
    85 err = xfer(iov->iov[iov->i].iov_base, ptr, partlen); in vringh_iov_xfer()
    91 iov->consumed += partlen; in vringh_iov_xfer()
    92 iov->iov[iov->i].iov_len -= partlen; in vringh_iov_xfer()
    93 iov->iov[iov->i].iov_base += partlen; in vringh_iov_xfer()
    95 if (!iov->iov[iov->i].iov_len) { in vringh_iov_xfer()
    97 iov->iov[iov->i].iov_len = iov->consumed; in vringh_iov_xfer()
    98 iov->iov[iov->i].iov_base -= iov->consumed; in vringh_iov_xfer()
    [all …]
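vringh_iov_xfer() is the generic walk over a vringh_kiov: transfer as much as the current segment allows, trim that segment, and advance once it is empty (the consumed/rewind lines at 97-98 restore the kiov for reuse). A simplified, hypothetical version of the same walk over a plain kvec array, without the rewind bookkeeping:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/uio.h>

    /* Copy up to @len bytes out of a kvec array, consuming segments in order. */
    static size_t kiov_copy_sketch(struct kvec *kv, unsigned int nseg,
                                   unsigned int *idx, void *dst, size_t len)
    {
            size_t done = 0;

            while (len && *idx < nseg) {
                    size_t part = min(kv[*idx].iov_len, len);

                    memcpy(dst + done, kv[*idx].iov_base, part);
                    done += part;
                    len -= part;
                    kv[*idx].iov_base += part;
                    kv[*idx].iov_len -= part;
                    if (!kv[*idx].iov_len)
                            (*idx)++;       /* segment exhausted, move on */
            }
            return done;
    }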
|
D | vsock.c |
    120 head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), in vhost_transport_do_send_pkt()
    150 iov_len = iov_length(&vq->iov[out], in); in vhost_transport_do_send_pkt()
    157 iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len); in vhost_transport_do_send_pkt()
    332 len = iov_length(vq->iov, out); in vhost_vsock_alloc_pkt()
    333 iov_iter_init(&iov_iter, WRITE, vq->iov, out, len); in vhost_vsock_alloc_pkt()
    415 head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), in vhost_vsock_handle_tx_kick()
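The vhost hits follow one pattern: vhost_get_vq_desc() splits the next descriptor chain into out (guest-to-host) and in (host-to-guest) entries of vq->iov, iov_length() totals the relevant slice, and iov_iter_init() wraps it for the copy_to/from_iter() helpers. A minimal sketch of the guest-to-host setup, with vq_iov standing in for vq->iov; WRITE marks the iovecs as the copy source, matching the vhost_vsock_alloc_pkt() hit above:

    #include <linux/uio.h>

    static void vhost_tx_iter_sketch(struct iov_iter *iter,
                                     struct iovec *vq_iov, unsigned int out)
    {
            size_t len = iov_length(vq_iov, out);

            /* copy_from_iter() can now drain the guest buffers */
            iov_iter_init(iter, WRITE, vq_iov, out, len);
    }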
|
D | scsi.c |
    462 head = vhost_get_vq_desc(vq, vq->iov, in vhost_scsi_do_evt_work()
    463 ARRAY_SIZE(vq->iov), &out, &in, in vhost_scsi_do_evt_work()
    476 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) { in vhost_scsi_do_evt_work()
    478 vq->iov[out].iov_len); in vhost_scsi_do_evt_work()
    488 eventp = vq->iov[out].iov_base; in vhost_scsi_do_evt_work()
    655 if (!iter || !iter->iov) { in vhost_scsi_calc_sgls()
    804 resp = vq->iov[out].iov_base; in vhost_scsi_send_bad_target()
    818 vc->head = vhost_get_vq_desc(vq, vq->iov, in vhost_scsi_get_desc()
    819 ARRAY_SIZE(vq->iov), &vc->out, &vc->in, in vhost_scsi_get_desc()
    842 vc->out_size = iov_length(vq->iov, vc->out); in vhost_scsi_get_desc()
    [all …]
|
/drivers/crypto/cavium/nitrox/ |
D | nitrox_mbx.c |
    125 vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues); in nitrox_pf2vf_mbox_handler()
    126 vfdev = ndev->iov.vfdev + vfno; in nitrox_pf2vf_mbox_handler()
    137 queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); in nitrox_pf2vf_mbox_handler()
    147 vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues); in nitrox_pf2vf_mbox_handler()
    148 vfdev = ndev->iov.vfdev + vfno; in nitrox_pf2vf_mbox_handler()
    160 queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); in nitrox_pf2vf_mbox_handler()
    171 ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs, in nitrox_mbox_init()
    173 if (!ndev->iov.vfdev) in nitrox_mbox_init()
    176 for (i = 0; i < ndev->iov.num_vfs; i++) { in nitrox_mbox_init()
    177 vfdev = ndev->iov.vfdev + i; in nitrox_mbox_init()
    [all …]
|
D | nitrox_sriov.c |
    174 ndev->iov.num_vfs = num_vfs; in nitrox_sriov_enable()
    175 ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode); in nitrox_sriov_enable()
    194 ndev->iov.num_vfs = 0; in nitrox_sriov_enable()
    216 ndev->iov.num_vfs = 0; in nitrox_sriov_disable()
    217 ndev->iov.max_vf_queues = 0; in nitrox_sriov_disable()
|
/drivers/net/ethernet/google/gve/ |
D | gve_tx.c |
    75 struct gve_tx_iovec iov[2]) in gve_tx_alloc_fifo()
    95 iov[0].iov_offset = fifo->head; in gve_tx_alloc_fifo()
    96 iov[0].iov_len = bytes; in gve_tx_alloc_fifo()
    105 iov[0].iov_len -= overflow; in gve_tx_alloc_fifo()
    106 iov[1].iov_offset = 0; /* Start of fifo*/ in gve_tx_alloc_fifo()
    107 iov[1].iov_len = overflow; in gve_tx_alloc_fifo()
    115 iov[nfrags - 1].iov_padding = padding; in gve_tx_alloc_fifo()
    438 &info->iov[0]); in gve_tx_add_skb()
    441 &info->iov[payload_iov]); in gve_tx_add_skb()
    445 info->iov[hdr_nfrags - 1].iov_offset); in gve_tx_add_skb()
    [all …]
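gve_tx_alloc_fifo() carves a byte range out of a circular TX FIFO; when the range runs past the end of the buffer it is described as two fragments, the second starting back at offset 0. A self-contained sketch of that split, with struct fifo_frag as an illustrative stand-in for struct gve_tx_iovec:

    #include <linux/types.h>

    struct fifo_frag {
            u32 offset;
            u32 len;
    };

    /* Returns the number of fragments (1, or 2 if the range wraps). */
    static int fifo_alloc_sketch(u32 head, u32 size, u32 bytes,
                                 struct fifo_frag frag[2])
    {
            u32 overflow = (head + bytes > size) ? head + bytes - size : 0;
            int nfrags = 1;

            frag[0].offset = head;
            frag[0].len = bytes;
            if (overflow) {                 /* allocation wraps around */
                    frag[0].len -= overflow;
                    frag[1].offset = 0;     /* start of fifo */
                    frag[1].len = overflow;
                    nfrags = 2;
            }
            return nfrags;
    }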
|
/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_iov.c |
    187 hw->iov.ops.reset_resources(hw, vf_info); in fm10k_iov_event()
    236 hw->iov.ops.reset_lport(hw, vf_info); in fm10k_iov_mbx()
    242 hw->iov.ops.reset_resources(hw, vf_info); in fm10k_iov_mbx()
    300 hw->iov.ops.reset_resources(hw, vf_info); in fm10k_iov_suspend()
    301 hw->iov.ops.reset_lport(hw, vf_info); in fm10k_iov_suspend()
    348 hw->iov.ops.assign_resources(hw, num_vfs, num_vfs); in fm10k_iov_resume()
    356 dglort.vsi_l = fls(hw->iov.total_vfs - 1); in fm10k_iov_resume()
    370 hw->iov.ops.set_lport(hw, vf_info, i, in fm10k_iov_resume()
    374 hw->iov.ops.assign_default_mac_vlan(hw, vf_info); in fm10k_iov_resume()
    402 hw->iov.ops.assign_default_mac_vlan(hw, vf_info); in fm10k_iov_update_pvid()
    [all …]
|
D | fm10k_pf.c |
    177 hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7; in fm10k_init_hw_pf()
    443 if (!hw->iov.num_vfs) in fm10k_update_int_moderator_pf()
    577 u16 num_pools = hw->iov.num_pools; in fm10k_queues_per_pool()
    585 u16 num_vfs = hw->iov.num_vfs; in fm10k_vf_queue_index()
    595 u16 num_pools = hw->iov.num_pools; in fm10k_vectors_per_pool()
    631 if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs)) in fm10k_iov_assign_resources_pf()
    635 hw->iov.num_vfs = num_vfs; in fm10k_iov_assign_resources_pf()
    636 hw->iov.num_pools = num_pools; in fm10k_iov_assign_resources_pf()
    741 if (vf_idx >= hw->iov.num_vfs) in fm10k_iov_configure_tc_pf()
    799 if (vf_idx >= hw->iov.num_vfs) in fm10k_iov_assign_int_moderator_pf()
    [all …]
|
/drivers/infiniband/hw/qib/ |
D | qib_user_sdma.c |
    560 const struct iovec *iov, in qib_user_sdma_coalesce() argument
    581 iov[i].iov_base, iov[i].iov_len); in qib_user_sdma_coalesce()
    587 mpage += iov[i].iov_len; in qib_user_sdma_coalesce()
    588 len += iov[i].iov_len; in qib_user_sdma_coalesce()
    605 static int qib_user_sdma_num_pages(const struct iovec *iov) in qib_user_sdma_num_pages() argument
    607 const unsigned long addr = (unsigned long) iov->iov_base; in qib_user_sdma_num_pages()
    608 const unsigned long len = iov->iov_len; in qib_user_sdma_num_pages()
    718 const struct iovec *iov, in qib_user_sdma_pin_pkt() argument
    725 const int npages = qib_user_sdma_num_pages(iov + idx); in qib_user_sdma_pin_pkt()
    726 const unsigned long addr = (unsigned long) iov[idx].iov_base; in qib_user_sdma_pin_pkt()
    [all …]
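qib_user_sdma_num_pages() counts how many pages a user iovec touches before they are pinned: round the start address and the last byte down to page boundaries and take the distance. Restated, assuming the usual PAGE_MASK/PAGE_SHIFT definitions:

    #include <linux/mm.h>
    #include <linux/uio.h>

    static int iovec_num_pages_sketch(const struct iovec *iov)
    {
            const unsigned long addr = (unsigned long)iov->iov_base;
            const unsigned long len = iov->iov_len;
            const unsigned long spage = addr & PAGE_MASK;             /* first page */
            const unsigned long epage = (addr + len - 1) & PAGE_MASK; /* last page */

            return 1 + ((epage - spage) >> PAGE_SHIFT);
    }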
|
/drivers/infiniband/sw/siw/ |
D | siw_qp.c |
    388 struct kvec iov[3]; in siw_send_terminate() local
    425 iov[0].iov_base = term; in siw_send_terminate()
    426 iov[0].iov_len = sizeof(*term); in siw_send_terminate()
    489 iov[1].iov_base = rreq; in siw_send_terminate()
    490 iov[1].iov_len = sizeof(*rreq); in siw_send_terminate()
    497 iov[1].iov_base = rx_hdr; in siw_send_terminate()
    501 iov[1].iov_len = in siw_send_terminate()
    504 iov[1].iov_len = in siw_send_terminate()
    515 iov[1].iov_base = rx_hdr; in siw_send_terminate()
    519 iov[1].iov_len = in siw_send_terminate()
    [all …]
|
D | siw_qp_tx.c |
    297 struct kvec iov = { .iov_base = in siw_tx_ctrl() local
    301 int rv = kernel_sendmsg(s, &msg, &iov, 1, in siw_tx_ctrl()
    428 struct kvec iov[MAX_ARRAY]; in siw_tx_hdt() local
    446 iov[0].iov_base = in siw_tx_hdt()
    448 iov[0].iov_len = hdr_len = in siw_tx_hdt()
    472 iov[seg].iov_base = in siw_tx_hdt()
    474 iov[seg].iov_len = sge_len; in siw_tx_hdt()
    478 iov[seg].iov_base, in siw_tx_hdt()
    508 iov[seg].iov_base = kmap(p) + fp_off; in siw_tx_hdt()
    509 iov[seg].iov_len = plen; in siw_tx_hdt()
    [all …]
|
/drivers/xen/xenbus/ |
D | xenbus_xs.c |
    657 struct kvec iov[2]; in xs_watch() local
    659 iov[0].iov_base = (void *)path; in xs_watch()
    660 iov[0].iov_len = strlen(path) + 1; in xs_watch()
    661 iov[1].iov_base = (void *)token; in xs_watch()
    662 iov[1].iov_len = strlen(token) + 1; in xs_watch()
    664 return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, in xs_watch()
    665 ARRAY_SIZE(iov), NULL)); in xs_watch()
    670 struct kvec iov[2]; in xs_unwatch() local
    672 iov[0].iov_base = (char *)path; in xs_unwatch()
    673 iov[0].iov_len = strlen(path) + 1; in xs_unwatch()
    [all …]
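xs_watch()/xs_unwatch() build a xenstore request out of two NUL-terminated strings without concatenating them: each string, terminator included, becomes one kvec, and xs_talkv() sends the array. A sketch of just the kvec setup:

    #include <linux/string.h>
    #include <linux/uio.h>

    static void xs_watch_iov_sketch(struct kvec iov[2],
                                    const char *path, const char *token)
    {
            iov[0].iov_base = (void *)path;
            iov[0].iov_len  = strlen(path) + 1;     /* include the NUL */
            iov[1].iov_base = (void *)token;
            iov[1].iov_len  = strlen(token) + 1;
    }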
|
/drivers/misc/mic/vop/ |
D | vop_vringh.c |
    677 static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov) in vop_vringh_iov_consumed() argument
    680 u32 total = iov->consumed; in vop_vringh_iov_consumed()
    682 for (i = 0; i < iov->i; i++) in vop_vringh_iov_consumed()
    683 total += iov->iov[i].iov_len; in vop_vringh_iov_consumed()
    694 static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov, in vop_vringh_copy() argument
    701 while (len && iov->i < iov->used) { in vop_vringh_copy()
    702 struct kvec *kiov = &iov->iov[iov->i]; in vop_vringh_copy()
    724 iov->consumed += partlen; in vop_vringh_copy()
    729 kiov->iov_len = iov->consumed; in vop_vringh_copy()
    730 kiov->iov_base -= iov->consumed; in vop_vringh_copy()
    [all …]
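vop_vringh_iov_consumed() totals progress through a vringh_kiov: every segment before the current index counts in full, plus the partial `consumed` count within the current one. A near-direct restatement:

    #include <linux/vringh.h>

    static u32 kiov_consumed_sketch(const struct vringh_kiov *iov)
    {
            u32 total = iov->consumed;      /* partial progress in iov[i] */
            size_t i;

            for (i = 0; i < iov->i; i++)
                    total += iov->iov[i].iov_len;
            return total;
    }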
|
/drivers/nvme/target/ |
D | tcp.c |
    62 struct kvec *iov; member
    194 cmd->iov = NULL; in nvmet_tcp_get_cmd()
    283 struct kvec *iov = cmd->iov; in nvmet_tcp_map_pdu_iovec() local
    297 iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset; in nvmet_tcp_map_pdu_iovec()
    298 iov->iov_len = iov_len; in nvmet_tcp_map_pdu_iovec()
    302 iov++; in nvmet_tcp_map_pdu_iovec()
    305 iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, in nvmet_tcp_map_pdu_iovec()
    343 cmd->iov = kmalloc_array(cmd->req.sg_cnt, in nvmet_tcp_map_data()
    344 sizeof(*cmd->iov), GFP_KERNEL); in nvmet_tcp_map_data()
    345 if (!cmd->iov) in nvmet_tcp_map_data()
    [all …]
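nvmet-tcp receives PDU data directly into the command's scatterlist by building one kvec per SG entry (a kmap'ed page plus its offset) and pointing the receive msghdr's iov_iter at the array. A simplified sketch; the real code also handles a starting offset within the sg list and kunmaps the pages when done:

    #include <linux/highmem.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    static int map_sg_to_kvec_sketch(struct scatterlist *sgl, int nents,
                                     struct iov_iter *iter, size_t total)
    {
            struct kvec *iov, *kv;
            struct scatterlist *sg;
            int i;

            iov = kmalloc_array(nents, sizeof(*iov), GFP_KERNEL);
            if (!iov)
                    return -ENOMEM;

            kv = iov;
            for_each_sg(sgl, sg, nents, i) {
                    kv->iov_base = kmap(sg_page(sg)) + sg->offset;
                    kv->iov_len = sg->length;
                    kv++;
            }
            /* READ direction: the socket fills these buffers on receive */
            iov_iter_kvec(iter, READ, iov, nents, total);
            return 0;
    }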
|
/drivers/target/iscsi/ |
D | iscsi_target.c |
    504 struct kvec *iov; in iscsit_xmit_nondatain_pdu() local
    508 iov = &cmd->iov_misc[0]; in iscsit_xmit_nondatain_pdu()
    509 iov[niov].iov_base = cmd->pdu; in iscsit_xmit_nondatain_pdu()
    510 iov[niov++].iov_len = ISCSI_HDR_LEN; in iscsit_xmit_nondatain_pdu()
    519 iov[0].iov_len += ISCSI_CRC_LEN; in iscsit_xmit_nondatain_pdu()
    529 iov[niov].iov_base = (void *)data_buf; in iscsit_xmit_nondatain_pdu()
    530 iov[niov++].iov_len = data_buf_len; in iscsit_xmit_nondatain_pdu()
    534 iov[niov].iov_base = &cmd->pad_bytes; in iscsit_xmit_nondatain_pdu()
    535 iov[niov++].iov_len = padding; in iscsit_xmit_nondatain_pdu()
    547 iov[niov].iov_base = &cmd->data_crc; in iscsit_xmit_nondatain_pdu()
    [all …]
|
D | iscsi_target_util.c |
    1066 struct kvec *iov; in iscsit_send_tx_data() local
    1072 iov = &cmd->iov_data[0]; in iscsit_send_tx_data()
    1075 iov = &cmd->iov_misc[0]; in iscsit_send_tx_data()
    1079 tx_sent = tx_data(conn, &iov[0], iov_count, tx_size); in iscsit_send_tx_data()
    1097 struct kvec iov; in iscsit_fe_sendpage_sg() local
    1107 iov.iov_base = cmd->pdu; in iscsit_fe_sendpage_sg()
    1108 iov.iov_len = tx_hdr_size; in iscsit_fe_sendpage_sg()
    1110 tx_sent = tx_data(conn, &iov, 1, tx_hdr_size); in iscsit_fe_sendpage_sg()
    1242 iov_iter_kvec(&msg.msg_iter, READ, count->iov, count->iov_count, data); in iscsit_do_rx_data()
    1261 struct kvec *iov, in rx_data() argument
    [all …]
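tx_data()/rx_data() wrap the loop every kvec sender needs: kernel_sendmsg() may complete only part of the request, so the caller retries until the full size has gone out. A sketch of the send side using the iov_iter form, which advances msg_iter across short sends automatically (the iscsit code instead adjusts its kvec array by hand); names are illustrative:

    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    static int send_all_sketch(struct socket *sock, struct kvec *iov,
                               int iov_count, size_t size)
    {
            struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
            int sent = 0;

            iov_iter_kvec(&msg.msg_iter, WRITE, iov, iov_count, size);
            while (msg_data_left(&msg)) {
                    int rc = sock_sendmsg(sock, &msg);      /* advances msg_iter */

                    if (rc <= 0)
                            return rc ? rc : -ECONNRESET;
                    sent += rc;
            }
            return sent;
    }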
|
/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_sriov.c |
    1055 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_bus() local
    1057 return dev->bus->number + ((dev->devfn + iov->offset + in bnx2x_vf_bus()
    1058 iov->stride * vfid) >> 8); in bnx2x_vf_bus()
    1064 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_devfn() local
    1066 return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; in bnx2x_vf_devfn()
    1073 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_set_bars() local
    1079 size /= iov->total; in bnx2x_vf_set_bars()
    1123 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) in bnx2x_sriov_pci_cfg_info() argument
    1134 iov->pos = pos; in bnx2x_sriov_pci_cfg_info()
    1136 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); in bnx2x_sriov_pci_cfg_info()
    [all …]
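bnx2x_vf_bus()/bnx2x_vf_devfn() apply the SR-IOV routing-ID rule: a VF's routing ID is the PF's routing ID plus First VF Offset plus VF Stride times the VF index; bits 8 and up select the bus, the low byte is the devfn. Restated with illustrative helper names:

    #include <linux/pci.h>

    static u8 vf_bus_sketch(struct pci_dev *pf, u16 offset, u16 stride, int vfid)
    {
            return pf->bus->number + ((pf->devfn + offset + stride * vfid) >> 8);
    }

    static u8 vf_devfn_sketch(struct pci_dev *pf, u16 offset, u16 stride, int vfid)
    {
            return (pf->devfn + offset + stride * vfid) & 0xff;
    }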
|
/drivers/mmc/host/ |
D | sdhci-omap.c |
    127 bool power_on, unsigned int iov) in sdhci_omap_set_pbias() argument
    136 ret = regulator_set_voltage(omap_host->pbias, iov, iov); in sdhci_omap_set_pbias()
    168 unsigned int iov) in sdhci_omap_enable_iov() argument
    179 ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov); in sdhci_omap_enable_iov()
    186 ret = sdhci_omap_set_pbias(omap_host, true, iov); in sdhci_omap_enable_iov()
    511 unsigned int iov; in sdhci_omap_start_signal_voltage_switch() local
    532 iov = IOV_3V3; in sdhci_omap_start_signal_voltage_switch()
    544 iov = IOV_1V8; in sdhci_omap_start_signal_voltage_switch()
    549 ret = sdhci_omap_enable_iov(omap_host, iov); in sdhci_omap_start_signal_voltage_switch()
    551 dev_err(dev, "failed to switch IO voltage to %dmV\n", iov); in sdhci_omap_start_signal_voltage_switch()
    [all …]
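Note that `iov` here is an IO voltage, not an iovec: sdhci-omap pins the rail to a single level by passing the same value as both bounds to regulator_set_voltage(), which takes microvolts (the driver's IOV_3V3/IOV_1V8 constants are microvolt values). A one-line sketch:

    #include <linux/regulator/consumer.h>

    static int set_io_voltage_sketch(struct regulator *vqmmc, unsigned int iov)
    {
            /* min == max pins the regulator to exactly 'iov' microvolts */
            return regulator_set_voltage(vqmmc, iov, iov);
    }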
|
/drivers/infiniband/hw/hfi1/ |
D | user_sdma.c |
    500 memcpy(&req->iovs[i].iov, in hfi1_user_sdma_process_request()
    502 sizeof(req->iovs[i].iov)); in hfi1_user_sdma_process_request()
    508 req->data_len += req->iovs[i].iov.iov_len; in hfi1_user_sdma_process_request()
    727 base = (unsigned long)iovec->iov.iov_base; in user_sdma_txadd()
    821 if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { in user_sdma_send_pkts()
    968 ((unsigned long)iovec->iov.iov_base + in pin_sdma_pages()
    980 node->rb.len = iovec->iov.iov_len; in pin_sdma_pages()
    1001 struct iovec *iov; in pin_vector_pages() local
    1007 iovec->iov.iov_base, in pin_vector_pages()
    1008 iovec->iov.iov_len, &rb_node); in pin_vector_pages()
    [all …]
|
/drivers/media/dvb-core/ |
D | dvb_net.c |
    59 static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt ) in iov_crc32() argument
    63 c = crc32_be( c, iov[j].iov_base, iov[j].iov_len ); in iov_crc32()
    645 struct kvec iov[3], in dvb_net_ule_check_crc()
    659 hexdump(iov[0].iov_base, iov[0].iov_len); in dvb_net_ule_check_crc()
    660 hexdump(iov[1].iov_base, iov[1].iov_len); in dvb_net_ule_check_crc()
    661 hexdump(iov[2].iov_base, iov[2].iov_len); in dvb_net_ule_check_crc()
    822 struct kvec iov[3] = { in dvb_net_ule() local
    835 ule_crc = iov_crc32(ule_crc, iov, 3); in dvb_net_ule()
    842 dvb_net_ule_check_crc(&h, iov, ule_crc, expected_crc); in dvb_net_ule()
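iov_crc32() folds a big-endian CRC32 across every segment of a kvec array, so the ULE payload never has to be copied into one linear buffer just to be checksummed. A restatement:

    #include <linux/crc32.h>
    #include <linux/uio.h>

    static u32 iov_crc32_sketch(u32 c, const struct kvec *iov, unsigned int cnt)
    {
            unsigned int j;

            for (j = 0; j < cnt; j++)
                    c = crc32_be(c, iov[j].iov_base, iov[j].iov_len);
            return c;
    }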
|
/drivers/target/ |
D | target_core_user.c |
    635 static inline void new_iov(struct iovec **iov, int *iov_cnt) in new_iov() argument
    640 (*iov)++; in new_iov()
    643 iovec = *iov; in new_iov()
    657 static inline size_t iov_tail(struct iovec *iov) in iov_tail() argument
    659 return (size_t)iov->iov_base + iov->iov_len; in iov_tail()
    664 unsigned int data_nents, struct iovec **iov, in scatter_data_area() argument
    702 to_offset == iov_tail(*iov)) { in scatter_data_area()
    708 (*iov)->iov_len += copy_bytes; in scatter_data_area()
    715 new_iov(iov, iov_cnt); in scatter_data_area()
    716 (*iov)->iov_base = (void __user *)to_offset; in scatter_data_area()
    [all …]
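scatter_data_area() coalesces its destination iovecs: if the next chunk starts exactly at the tail of the current iovec it grows that iovec, otherwise it starts a new entry (new_iov() only advances the pointer once at least one entry exists). A simplified sketch of that rule, with to_offset being a destination offset encoded as a user pointer, as in the tcmu code:

    #include <linux/uio.h>

    /* End offset of the current iovec, as in the iov_tail() hit above. */
    static size_t iov_tail_sketch(struct iovec *iov)
    {
            return (size_t)iov->iov_base + iov->iov_len;
    }

    static void add_range_sketch(struct iovec **iov, int *iov_cnt,
                                 size_t to_offset, size_t copy_bytes)
    {
            if (*iov_cnt && to_offset == iov_tail_sketch(*iov)) {
                    (*iov)->iov_len += copy_bytes;  /* contiguous: extend */
            } else {
                    if (*iov_cnt)
                            (*iov)++;               /* move to a fresh entry */
                    (*iov_cnt)++;
                    (*iov)->iov_base = (void __user *)to_offset;
                    (*iov)->iov_len = copy_bytes;
            }
    }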
|