Lines Matching refs:vs
(each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark how vs is used at that reference)

241 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,  in vhost_scsi_init_inflight()  argument
249 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
254 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
256 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
259 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
260 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
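The vhost_scsi_init_inflight() fragments above keep two inflight slots per virtqueue and flip inflight_idx with idx ^ 1, so a flush can wait for the old slot to drain while new requests charge the other one; vhost_scsi_flush() (lines 1422-1439 below) appears to pair this flip with vhost_work_dev_flush(). A minimal userspace sketch of the flip, using illustrative names (struct fake_vq, flip_inflight) that are not from the driver:

#include <stdio.h>

struct inflight { int count; };                 /* stand-in for the kernel's refcounted struct */

struct fake_vq {
        struct inflight inflights[2];           /* two generations of in-flight tracking */
        int inflight_idx;                       /* slot that new requests charge         */
};

/* Capture the active slot and flip to the other one, mirroring the
 * "inflight_idx = idx ^ 1" step in the fragments above. */
static struct inflight *flip_inflight(struct fake_vq *vq)
{
        struct inflight *old = &vq->inflights[vq->inflight_idx];

        vq->inflight_idx ^= 1;
        return old;
}

int main(void)
{
        struct fake_vq vq = { .inflights = { { 3 }, { 0 } }, .inflight_idx = 0 };
        struct inflight *old = flip_inflight(&vq);

        printf("old slot: %d in flight, new slot: %d\n",
               old->count, vq.inflights[vq.inflight_idx].count);
        return 0;
}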
368 struct vhost_scsi *vs = cmd->tvc_vhost; in vhost_scsi_release_cmd() local
370 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); in vhost_scsi_release_cmd()
371 vhost_work_queue(&vs->dev, &vs->vs_completion_work); in vhost_scsi_release_cmd()
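vhost_scsi_release_cmd() pushes each finished command onto vs_completion_list with llist_add() and queues vs_completion_work; vhost_scsi_complete_cmd_work() (lines 534-579 below) later detaches the whole list with llist_del_all() and signals the guest. A userspace sketch of that push-then-drain-all pattern, re-created with C11 atomics rather than the kernel's llist API (node, push, del_all are illustrative names):

#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static _Atomic(struct node *) completion_list;  /* head of the lock-free list */

/* Producer side: push a completed command without taking a lock. */
static void push(struct node *n)
{
        struct node *old = atomic_load(&completion_list);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&completion_list, &old, n));
}

/* Consumer side: detach the whole list in one shot, then walk it. */
static struct node *del_all(void)
{
        return atomic_exchange(&completion_list, NULL);
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };

        push(&a);
        push(&b);
        for (struct node *n = del_all(); n; n = n->next)
                printf("completing cmd %d\n", n->id);
        return 0;
}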
423 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_free_evt() argument
425 vs->vs_events_nr--; in vhost_scsi_free_evt()
430 vhost_scsi_allocate_evt(struct vhost_scsi *vs, in vhost_scsi_allocate_evt() argument
433 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
436 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { in vhost_scsi_allocate_evt()
437 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
444 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
450 vs->vs_events_nr++; in vhost_scsi_allocate_evt()
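vhost_scsi_allocate_evt() caps the number of queued events and sets vs_events_missed when an event cannot be queued, either because too many are already pending or because allocation failed; vhost_scsi_evt_handle_kick() (line 1406 below) sends a VIRTIO_SCSI_T_NO_EVENT notification if that flag is set, and vhost_scsi_do_evt_work() clears it. A sketch of the throttle with placeholder names (MAX_EVENTS, struct evt) rather than the driver's definitions:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_EVENTS 128                  /* placeholder for VHOST_SCSI_MAX_EVENT */

struct evt { int event; int reason; };

static int  events_nr;                  /* events currently queued              */
static bool events_missed;              /* remember that at least one was lost  */

static struct evt *allocate_evt(int event, int reason)
{
        struct evt *e;

        if (events_nr > MAX_EVENTS) {   /* too many pending: drop and flag */
                events_missed = true;
                return NULL;
        }
        e = malloc(sizeof(*e));
        if (!e) {                       /* allocation failure also flags a miss */
                events_missed = true;
                return NULL;
        }
        e->event = event;
        e->reason = reason;
        events_nr++;
        return e;
}

int main(void)
{
        struct evt *e = allocate_evt(1, 0);

        printf("queued=%d missed=%d\n", events_nr, events_missed);
        free(e);
        return 0;
}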
461 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_do_evt_work() argument
463 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
470 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
475 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_do_evt_work()
480 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
484 if (vhost_enable_notify(&vs->dev, vq)) in vhost_scsi_do_evt_work()
486 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
493 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
497 if (vs->vs_events_missed) { in vhost_scsi_do_evt_work()
499 vs->vs_events_missed = false; in vhost_scsi_do_evt_work()
505 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_do_evt_work()
512 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_evt_work() local
514 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
519 llnode = llist_del_all(&vs->vs_event_list); in vhost_scsi_evt_work()
521 vhost_scsi_do_evt_work(vs, evt); in vhost_scsi_evt_work()
522 vhost_scsi_free_evt(vs, evt); in vhost_scsi_evt_work()
534 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_complete_cmd_work() local
545 llnode = llist_del_all(&vs->vs_completion_list); in vhost_scsi_complete_cmd_work()
568 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
579 vhost_signal(&vs->dev, &vs->vqs[vq].vq); in vhost_scsi_complete_cmd_work()
803 vhost_scsi_send_bad_target(struct vhost_scsi *vs, in vhost_scsi_send_bad_target() argument
816 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_send_bad_target()
822 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq, in vhost_scsi_get_desc() argument
840 if (unlikely(vhost_enable_notify(&vs->dev, vq))) { in vhost_scsi_get_desc()
841 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_get_desc()
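vhost_scsi_get_desc() shows the usual vhost idle transition: when the ring looks empty it re-enables guest notifications with vhost_enable_notify(), and if that reports that more work arrived in the meantime it calls vhost_disable_notify() and retries instead of going idle. A self-contained toy model of that re-check loop (ring_empty, enable_kick, disable_kick, handle_one are stand-ins, not vhost APIs):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the vhost ring and kick helpers. */
static int pending = 3;
static bool late_arrival = true;        /* simulate a descriptor racing in */

static bool ring_empty(void)   { return pending == 0; }
static void handle_one(void)   { printf("handled, %d left\n", --pending); }
static void disable_kick(void) { }
static bool enable_kick(void)           /* true => new work arrived meanwhile */
{
        if (late_arrival) {
                late_arrival = false;
                pending = 1;
                return true;
        }
        return false;
}

static void poll_ring(void)
{
        for (;;) {
                if (!ring_empty()) {
                        handle_one();
                        continue;
                }
                /* Ring looked empty: re-enable guest kicks, then re-check,
                 * because a descriptor may have landed in the window before
                 * kicks were turned back on. */
                if (enable_kick()) {
                        disable_kick();
                        continue;       /* lost the race: keep polling */
                }
                break;                  /* truly idle: wait for the next kick */
        }
}

int main(void)
{
        poll_ring();
        return 0;
}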
925 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_handle_vq() argument
953 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
956 ret = vhost_scsi_get_desc(vs, vq, &vc); in vhost_scsi_handle_vq()
1090 cmd->tvc_vhost = vs; in vhost_scsi_handle_vq()
1127 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); in vhost_scsi_handle_vq()
1134 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq, in vhost_scsi_send_tmf_resp() argument
1150 vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0); in vhost_scsi_send_tmf_resp()
1172 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg, in vhost_scsi_handle_tmf() argument
1203 tmf->vhost = vs; in vhost_scsi_handle_tmf()
1221 vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out], in vhost_scsi_handle_tmf()
1226 vhost_scsi_send_an_resp(struct vhost_scsi *vs, in vhost_scsi_send_an_resp() argument
1242 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); in vhost_scsi_send_an_resp()
1248 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_ctl_handle_vq() argument
1270 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_ctl_handle_vq()
1273 ret = vhost_scsi_get_desc(vs, vq, &vc); in vhost_scsi_ctl_handle_vq()
1337 vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc); in vhost_scsi_ctl_handle_vq()
1339 vhost_scsi_send_an_resp(vs, vq, &vc); in vhost_scsi_ctl_handle_vq()
1350 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); in vhost_scsi_ctl_handle_vq()
1360 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_ctl_handle_kick() local
1363 vhost_scsi_ctl_handle_vq(vs, vq); in vhost_scsi_ctl_handle_kick()
1367 vhost_scsi_send_evt(struct vhost_scsi *vs, in vhost_scsi_send_evt() argument
1375 evt = vhost_scsi_allocate_evt(vs, event, reason); in vhost_scsi_send_evt()
1392 llist_add(&evt->list, &vs->vs_event_list); in vhost_scsi_send_evt()
1393 vhost_work_queue(&vs->dev, &vs->vs_event_work); in vhost_scsi_send_evt()
1400 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_evt_handle_kick() local
1406 if (vs->vs_events_missed) in vhost_scsi_evt_handle_kick()
1407 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); in vhost_scsi_evt_handle_kick()
1416 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_handle_kick() local
1418 vhost_scsi_handle_vq(vs, vq); in vhost_scsi_handle_kick()
1422 static void vhost_scsi_flush(struct vhost_scsi *vs) in vhost_scsi_flush() argument
1428 vhost_scsi_init_inflight(vs, old_inflight); in vhost_scsi_flush()
1439 vhost_work_dev_flush(&vs->dev); in vhost_scsi_flush()
1531 vhost_scsi_set_endpoint(struct vhost_scsi *vs, in vhost_scsi_set_endpoint() argument
1543 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
1546 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_set_endpoint()
1548 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_set_endpoint()
1560 if (vs->vs_tpg) in vhost_scsi_set_endpoint()
1561 memcpy(vs_tpg, vs->vs_tpg, len); in vhost_scsi_set_endpoint()
1576 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { in vhost_scsi_set_endpoint()
1595 tpg->vhost_scsi = vs; in vhost_scsi_set_endpoint()
1603 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, in vhost_scsi_set_endpoint()
1604 sizeof(vs->vs_vhost_wwpn)); in vhost_scsi_set_endpoint()
1607 vq = &vs->vqs[i].vq; in vhost_scsi_set_endpoint()
1617 vq = &vs->vqs[i].vq; in vhost_scsi_set_endpoint()
1632 vhost_scsi_flush(vs); in vhost_scsi_set_endpoint()
1633 kfree(vs->vs_tpg); in vhost_scsi_set_endpoint()
1634 vs->vs_tpg = vs_tpg; in vhost_scsi_set_endpoint()
1639 if (!vhost_vq_get_backend(&vs->vqs[i].vq)) in vhost_scsi_set_endpoint()
1640 vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq); in vhost_scsi_set_endpoint()
1652 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
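The vhost_scsi_set_endpoint() fragments (lines 1560-1634) suggest the per-device target table is updated by copying: allocate a new vs_tpg array, memcpy the old contents, install the new target, flush outstanding work, and only then free and replace the old pointer. A much-simplified userspace sketch of that copy-then-publish step, with placeholder types and names rather than the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGETS 256                 /* placeholder for VHOST_SCSI_MAX_TARGET */

struct fake_tpg { int id; };

struct fake_scsi {
        struct fake_tpg **vs_tpg;       /* currently published target table */
};

static void flush_io(struct fake_scsi *s)
{
        /* stands in for vhost_scsi_flush(): wait out requests that may
         * still be using the old table */
}

static int add_target(struct fake_scsi *s, struct fake_tpg *tpg, int slot)
{
        size_t len = MAX_TARGETS * sizeof(struct fake_tpg *);
        struct fake_tpg **new_tpg = calloc(1, len);

        if (!new_tpg)
                return -1;
        if (s->vs_tpg)                  /* start from a copy of the old table */
                memcpy(new_tpg, s->vs_tpg, len);
        new_tpg[slot] = tpg;

        flush_io(s);                    /* drain users of the old table */
        free(s->vs_tpg);
        s->vs_tpg = new_tpg;            /* publish the updated table */
        return 0;
}

int main(void)
{
        struct fake_scsi s = { 0 };
        struct fake_tpg tpg = { .id = 7 };

        if (add_target(&s, &tpg, 0) == 0)
                printf("target %d at slot 0\n", s.vs_tpg[0]->id);
        free(s.vs_tpg);
        return 0;
}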
1658 vhost_scsi_clear_endpoint(struct vhost_scsi *vs, in vhost_scsi_clear_endpoint() argument
1670 mutex_lock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1672 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_clear_endpoint()
1673 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_clear_endpoint()
1679 if (!vs->vs_tpg) { in vhost_scsi_clear_endpoint()
1686 tpg = vs->vs_tpg[target]; in vhost_scsi_clear_endpoint()
1707 vs->vs_tpg[target] = NULL; in vhost_scsi_clear_endpoint()
1719 vq = &vs->vqs[i].vq; in vhost_scsi_clear_endpoint()
1725 vhost_scsi_flush(vs); in vhost_scsi_clear_endpoint()
1728 vq = &vs->vqs[i].vq; in vhost_scsi_clear_endpoint()
1736 vhost_scsi_flush(vs); in vhost_scsi_clear_endpoint()
1737 kfree(vs->vs_tpg); in vhost_scsi_clear_endpoint()
1738 vs->vs_tpg = NULL; in vhost_scsi_clear_endpoint()
1739 WARN_ON(vs->vs_events_nr); in vhost_scsi_clear_endpoint()
1740 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1747 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1752 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) in vhost_scsi_set_features() argument
1760 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_features()
1762 !vhost_log_access_ok(&vs->dev)) { in vhost_scsi_set_features()
1763 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
1768 vq = &vs->vqs[i].vq; in vhost_scsi_set_features()
1773 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
1779 struct vhost_scsi *vs; in vhost_scsi_open() local
1783 vs = kvzalloc(sizeof(*vs), GFP_KERNEL); in vhost_scsi_open()
1784 if (!vs) in vhost_scsi_open()
1791 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); in vhost_scsi_open()
1792 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); in vhost_scsi_open()
1794 vs->vs_events_nr = 0; in vhost_scsi_open()
1795 vs->vs_events_missed = false; in vhost_scsi_open()
1797 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; in vhost_scsi_open()
1798 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_open()
1799 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; in vhost_scsi_open()
1800 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; in vhost_scsi_open()
1802 vqs[i] = &vs->vqs[i].vq; in vhost_scsi_open()
1803 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; in vhost_scsi_open()
1805 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV, in vhost_scsi_open()
1808 vhost_scsi_init_inflight(vs, NULL); in vhost_scsi_open()
1810 f->private_data = vs; in vhost_scsi_open()
1814 kvfree(vs); in vhost_scsi_open()
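vhost_scsi_open() allocates the device with kvzalloc(), initializes the completion and event work items, and wires the kick handlers: the control and event virtqueues get vhost_scsi_ctl_handle_kick() and vhost_scsi_evt_handle_kick(), the remaining queues get vhost_scsi_handle_kick(), and vhost_dev_init() is then called on the whole array. A toy model of that wiring (the constants and types here are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

#define VQ_CTL 0                        /* placeholder for VHOST_SCSI_VQ_CTL */
#define VQ_EVT 1                        /* placeholder for VHOST_SCSI_VQ_EVT */
#define MAX_VQ 8                        /* placeholder for VHOST_SCSI_MAX_VQ */

struct fake_vq { void (*handle_kick)(struct fake_vq *vq); };

static void ctl_kick(struct fake_vq *vq) { puts("control request"); }
static void evt_kick(struct fake_vq *vq) { puts("hotplug event");   }
static void io_kick(struct fake_vq *vq)  { puts("SCSI command");    }

struct fake_scsi {
        struct fake_vq vqs[MAX_VQ];
};

static struct fake_scsi *open_dev(void)
{
        struct fake_scsi *s = calloc(1, sizeof(*s));

        if (!s)
                return NULL;
        s->vqs[VQ_CTL].handle_kick = ctl_kick;
        s->vqs[VQ_EVT].handle_kick = evt_kick;
        for (int i = VQ_EVT + 1; i < MAX_VQ; i++)
                s->vqs[i].handle_kick = io_kick;        /* bulk I/O queues */
        return s;
}

int main(void)
{
        struct fake_scsi *s = open_dev();

        if (!s)
                return 1;
        s->vqs[VQ_CTL].handle_kick(&s->vqs[VQ_CTL]);
        s->vqs[2].handle_kick(&s->vqs[2]);
        free(s);
        return 0;
}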
1821 struct vhost_scsi *vs = f->private_data; in vhost_scsi_release() local
1824 mutex_lock(&vs->dev.mutex); in vhost_scsi_release()
1825 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); in vhost_scsi_release()
1826 mutex_unlock(&vs->dev.mutex); in vhost_scsi_release()
1827 vhost_scsi_clear_endpoint(vs, &t); in vhost_scsi_release()
1828 vhost_dev_stop(&vs->dev); in vhost_scsi_release()
1829 vhost_dev_cleanup(&vs->dev); in vhost_scsi_release()
1831 vhost_scsi_flush(vs); in vhost_scsi_release()
1832 kfree(vs->dev.vqs); in vhost_scsi_release()
1833 kvfree(vs); in vhost_scsi_release()
1842 struct vhost_scsi *vs = f->private_data; in vhost_scsi_ioctl() local
1850 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_ioctl()
1859 return vhost_scsi_set_endpoint(vs, &backend); in vhost_scsi_ioctl()
1866 return vhost_scsi_clear_endpoint(vs, &backend); in vhost_scsi_ioctl()
1875 vs->vs_events_missed = events_missed; in vhost_scsi_ioctl()
1880 events_missed = vs->vs_events_missed; in vhost_scsi_ioctl()
1893 return vhost_scsi_set_features(vs, features); in vhost_scsi_ioctl()
1895 mutex_lock(&vs->dev.mutex); in vhost_scsi_ioctl()
1896 r = vhost_dev_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1899 r = vhost_vring_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1900 mutex_unlock(&vs->dev.mutex); in vhost_scsi_ioctl()
1951 struct vhost_scsi *vs = tpg->vhost_scsi; in vhost_scsi_do_plug() local
1955 if (!vs) in vhost_scsi_do_plug()
1958 mutex_lock(&vs->dev.mutex); in vhost_scsi_do_plug()
1965 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_plug()
1968 vhost_scsi_send_evt(vs, tpg, lun, in vhost_scsi_do_plug()
1971 mutex_unlock(&vs->dev.mutex); in vhost_scsi_do_plug()