Lines Matching +full:5 +full:vs
226 static void vhost_scsi_init_inflight(struct vhost_scsi *vs, in vhost_scsi_init_inflight() argument
234 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
239 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
241 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
244 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
245 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
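
The vhost_scsi_init_inflight() hits above show the driver's double-buffered "inflight" tracking: each virtqueue keeps two inflight slots and an index selecting the active one, and flipping the index hands the old slot back so a flusher can wait for it to drain. A minimal userspace model of that flip, with illustrative names (struct inflight, struct queue) rather than the driver's:

```c
/* Minimal userspace model of the double-buffered inflight pattern:
 * two counter slots per queue, an index selecting the active slot,
 * and a flip that returns the old slot so a flusher can wait for it
 * to reach zero (the driver does the flip under vq->mutex). */
#include <stdio.h>

struct inflight {
	int count;		/* requests still charged to this slot */
};

struct queue {
	struct inflight slots[2];
	int active;		/* slot new requests are charged to */
};

/* Flip the active slot and hand back the old one. */
static struct inflight *queue_flip_inflight(struct queue *q)
{
	struct inflight *old = &q->slots[q->active];

	q->active ^= 1;
	q->slots[q->active].count = 0;
	return old;
}

int main(void)
{
	struct queue q = { .slots = { { .count = 3 } }, .active = 0 };
	struct inflight *old = queue_flip_inflight(&q);

	printf("old slot still has %d in flight, new active slot is %d\n",
	       old->count, q.active);
	return 0;
}
```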
364 struct vhost_scsi *vs = cmd->tvc_vhost; in vhost_scsi_complete_cmd() local
366 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); in vhost_scsi_complete_cmd()
368 vhost_work_queue(&vs->dev, &vs->vs_completion_work); in vhost_scsi_complete_cmd()
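
vhost_scsi_complete_cmd() pushes a finished command onto a lock-free singly linked list (llist_add) and queues the completion work; the worker later detaches the whole list in one operation, as the llist_del_all() hits in vhost_scsi_evt_work() and vhost_scsi_complete_cmd_work() below show. A self-contained userspace sketch of that push/detach pattern using C11 atomics; the names are illustrative, not the kernel's llist API:

```c
/* Treiber-style lock-free push plus "take everything" detach, the
 * shape behind llist_add() / llist_del_all() in the completion path. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) completion_list;

/* llist_add() analogue: push one node with a CAS loop. */
static void push(struct node *n)
{
	struct node *old = atomic_load(&completion_list);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&completion_list, &old, n));
}

/* llist_del_all() analogue: detach the whole list atomically. */
static struct node *pop_all(void)
{
	return atomic_exchange(&completion_list, NULL);
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	push(&a);
	push(&b);
	for (struct node *n = pop_all(); n; n = n->next)
		printf("completing cmd %d\n", n->id);
	return 0;
}
```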
397 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_free_evt() argument
399 vs->vs_events_nr--; in vhost_scsi_free_evt()
404 vhost_scsi_allocate_evt(struct vhost_scsi *vs, in vhost_scsi_allocate_evt() argument
407 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
410 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { in vhost_scsi_allocate_evt()
411 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
418 vs->vs_events_missed = true; in vhost_scsi_allocate_evt()
424 vs->vs_events_nr++; in vhost_scsi_allocate_evt()
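
vhost_scsi_allocate_evt() never blocks on event pressure: when too many events are already queued or the allocation fails, it sets vs_events_missed and drops the event; a later kick (see vhost_scsi_evt_handle_kick() below) emits a single "lost event" notification. A userspace sketch of that bounded-allocation-with-overflow-flag idea; the constant and struct names are placeholders:

```c
/* Bounded event allocation: overflow is coalesced into one flag
 * instead of blocking or queueing without limit. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_EVENTS 128		/* stands in for VHOST_SCSI_MAX_EVENT */

struct evt {
	unsigned int event;
	unsigned int reason;
};

static int events_nr;
static bool events_missed;

static struct evt *allocate_evt(unsigned int event, unsigned int reason)
{
	struct evt *e;

	if (events_nr > MAX_EVENTS) {
		events_missed = true;	/* remember the loss, drop the event */
		return NULL;
	}
	e = malloc(sizeof(*e));
	if (!e) {
		events_missed = true;
		return NULL;
	}
	e->event = event;
	e->reason = reason;
	events_nr++;
	return e;
}

int main(void)
{
	struct evt *e = allocate_evt(1, 0);

	printf("allocated=%p queued=%d missed=%d\n",
	       (void *)e, events_nr, events_missed);
	free(e);
	return 0;
}
```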
444 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) in vhost_scsi_do_evt_work() argument
446 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
453 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
458 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_do_evt_work()
463 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
467 if (vhost_enable_notify(&vs->dev, vq)) in vhost_scsi_do_evt_work()
469 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
476 vs->vs_events_missed = true; in vhost_scsi_do_evt_work()
480 if (vs->vs_events_missed) { in vhost_scsi_do_evt_work()
482 vs->vs_events_missed = false; in vhost_scsi_do_evt_work()
488 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_do_evt_work()
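
The vhost_disable_notify()/vhost_enable_notify() pairs in vhost_scsi_do_evt_work() above and vhost_scsi_handle_vq() below follow the standard vhost idiom: keep guest kicks disabled while draining the ring, and when it looks empty, re-enable kicks and check once more, because a buffer may have been posted in the window before notifications came back on. A single-threaded userspace sketch of that control flow; ring_pop()/notify_*() are stand-ins, not the vhost API:

```c
/* Notification race-closing idiom: after re-enabling kicks, rescan if
 * work arrived while they were off. */
#include <stdbool.h>
#include <stdio.h>

static int pending = 1;			/* pretend one buffer is queued */
static bool kicks_enabled = true;

static bool ring_pop(void)		/* stand-in for vhost_get_vq_desc() */
{
	if (!pending)
		return false;
	pending--;
	return true;
}

static void notify_disable(void) { kicks_enabled = false; }

/* stand-in for vhost_enable_notify(): true means new work showed up
 * while notifications were off, so the caller must rescan. */
static bool notify_enable(void)
{
	kicks_enabled = true;
	return pending > 0;
}

static void drain(void)
{
	notify_disable();
	for (;;) {
		if (ring_pop()) {
			printf("handled one buffer\n");
			continue;
		}
		if (notify_enable()) {	/* buffer arrived in the window */
			notify_disable();
			continue;
		}
		break;			/* truly empty, kicks back on */
	}
}

int main(void)
{
	drain();
	printf("kicks re-enabled: %d\n", kicks_enabled);
	return 0;
}
```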
495 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_evt_work() local
497 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
502 llnode = llist_del_all(&vs->vs_event_list); in vhost_scsi_evt_work()
504 vhost_scsi_do_evt_work(vs, evt); in vhost_scsi_evt_work()
505 vhost_scsi_free_evt(vs, evt); in vhost_scsi_evt_work()
517 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, in vhost_scsi_complete_cmd_work() local
528 llnode = llist_del_all(&vs->vs_completion_list); in vhost_scsi_complete_cmd_work()
551 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
562 vhost_signal(&vs->dev, &vs->vqs[vq].vq); in vhost_scsi_complete_cmd_work()
791 vhost_scsi_send_bad_target(struct vhost_scsi *vs, in vhost_scsi_send_bad_target() argument
804 vhost_add_used_and_signal(&vs->dev, vq, head, 0); in vhost_scsi_send_bad_target()
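
vhost_scsi_send_bad_target(), called from the error paths in vhost_scsi_handle_vq() below, answers a request aimed at a target with no backing tpg by writing a minimal virtio-scsi response carrying the bad-target status into the guest's first response iovec and completing the descriptor with zero data length. A userspace sketch of that shape; the struct is abridged and the constant is a stand-in:

```c
/* Build a "bad target" response and copy it into the response iovec. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

#define BAD_TARGET 3		/* stands in for VIRTIO_SCSI_S_BAD_TARGET */

struct scsi_resp {		/* abridged virtio_scsi_cmd_resp */
	unsigned char status;
	unsigned char response;
};

static void send_bad_target(struct iovec *resp_iov)
{
	struct scsi_resp rsp;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = BAD_TARGET;
	/* the driver copies to guest/user memory here; memcpy stands in,
	 * and on success it calls vhost_add_used_and_signal(..., head, 0) */
	memcpy(resp_iov->iov_base, &rsp, sizeof(rsp));
}

int main(void)
{
	struct scsi_resp guest_buf;
	struct iovec iov = {
		.iov_base = &guest_buf,
		.iov_len = sizeof(guest_buf),
	};

	send_bad_target(&iov);
	printf("response=%u\n", guest_buf.response);
	return 0;
}
```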
810 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) in vhost_scsi_handle_vq() argument
837 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
850 if (unlikely(vhost_enable_notify(&vs->dev, vq))) { in vhost_scsi_handle_vq()
851 vhost_disable_notify(&vs->dev, vq); in vhost_scsi_handle_vq()
900 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
906 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
913 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
918 * iovec sizes + incoming iovec sizes vs. virtio-scsi request + in vhost_scsi_handle_vq()
959 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
967 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1007 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1016 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1019 cmd->tvc_vhost = vs; in vhost_scsi_handle_vq()
1036 vhost_scsi_send_bad_target(vs, vq, head, out); in vhost_scsi_handle_vq()
1065 vhost_scsi_send_evt(struct vhost_scsi *vs, in vhost_scsi_send_evt() argument
1073 evt = vhost_scsi_allocate_evt(vs, event, reason); in vhost_scsi_send_evt()
1090 llist_add(&evt->list, &vs->vs_event_list); in vhost_scsi_send_evt()
1091 vhost_work_queue(&vs->dev, &vs->vs_event_work); in vhost_scsi_send_evt()
1098 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_evt_handle_kick() local
1104 if (vs->vs_events_missed) in vhost_scsi_evt_handle_kick()
1105 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); in vhost_scsi_evt_handle_kick()
1114 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); in vhost_scsi_handle_kick() local
1116 vhost_scsi_handle_vq(vs, vq); in vhost_scsi_handle_kick()
1119 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) in vhost_scsi_flush_vq() argument
1121 vhost_poll_flush(&vs->vqs[index].vq.poll); in vhost_scsi_flush_vq()
1125 static void vhost_scsi_flush(struct vhost_scsi *vs) in vhost_scsi_flush() argument
1131 vhost_scsi_init_inflight(vs, old_inflight); in vhost_scsi_flush()
1143 vhost_scsi_flush_vq(vs, i); in vhost_scsi_flush()
1144 vhost_work_flush(&vs->dev, &vs->vs_completion_work); in vhost_scsi_flush()
1145 vhost_work_flush(&vs->dev, &vs->vs_event_work); in vhost_scsi_flush()
1157 * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1160 vhost_scsi_set_endpoint(struct vhost_scsi *vs, in vhost_scsi_set_endpoint() argument
1172 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
1175 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_set_endpoint()
1177 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_set_endpoint()
1189 if (vs->vs_tpg) in vhost_scsi_set_endpoint()
1190 memcpy(vs_tpg, vs->vs_tpg, len); in vhost_scsi_set_endpoint()
1205 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { in vhost_scsi_set_endpoint()
1226 tpg->vhost_scsi = vs; in vhost_scsi_set_endpoint()
1235 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, in vhost_scsi_set_endpoint()
1236 sizeof(vs->vs_vhost_wwpn)); in vhost_scsi_set_endpoint()
1238 vq = &vs->vqs[i].vq; in vhost_scsi_set_endpoint()
1251 * old vs->vs_tpg is finished. in vhost_scsi_set_endpoint()
1253 vhost_scsi_flush(vs); in vhost_scsi_set_endpoint()
1254 kfree(vs->vs_tpg); in vhost_scsi_set_endpoint()
1255 vs->vs_tpg = vs_tpg; in vhost_scsi_set_endpoint()
1258 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_endpoint()
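
The vhost_scsi_set_endpoint() hits above show that the target table is never edited in place: a new array is allocated, the current contents are copied in, new targets are slotted in, in-flight handlers are flushed so nothing still dereferences the old array, and only then is the old array freed and the new one kept as vs->vs_tpg. A userspace sketch of that copy-flush-publish sequence; the names, limit, and flush stub are illustrative:

```c
/* Copy-update-flush-publish update of a per-target pointer table. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGET 256			/* placeholder for VHOST_SCSI_MAX_TARGET */

struct tpg { int id; };

static struct tpg **vs_tpg;		/* published table, one slot per target */

static void flush_inflight(void)	/* wait for handlers using the old table */
{
}

static int set_endpoint(struct tpg *new_tpg, unsigned int target)
{
	struct tpg **tbl = calloc(MAX_TARGET, sizeof(*tbl));

	if (!tbl)
		return -1;
	if (vs_tpg)			/* start from the current table */
		memcpy(tbl, vs_tpg, MAX_TARGET * sizeof(*tbl));
	if (tbl[target]) {		/* target already claimed */
		free(tbl);
		return -1;
	}
	tbl[target] = new_tpg;

	flush_inflight();		/* old table no longer referenced */
	free(vs_tpg);
	vs_tpg = tbl;			/* publish the updated table */
	return 0;
}

int main(void)
{
	struct tpg t = { .id = 1 };

	if (!set_endpoint(&t, 0))
		printf("target 0 -> tpg %d\n", vs_tpg[0]->id);
	free(vs_tpg);
	return 0;
}
```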
1264 vhost_scsi_clear_endpoint(struct vhost_scsi *vs, in vhost_scsi_clear_endpoint() argument
1276 mutex_lock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1278 for (index = 0; index < vs->dev.nvqs; ++index) { in vhost_scsi_clear_endpoint()
1279 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { in vhost_scsi_clear_endpoint()
1285 if (!vs->vs_tpg) { in vhost_scsi_clear_endpoint()
1292 tpg = vs->vs_tpg[target]; in vhost_scsi_clear_endpoint()
1313 vs->vs_tpg[target] = NULL; in vhost_scsi_clear_endpoint()
1325 vq = &vs->vqs[i].vq; in vhost_scsi_clear_endpoint()
1333 * old vs->vs_tpg is finished. in vhost_scsi_clear_endpoint()
1335 vhost_scsi_flush(vs); in vhost_scsi_clear_endpoint()
1336 kfree(vs->vs_tpg); in vhost_scsi_clear_endpoint()
1337 vs->vs_tpg = NULL; in vhost_scsi_clear_endpoint()
1338 WARN_ON(vs->vs_events_nr); in vhost_scsi_clear_endpoint()
1339 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1346 mutex_unlock(&vs->dev.mutex); in vhost_scsi_clear_endpoint()
1351 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) in vhost_scsi_set_features() argument
1359 mutex_lock(&vs->dev.mutex); in vhost_scsi_set_features()
1361 !vhost_log_access_ok(&vs->dev)) { in vhost_scsi_set_features()
1362 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
1367 vq = &vs->vqs[i].vq; in vhost_scsi_set_features()
1372 mutex_unlock(&vs->dev.mutex); in vhost_scsi_set_features()
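
vhost_scsi_set_features() is a subset check: the feature bits handed in by userspace must all be bits the device offers, and logging may only be acked if the log region is accessible; only then is the value stored per virtqueue under vq->mutex. A tiny sketch of that gate, with a placeholder mask and error value:

```c
/* Reject any feature bit the device does not advertise before storing. */
#include <stdint.h>
#include <stdio.h>

#define SUPPORTED_FEATURES ((UINT64_C(1) << 0) | (UINT64_C(1) << 1))

static uint64_t acked_features;

static int set_features(uint64_t features)
{
	if (features & ~SUPPORTED_FEATURES)
		return -1;	/* the driver returns -EOPNOTSUPP here */
	/* the driver also refuses logging if vhost_log_access_ok() fails */
	acked_features = features;	/* done per vq, under vq->mutex */
	return 0;
}

int main(void)
{
	printf("ok=%d rejected=%d\n",
	       set_features(1) == 0, set_features(1 << 7) < 0);
	return 0;
}
```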
1378 struct vhost_scsi *vs; in vhost_scsi_open() local
1382 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); in vhost_scsi_open()
1383 if (!vs) { in vhost_scsi_open()
1384 vs = vzalloc(sizeof(*vs)); in vhost_scsi_open()
1385 if (!vs) in vhost_scsi_open()
1393 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); in vhost_scsi_open()
1394 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); in vhost_scsi_open()
1396 vs->vs_events_nr = 0; in vhost_scsi_open()
1397 vs->vs_events_missed = false; in vhost_scsi_open()
1399 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; in vhost_scsi_open()
1400 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_open()
1401 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; in vhost_scsi_open()
1402 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; in vhost_scsi_open()
1404 vqs[i] = &vs->vqs[i].vq; in vhost_scsi_open()
1405 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; in vhost_scsi_open()
1407 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV, in vhost_scsi_open()
1410 vhost_scsi_init_inflight(vs, NULL); in vhost_scsi_open()
1412 f->private_data = vs; in vhost_scsi_open()
1416 kvfree(vs); in vhost_scsi_open()
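
vhost_scsi_open() tries kzalloc with __GFP_NOWARN | __GFP_RETRY_MAYFAIL first, falls back to vzalloc when that fails, and releases with kvfree(), which works for either. A userspace model of that "preferred allocator, graceful fallback, one free helper" pattern; the fixed pool below stands in for the kmalloc path and none of this is the kernel API:

```c
/* Preferred allocator with a fallback, and a free helper that knows
 * which path a pointer came from (kvfree() analogue). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char pool[256];		/* stand-in for the kmalloc path */
static bool pool_used;

static void *alloc_zeroed(size_t size)
{
	if (!pool_used && size <= sizeof(pool)) {	/* preferred path */
		pool_used = true;
		memset(pool, 0, size);
		return pool;
	}
	return calloc(1, size);				/* fallback path */
}

static void free_any(void *p)
{
	if (p == pool)
		pool_used = false;
	else
		free(p);
}

int main(void)
{
	void *small = alloc_zeroed(128);	/* lands in the pool */
	void *large = alloc_zeroed(4096);	/* falls back to calloc */

	printf("small from pool: %d, large from fallback: %d\n",
	       small == (void *)pool, large != (void *)pool);
	free_any(small);
	free_any(large);
	return 0;
}
```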
1423 struct vhost_scsi *vs = f->private_data; in vhost_scsi_release() local
1426 mutex_lock(&vs->dev.mutex); in vhost_scsi_release()
1427 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); in vhost_scsi_release()
1428 mutex_unlock(&vs->dev.mutex); in vhost_scsi_release()
1429 vhost_scsi_clear_endpoint(vs, &t); in vhost_scsi_release()
1430 vhost_dev_stop(&vs->dev); in vhost_scsi_release()
1431 vhost_dev_cleanup(&vs->dev); in vhost_scsi_release()
1433 vhost_scsi_flush(vs); in vhost_scsi_release()
1434 kfree(vs->dev.vqs); in vhost_scsi_release()
1435 kvfree(vs); in vhost_scsi_release()
1444 struct vhost_scsi *vs = f->private_data; in vhost_scsi_ioctl() local
1452 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_ioctl()
1461 return vhost_scsi_set_endpoint(vs, &backend); in vhost_scsi_ioctl()
1468 return vhost_scsi_clear_endpoint(vs, &backend); in vhost_scsi_ioctl()
1477 vs->vs_events_missed = events_missed; in vhost_scsi_ioctl()
1482 events_missed = vs->vs_events_missed; in vhost_scsi_ioctl()
1495 return vhost_scsi_set_features(vs, features); in vhost_scsi_ioctl()
1497 mutex_lock(&vs->dev.mutex); in vhost_scsi_ioctl()
1498 r = vhost_dev_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1501 r = vhost_vring_ioctl(&vs->dev, ioctl, argp); in vhost_scsi_ioctl()
1502 mutex_unlock(&vs->dev.mutex); in vhost_scsi_ioctl()
1563 struct vhost_scsi *vs = tpg->vhost_scsi; in vhost_scsi_do_plug() local
1567 if (!vs) in vhost_scsi_do_plug()
1570 mutex_lock(&vs->dev.mutex); in vhost_scsi_do_plug()
1577 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_plug()
1580 vhost_scsi_send_evt(vs, tpg, lun, in vhost_scsi_do_plug()
1583 mutex_unlock(&vs->dev.mutex); in vhost_scsi_do_plug()
1935 if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET) in vhost_scsi_make_tpg()
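
vhost_scsi_make_tpg() expects the configfs directory name to look like "tpgt_<N>": it skips the 5-character prefix, parses the remainder as a base-10 u16 with kstrtou16(), and rejects values at or above the target limit. A userspace sketch of the same parse, with strtoul standing in for kstrtou16 and a placeholder limit:

```c
/* Parse "tpgt_<N>" and range-check the target number. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGET 256		/* stands in for VHOST_SCSI_MAX_TARGET */

static int parse_tpgt(const char *name, unsigned short *tpgt)
{
	unsigned long v;
	char *end;

	if (strncmp(name, "tpgt_", 5))
		return -EINVAL;
	errno = 0;
	v = strtoul(name + 5, &end, 10);
	if (errno || end == name + 5 || *end || v >= MAX_TARGET)
		return -EINVAL;
	*tpgt = (unsigned short)v;
	return 0;
}

int main(void)
{
	unsigned short tpgt;

	if (!parse_tpgt("tpgt_5", &tpgt))
		printf("tpgt=%u\n", tpgt);
	return 0;
}
```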