/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048

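/*
 * The PREALLOC constants above size the per-command scatterlist and
 * page-pointer arrays that vhost_scsi_setup_vq_cmds() allocates up front,
 * so the I/O path never allocates SGL memory.  Assuming 4 KiB pages, the
 * 2048 preallocated user pages cap a single request's payload at
 * 2048 * 4 KiB = 8 MiB.
 */
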
/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Response header iovec */
	struct iovec tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
	struct list_head tmf_queue;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * any given time, one reference tracks newly submitted commands, while
	 * we wait for the other one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
};
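
/*
 * Sketch of the flush protocol built on the two inflights above (see
 * vhost_scsi_flush()): every command takes a kref on whichever inflight
 * is current at submission time.  A flush flips inflight_idx so that new
 * commands charge the other counter, drops the initial reference on the
 * old inflight, and sleeps on its completion, which fires once the last
 * outstanding command releases its reference.
 */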

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;
	struct list_head queue_entry;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};
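
/*
 * A vhost_scsi_ctx is filled in stages: vhost_scsi_get_desc() sets
 * head/out/in and the iovec totals, vhost_scsi_chk_size() validates them
 * against req_size/rsp_size, and vhost_scsi_get_req() copies in the
 * request header and resolves the target.
 */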

static struct workqueue_struct *vhost_scsi_workqueue;

/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_tpg *tpg = tmf->tpg;
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	mutex_lock(&tpg->tv_tpg_mutex);
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
					struct vhost_scsi_tmf, se_cmd);

		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi *vs = cmd->tvc_vhost;

		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
	}
}

static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
		       u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

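/*
 * Allocate a command descriptor for a new request.  Tags come from the
 * per-virtqueue sbitmap, and the descriptor's preallocated SGL, protection
 * SGL and page arrays are carried across the memset() so they can be
 * reused without reallocation.
 */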
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags, 0, false);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool write)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
				VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	iov_iter_advance(iter, bytes);

	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}
	return npages;
}

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	struct scatterlist *p = sg;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
		if (ret < 0) {
			while (p < sg) {
				struct page *page = sg_page(p++);
				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
	}
	return 0;
}

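/*
 * Map the data and (optional) protection payloads of a request into the
 * command's preallocated scatterlists.  The entry counts are computed
 * first via vhost_scsi_calc_sgls() so that oversized requests fail before
 * any page is pinned.
 */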
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		  cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

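/* Translate a virtio-scsi task attribute into its TCM equivalent. */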
static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void vhost_scsi_submission_work(struct work_struct *work)
{
	struct vhost_scsi_cmd *cmd =
		container_of(work, struct vhost_scsi_cmd, work);
	struct vhost_scsi_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
	int rc;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			cmd->tvc_prot_sgl_count);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}

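/*
 * Report VIRTIO_SCSI_S_BAD_TARGET for a request that cannot be routed,
 * so the guest sees a well-formed response rather than a hung command.
 */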
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

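/*
 * Pop the next descriptor from the virtqueue into @vc.  Returns 0 when a
 * descriptor was fetched and vc->out_iter points at the request header,
 * -ENXIO when the ring is empty or faulted, and -EAGAIN when new buffers
 * raced with re-enabling notification and another pass is needed.
 */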
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new?  Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for an
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}

static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg;

		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */

		tpg = READ_ONCE(vs_tpg[*vc->target]);
		if (unlikely(!tpg)) {
			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
		} else {
			if (tpgp)
				*tpgp = tpg;
			ret = 0;
		}
	}

	return ret;
}

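/*
 * Decode the 2-byte LUN from the 8-byte virtio-scsi lun[] field.  For
 * example, a guest using flat-space addressing encodes LUN 5 as
 * lun[2] = 0x40, lun[3] = 0x05, and ((0x40 << 8) | 0x05) & 0x3FFF = 5.
 */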
static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}

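/*
 * Request queue handler, run from the vhost worker when the guest kicks
 * an I/O virtqueue.  Each iteration pops one descriptor, parses and
 * validates the virtio-scsi header, maps the payload, and dispatches the
 * command to TCM, with the per-pass request budget bounded by the
 * VHOST_SCSI_WEIGHT limit checked via vhost_exceeds_weight().
 */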
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is set up by calling
	 * the VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Set up pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, set up prot_iter
		 * values and recalculate data_iter for vhost_scsi_mapal()
		 * mapping to host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp_iov = vq->iov[vc.out];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		/*
		 * Dispatch cmd descriptor for cmwq execution in process
		 * context provided by vhost_scsi_workqueue.  This also ensures
		 * cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
		queue_work(vhost_scsi_workqueue, &cmd->work);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	int resp_code;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	else
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_release_tmf_res(tmf);
}

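/*
 * Handle a VIRTIO_SCSI_T_TMF control request.  Only LUN RESET is
 * supported; it is serviced with a reserved TMF descriptor taken from
 * tpg->tmf_queue, and everything else is answered with
 * VIRTIO_SCSI_S_FUNCTION_REJECTED.
 */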
static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	mutex_lock(&tpg->tv_tpg_mutex);
	if (list_empty(&tpg->tmf_queue)) {
		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
		mutex_unlock(&tpg->tv_tpg_mutex);
		goto send_reject;
	}

	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del_init(&tmf->queue_entry);
	mutex_unlock(&tpg->tv_tpg_mutex);

	tmf->tpg = tpg;
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}

static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}

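/*
 * Control queue handler: demultiplexes TMF and asynchronous-notification
 * requests.  The request type is read first, since it determines the
 * remaining request and response sizes that must be validated.
 */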
static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is set up by calling
	 * the VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to set up
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
		else
			vhost_scsi_send_an_resp(vs, vq, &vc);
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}

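/*
 * Queue a virtio-scsi event (e.g. hotplug) for delivery to the guest.
 * The event is filled in here and handed to the event work item, which
 * copies it into the event virtqueue; callers are expected to hold the
 * event vq mutex, which protects vs_events_nr.
 */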
static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
		   struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun,
		   u32 event,
		   u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq))
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}

static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!svq->scsi_cmds)
		return;

	for (i = 0; i < svq->max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
	}

	sbitmap_free(&svq->scsi_tags);
	kfree(svq->scsi_cmds);
	svq->scsi_cmds = NULL;
}

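/*
 * Preallocate the command descriptors and their SGL/page arrays for one
 * virtqueue (the caller sizes this to the ring, see vhost_scsi_set_endpoint()).
 * Idempotent: returns 0 immediately if the commands already exist.
 */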
static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (svq->scsi_cmds)
		return 0;

	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
			      NUMA_NO_NODE))
		return -ENOMEM;
	svq->max_cmds = max_cmds;

	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
	if (!svq->scsi_cmds) {
		sbitmap_free(&svq->scsi_tags);
		return -ENOMEM;
	}

	for (i = 0; i < max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
					  sizeof(struct scatterlist),
					  GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
					     sizeof(struct page *),
					     GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
					       sizeof(struct scatterlist),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	return 0;
out:
	vhost_scsi_destroy_vq_cmds(vq);
	return -ENOMEM;
}

1558 /*
1559  * Called from vhost_scsi_ioctl() context to walk the list of available
1560  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1561  *
1562  *  The lock nesting rule is:
1563  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1564  */
1565 static int
vhost_scsi_set_endpoint(struct vhost_scsi * vs,struct vhost_scsi_target * t)1566 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1567 			struct vhost_scsi_target *t)
1568 {
1569 	struct se_portal_group *se_tpg;
1570 	struct vhost_scsi_tport *tv_tport;
1571 	struct vhost_scsi_tpg *tpg;
1572 	struct vhost_scsi_tpg **vs_tpg;
1573 	struct vhost_virtqueue *vq;
1574 	int index, ret, i, len;
1575 	bool match = false;
1576 
1577 	mutex_lock(&vhost_scsi_mutex);
1578 	mutex_lock(&vs->dev.mutex);
1579 
1580 	/* Verify that ring has been setup correctly. */
1581 	for (index = 0; index < vs->dev.nvqs; ++index) {
1582 		/* Verify that ring has been setup correctly. */
1583 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1584 			ret = -EFAULT;
1585 			goto out;
1586 		}
1587 	}
1588 
1589 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1590 	vs_tpg = kzalloc(len, GFP_KERNEL);
1591 	if (!vs_tpg) {
1592 		ret = -ENOMEM;
1593 		goto out;
1594 	}
1595 	if (vs->vs_tpg)
1596 		memcpy(vs_tpg, vs->vs_tpg, len);
1597 
1598 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1599 		mutex_lock(&tpg->tv_tpg_mutex);
1600 		if (!tpg->tpg_nexus) {
1601 			mutex_unlock(&tpg->tv_tpg_mutex);
1602 			continue;
1603 		}
1604 		if (tpg->tv_tpg_vhost_count != 0) {
1605 			mutex_unlock(&tpg->tv_tpg_mutex);
1606 			continue;
1607 		}
1608 		tv_tport = tpg->tport;
1609 
1610 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1611 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1612 				mutex_unlock(&tpg->tv_tpg_mutex);
1613 				ret = -EEXIST;
1614 				goto undepend;
1615 			}
1616 			/*
1617 			 * In order to ensure individual vhost-scsi configfs
1618 			 * groups cannot be removed while in use by vhost ioctl,
1619 			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1620 			 * dependency now.
1621 			 */
1622 			se_tpg = &tpg->se_tpg;
1623 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1624 			if (ret) {
1625 				pr_warn("target_depend_item() failed: %d\n", ret);
1626 				mutex_unlock(&tpg->tv_tpg_mutex);
1627 				goto undepend;
1628 			}
1629 			tpg->tv_tpg_vhost_count++;
1630 			tpg->vhost_scsi = vs;
1631 			vs_tpg[tpg->tport_tpgt] = tpg;
1632 			match = true;
1633 		}
1634 		mutex_unlock(&tpg->tv_tpg_mutex);
1635 	}
1636 
1637 	if (match) {
1638 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1639 		       sizeof(vs->vs_vhost_wwpn));
1640 
1641 		for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1642 			vq = &vs->vqs[i].vq;
1643 			if (!vhost_vq_is_setup(vq))
1644 				continue;
1645 
1646 			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
1647 			if (ret)
1648 				goto destroy_vq_cmds;
1649 		}
1650 
1651 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1652 			vq = &vs->vqs[i].vq;
1653 			mutex_lock(&vq->mutex);
1654 			vhost_vq_set_backend(vq, vs_tpg);
1655 			vhost_vq_init_access(vq);
1656 			mutex_unlock(&vq->mutex);
1657 		}
1658 		ret = 0;
1659 	} else {
1660 		ret = -EEXIST;
1661 	}
1662 
1663 	/*
1664 	 * Act as synchronize_rcu to make sure access to
1665 	 * old vs->vs_tpg is finished.
1666 	 */
1667 	vhost_scsi_flush(vs);
1668 	kfree(vs->vs_tpg);
1669 	vs->vs_tpg = vs_tpg;
1670 	goto out;
1671 
1672 destroy_vq_cmds:
	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
	}
undepend:
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		tpg = vs_tpg[i];
		if (tpg) {
			tpg->tv_tpg_vhost_count--;
			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
		}
	}
	kfree(vs_tpg);
out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
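
/*
 * Hedged usage sketch (illustrative, not part of the driver): a VMM
 * typically reaches vhost_scsi_set_endpoint() via the
 * VHOST_SCSI_SET_ENDPOINT ioctl after creating a matching tport in
 * configfs. The WWPN below is a placeholder:
 *
 *	struct vhost_scsi_target backend = { 0 };
 *
 *	strncpy(backend.vhost_wwpn, "naa.600140554cf3a18e",
 *		sizeof(backend.vhost_wwpn) - 1);
 *	if (ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &backend) < 0)
 *		perror("VHOST_SCSI_SET_ENDPOINT");
 */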

static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
		/*
		 * Release se_tpg->tpg_group.cg_item configfs dependency now
		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
		 */
		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, NULL);
			mutex_unlock(&vq->mutex);
			/*
			 * Make sure cmds are not running before tearing them
			 * down.
			 */
			vhost_scsi_flush(vs);
			vhost_scsi_destroy_vq_cmds(vq);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
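
/*
 * Hedged note: userspace tears an endpoint down with the mirror-image
 * VHOST_SCSI_CLEAR_ENDPOINT ioctl, passing the same vhost_scsi_target
 * WWPN that was handed to VHOST_SCSI_SET_ENDPOINT.
 */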

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}
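
/*
 * Hedged usage sketch (illustrative): feature negotiation follows the
 * usual vhost pattern. Userspace reads the supported mask, intersects
 * it with its own, and writes the result back:
 *
 *	u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	features &= wanted_features;	// VMM-side policy, placeholder
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 */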

static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i;

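	/*
	 * Prefer a physically contiguous allocation, but fall back to
	 * vmalloc when memory is fragmented; __GFP_NOWARN suppresses the
	 * allocation-failure warning since vzalloc() is a valid fallback.
	 */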
	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
	if (!vs) {
		vs = vzalloc(sizeof(*vs));
		if (!vs)
			goto err_vs;
	}

	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0, true, NULL);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_vqs:
	kvfree(vs);
err_vs:
	return r;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	kvfree(vs);
	return 0;
}

static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}
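
/*
 * Hedged note: ioctls not decoded above (VHOST_SET_OWNER, memory-table
 * and vring setup, etc.) fall through to the generic vhost_dev_ioctl()
 * and vhost_vring_ioctl() handlers shared by all vhost devices.
 */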

static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};
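
/*
 * Hedged note: registering this miscdevice creates /dev/vhost-scsi with
 * a dynamically assigned minor; a VMM opens it to obtain the vhost fd
 * that the ioctls above operate on.
 */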

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}

static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		  struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, tpg, lun,
				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}

static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tmf *tmf;

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		return -ENOMEM;
	INIT_LIST_HEAD(&tmf->queue_entry);
	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);

	return 0;
}
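
/*
 * Hedged note: one vhost_scsi_tmf is preallocated per port link and
 * parked on tpg->tmf_queue, so a task-management request arriving later
 * does not need a fresh GFP_KERNEL allocation in the I/O path.
 */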

static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tmf *tmf;

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del(&tmf->queue_entry);
	kfree(tmf);
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotunplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
}

CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
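
/*
 * Hedged configfs usage sketch (paths illustrative): once a vhost tport
 * and TPG exist, the attribute surfaces under the TPG's attrib group:
 *
 *   echo 1 > /sys/kernel/config/target/vhost/naa.<wwpn>/tpgt_1/attrib/fabric_prot_type
 */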

static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus, NULL);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}

static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};
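
/*
 * Hedged configfs usage sketch (paths illustrative): an I_T nexus is
 * created by writing an initiator port name with a recognized prefix
 * ("naa.", "fc." or "iqn.") to the TPG's nexus attribute, and dropped
 * by writing "NULL":
 *
 *   echo naa.<initiator_wwn> > /sys/kernel/config/target/vhost/naa.<wwpn>/tpgt_1/nexus
 *   echo NULL > /sys/kernel/config/target/vhost/naa.<wwpn>/tpgt_1/nexus
 */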

static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);

	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	INIT_LIST_HEAD(&tpg->tmf_queue);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}
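
/*
 * Hedged note: vhost_scsi_make_tpg() runs when userspace does
 * "mkdir tpgt_<n>" under a vhost tport in configfs; the suffix is
 * parsed as a base-10 tag that must stay below VHOST_SCSI_MAX_TARGET.
 */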

static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		     struct config_group *group,
		     const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
			VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);
}

CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "vhost",
	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
	.tpg_get_tag			= vhost_scsi_get_tpgt,
	.tpg_check_demo_mode		= vhost_scsi_check_true,
	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
	.release_cmd			= vhost_scsi_release_cmd,
	.check_stop_free		= vhost_scsi_check_stop_free,
	.sess_get_index			= vhost_scsi_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= vhost_scsi_write_pending,
	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
	.get_cmd_state			= vhost_scsi_get_cmd_state,
	.queue_data_in			= vhost_scsi_queue_data_in,
	.queue_status			= vhost_scsi_queue_status,
	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
	.aborted_task			= vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= vhost_scsi_make_tport,
	.fabric_drop_wwn		= vhost_scsi_drop_tport,
	.fabric_make_tpg		= vhost_scsi_make_tpg,
	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
	.fabric_post_link		= vhost_scsi_port_link,
	.fabric_pre_unlink		= vhost_scsi_port_unlink,

	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
};

static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
	if (!vhost_scsi_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(vhost_scsi_workqueue);
out:
	return ret;
}

static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
	destroy_workqueue(vhost_scsi_workqueue);
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);