/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

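/*
 * Overview: tcm_loop registers a virtual SCSI LLD (a pseudo bus, driver and
 * Scsi_Host) and exposes LIO target devices back to the local Linux/SCSI
 * midlayer as loopback LUNs.  Configuration is done entirely via configfs.
 *
 * A typical configfs layout looks roughly like the following (the WWN and
 * LUN names are illustrative and chosen by the administrator):
 *
 *   /sys/kernel/config/target/loopback/naa.60014051234567890/
 *       tpgt_0/
 *           lun/lun_0/          -> symlink to a backstore LUN
 *           nexus               -> virtual initiator WWN for the I_T nexus
 *           transport_status    -> "online" or "offline"
 *           address             -> "<host_no>:0:<tpgt>" of the emulated host
 */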
#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name			= "tcm_loop_bus",
	.match			= pseudo_lld_bus_match,
	.probe			= tcm_loop_driver_probe,
	.remove			= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name			= "tcm_loop",
	.bus			= &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

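/*
 * Process-context worker for tcm_loop_queuecommand().  Maps the incoming
 * struct scsi_cmnd data and protection scatterlists directly into a target
 * core struct se_cmd and submits it via target_submit_cmd_map_sgls(),
 * completing the scsi_cmnd immediately on any setup failure.
 */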
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) towards the struct scsi_device backed by this TPG
 */
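/*
 * Flow: allocate a tcm_loop_cmd, submit the TMR via target_submit_tmr()
 * and block on tmr_done, which is completed from tcm_loop_queue_tm_rsp()
 * once the target core has finished processing the request.
 */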
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);

out:
	return ret;

release:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}

static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.eh_abort_handler = tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue		= 1024,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.cmd_per_lun		= 1024,
	.max_sectors		= 0xFFFF,
	.dma_boundary		= PAGE_SIZE - 1,
	.module			= THIS_MODULE,
	.track_queue_depth	= 1,
};

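/*
 * Bus probe callback, invoked via device_register() from
 * tcm_loop_setup_hba_bus().  Allocates the emulated struct Scsi_Host, wires
 * the tcm_loop_hba pointer into its hostdata, enables DIF/DIX protection
 * modes and registers the host with the SCSI midlayer.
 */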
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
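/*
 * Registering tl_hba->dev on tcm_loop_lld_bus triggers pseudo_lld_bus_match()
 * and then tcm_loop_driver_probe(), which is where the emulated Scsi_Host is
 * actually allocated and added.
 */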
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		put_device(&tl_hba->dev);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning 1 here allows a target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric-dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

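/*
 * Common completion path for DATA-IN and STATUS: copy any sense data into the
 * originating scsi_cmnd, propagate residual counts for over/underflow and
 * hand the command back to the SCSI midlayer via ->scsi_done().
 */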
static int tcm_loop_queue_data_or_status(const char *func,
		struct se_cmd *se_cmd, u8 scsi_status)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 func, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	return tcm_loop_queue_data_or_status(__func__,
					     se_cmd, se_cmd->scsi_status);
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid tcm_loop fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

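/*
 * Note: the I_T nexus is created from configfs by writing an initiator WWN
 * into the TPG "nexus" attribute, e.g. (illustrative WWN and path):
 *
 *   echo naa.60014052cc9d2abc > \
 *       /sys/kernel/config/target/loopback/naa.60014051234567890/tpgt_0/nexus
 *
 * which ends up in tcm_loop_tpg_nexus_store() -> tcm_loop_make_nexus() below.
 */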
static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
	if (!tl_nexus)
		return -ENOMEM;

	tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	target_remove_session(se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

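/*
 * Writing "online" or "offline" to the TPG "transport_status" attribute
 * toggles I/O delivery for the emulated port; going offline also posts a
 * nexus-loss Unit Attention to the active session, e.g.:
 *
 *   echo offline > .../loopback/naa.<wwn>/tpgt_0/transport_status
 */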
static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
					 char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

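/*
 * Creating a "tpgt_<N>" directory under a loopback WWN group, for example
 * (illustrative path):
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014051234567890/tpgt_0
 *
 * lands in tcm_loop_make_naa_tpg() below, which registers the TPG with the
 * target core.
 */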
static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
						     const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

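/*
 * Creating the top-level WWN directory, e.g. (illustrative path):
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014051234567890
 *
 * calls tcm_loop_make_scsi_hba(), which allocates the virtual HBA, picks the
 * protocol identifier from the "naa.", "fc." or "iqn." prefix and registers
 * the emulated Scsi_Host via tcm_loop_setup_hba_bus().
 */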
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       tcm_loop_dump_proto_id(tl_hba), name, TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		return ERR_PTR(ret);

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() above will release *tl_hba;
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

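/*
 * Fabric ops template handed to target_register_template().  The target core
 * calls back into the functions above for WWN/TPG lifetime, I/O completion
 * and configfs attribute handling of the loopback fabric.
 */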
static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "loopback",
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index		= tcm_loop_get_inst_index,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};

static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);