1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * iSCSI lib functions
4 *
5 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
6 * Copyright (C) 2004 - 2006 Mike Christie
7 * Copyright (C) 2004 - 2005 Dmitry Yusupov
8 * Copyright (C) 2004 - 2005 Alex Aizman
9 * maintained by open-iscsi@googlegroups.com
10 */
11 #include <linux/types.h>
12 #include <linux/kfifo.h>
13 #include <linux/delay.h>
14 #include <linux/log2.h>
15 #include <linux/slab.h>
16 #include <linux/sched/signal.h>
17 #include <linux/module.h>
18 #include <asm/unaligned.h>
19 #include <net/tcp.h>
20 #include <scsi/scsi_cmnd.h>
21 #include <scsi/scsi_device.h>
22 #include <scsi/scsi_eh.h>
23 #include <scsi/scsi_tcq.h>
24 #include <scsi/scsi_host.h>
25 #include <scsi/scsi.h>
26 #include <scsi/iscsi_proto.h>
27 #include <scsi/scsi_transport.h>
28 #include <scsi/scsi_transport_iscsi.h>
29 #include <scsi/libiscsi.h>
30 #include <trace/events/iscsi.h>
31
32 static int iscsi_dbg_lib_conn;
33 module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
34 S_IRUGO | S_IWUSR);
35 MODULE_PARM_DESC(debug_libiscsi_conn,
36 "Turn on debugging for connections in libiscsi module. "
37 "Set to 1 to turn on, and zero to turn off. Default is off.");
38
39 static int iscsi_dbg_lib_session;
40 module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
41 S_IRUGO | S_IWUSR);
42 MODULE_PARM_DESC(debug_libiscsi_session,
43 "Turn on debugging for sessions in libiscsi module. "
44 "Set to 1 to turn on, and zero to turn off. Default is off.");
45
46 static int iscsi_dbg_lib_eh;
47 module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
48 S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(debug_libiscsi_eh,
50 "Turn on debugging for error handling in libiscsi module. "
51 "Set to 1 to turn on, and zero to turn off. Default is off.");
52
53 #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \
54 do { \
55 if (iscsi_dbg_lib_conn) \
56 iscsi_conn_printk(KERN_INFO, _conn, \
57 "%s " dbg_fmt, \
58 __func__, ##arg); \
59 iscsi_dbg_trace(trace_iscsi_dbg_conn, \
60 &(_conn)->cls_conn->dev, \
61 "%s " dbg_fmt, __func__, ##arg);\
62 } while (0);
63
64 #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
65 do { \
66 if (iscsi_dbg_lib_session) \
67 iscsi_session_printk(KERN_INFO, _session, \
68 "%s " dbg_fmt, \
69 __func__, ##arg); \
70 iscsi_dbg_trace(trace_iscsi_dbg_session, \
71 &(_session)->cls_session->dev, \
72 "%s " dbg_fmt, __func__, ##arg); \
73 } while (0);
74
75 #define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \
76 do { \
77 if (iscsi_dbg_lib_eh) \
78 iscsi_session_printk(KERN_INFO, _session, \
79 "%s " dbg_fmt, \
80 __func__, ##arg); \
81 iscsi_dbg_trace(trace_iscsi_dbg_eh, \
82 &(_session)->cls_session->dev, \
83 "%s " dbg_fmt, __func__, ##arg); \
84 } while (0);
85
86 inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
87 {
88 struct Scsi_Host *shost = conn->session->host;
89 struct iscsi_host *ihost = shost_priv(shost);
90
91 if (ihost->workq)
92 queue_work(ihost->workq, &conn->xmitwork);
93 }
94 EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
95
96 static void __iscsi_update_cmdsn(struct iscsi_session *session,
97 uint32_t exp_cmdsn, uint32_t max_cmdsn)
98 {
99 /*
100 * standard specifies this check for when to update expected and
101 * max sequence numbers
102 */
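/*
 * iscsi_sna_lt()/iscsi_sna_lte() use 32-bit serial number arithmetic,
 * so these comparisons stay correct when CmdSN wraps around zero.
 */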
103 if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
104 return;
105
106 if (exp_cmdsn != session->exp_cmdsn &&
107 !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
108 session->exp_cmdsn = exp_cmdsn;
109
110 if (max_cmdsn != session->max_cmdsn &&
111 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
112 session->max_cmdsn = max_cmdsn;
113 }
114
115 void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
116 {
117 __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
118 be32_to_cpu(hdr->max_cmdsn));
119 }
120 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
121
122 /**
123 * iscsi_prep_data_out_pdu - initialize Data-Out
124 * @task: scsi command task
125 * @r2t: R2T info
126 * @hdr: iscsi data-out pdu header
127 *
128 * Notes:
129 * Initializes a Data-Out PDU within this R2T sequence and finds
130 * the proper data_offset within this SCSI command.
131 *
132 * This function is called with connection lock taken.
133 **/
134 void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
135 struct iscsi_data *hdr)
136 {
137 struct iscsi_conn *conn = task->conn;
138 unsigned int left = r2t->data_length - r2t->sent;
139
140 task->hdr_len = sizeof(struct iscsi_data);
141
142 memset(hdr, 0, sizeof(struct iscsi_data));
143 hdr->ttt = r2t->ttt;
144 hdr->datasn = cpu_to_be32(r2t->datasn);
145 r2t->datasn++;
146 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
147 hdr->lun = task->lun;
148 hdr->itt = task->hdr_itt;
149 hdr->exp_statsn = r2t->exp_statsn;
150 hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
151 if (left > conn->max_xmit_dlength) {
152 hton24(hdr->dlength, conn->max_xmit_dlength);
153 r2t->data_count = conn->max_xmit_dlength;
154 hdr->flags = 0;
155 } else {
156 hton24(hdr->dlength, left);
157 r2t->data_count = left;
158 hdr->flags = ISCSI_FLAG_CMD_FINAL;
159 }
160 conn->dataout_pdus_cnt++;
161 }
162 EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
163
164 static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
165 {
166 unsigned exp_len = task->hdr_len + len;
167
168 if (exp_len > task->hdr_max) {
169 WARN_ON(1);
170 return -EINVAL;
171 }
172
173 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
174 task->hdr_len = exp_len;
175 return 0;
176 }
177
178 /*
179 * make an extended cdb AHS
180 */
181 static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
182 {
183 struct scsi_cmnd *cmd = task->sc;
184 unsigned rlen, pad_len;
185 unsigned short ahslength;
186 struct iscsi_ecdb_ahdr *ecdb_ahdr;
187 int rc;
188
189 ecdb_ahdr = iscsi_next_hdr(task);
190 rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
191
192 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
193 ahslength = rlen + sizeof(ecdb_ahdr->reserved);
194
195 pad_len = iscsi_padding(rlen);
196
197 rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
198 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
199 if (rc)
200 return rc;
201
202 if (pad_len)
203 memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
204
205 ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
206 ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
207 ecdb_ahdr->reserved = 0;
208 memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
209
210 ISCSI_DBG_SESSION(task->conn->session,
211 "iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
212 "rlen %d pad_len %d ahs_length %d iscsi_headers_size "
213 "%u\n", cmd->cmd_len, rlen, pad_len, ahslength,
214 task->hdr_len);
215 return 0;
216 }
217
218 /**
219 * iscsi_check_tmf_restrictions - check if a task is affected by TMF
220 * @task: iscsi task
221 * @opcode: opcode to check for
222 *
223 * While a TMF is outstanding, each task has to be checked to see if it is affected.
224 * All unrelated I/O can be passed through, but I/O to the
225 * affected LUN should be restricted.
226 * If 'fast_abort' is set we won't be sending any I/O to the
227 * affected LUN.
228 * Otherwise the target is waiting for all TTTs to be completed,
229 * so we have to send all outstanding Data-Out PDUs to the target.
230 */
231 static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
232 {
233 struct iscsi_session *session = task->conn->session;
234 struct iscsi_tm *tmf = &session->tmhdr;
235 u64 hdr_lun;
236
237 if (session->tmf_state == TMF_INITIAL)
238 return 0;
239
240 if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
241 return 0;
242
243 switch (ISCSI_TM_FUNC_VALUE(tmf)) {
244 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
245 /*
246 * Allow PDUs for unrelated LUNs
247 */
248 hdr_lun = scsilun_to_int(&tmf->lun);
249 if (hdr_lun != task->sc->device->lun)
250 return 0;
251 fallthrough;
252 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
253 /*
254 * Fail all SCSI cmd PDUs
255 */
256 if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
257 iscsi_session_printk(KERN_INFO, session,
258 "task [op %x itt 0x%x/0x%x] rejected.\n",
259 opcode, task->itt, task->hdr_itt);
260 return -EACCES;
261 }
262 /*
263 * And also all data-out PDUs in response to R2T
264 * if fast_abort is set.
265 */
266 if (session->fast_abort) {
267 iscsi_session_printk(KERN_INFO, session,
268 "task [op %x itt 0x%x/0x%x] fast abort.\n",
269 opcode, task->itt, task->hdr_itt);
270 return -EACCES;
271 }
272 break;
273 case ISCSI_TM_FUNC_ABORT_TASK:
274 /*
275 * the caller has already checked if the task
276 * they want to abort was in the pending queue so if
277 * we are here the cmd pdu has gone out already, and
278 * we will only hit this for data-outs
279 */
280 if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
281 task->hdr_itt == tmf->rtt) {
282 ISCSI_DBG_SESSION(session,
283 "Preventing task %x/%x from sending "
284 "data-out due to abort task in "
285 "progress\n", task->itt,
286 task->hdr_itt);
287 return -EACCES;
288 }
289 break;
290 }
291
292 return 0;
293 }
294
295 /**
296 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
297 * @task: iscsi task
298 *
299 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
300 * fields like dlength or final based on how much data it sends
301 */
302 static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
303 {
304 struct iscsi_conn *conn = task->conn;
305 struct iscsi_session *session = conn->session;
306 struct scsi_cmnd *sc = task->sc;
307 struct iscsi_scsi_req *hdr;
308 unsigned hdrlength, cmd_len, transfer_length;
309 itt_t itt;
310 int rc;
311
312 rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
313 if (rc)
314 return rc;
315
316 if (conn->session->tt->alloc_pdu) {
317 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
318 if (rc)
319 return rc;
320 }
321 hdr = (struct iscsi_scsi_req *)task->hdr;
322 itt = hdr->itt;
323 memset(hdr, 0, sizeof(*hdr));
324
325 if (session->tt->parse_pdu_itt)
326 hdr->itt = task->hdr_itt = itt;
327 else
328 hdr->itt = task->hdr_itt = build_itt(task->itt,
329 task->conn->session->age);
330 task->hdr_len = 0;
331 rc = iscsi_add_hdr(task, sizeof(*hdr));
332 if (rc)
333 return rc;
334 hdr->opcode = ISCSI_OP_SCSI_CMD;
335 hdr->flags = ISCSI_ATTR_SIMPLE;
336 int_to_scsilun(sc->device->lun, &hdr->lun);
337 task->lun = hdr->lun;
338 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
339 cmd_len = sc->cmd_len;
340 if (cmd_len < ISCSI_CDB_SIZE)
341 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
342 else if (cmd_len > ISCSI_CDB_SIZE) {
343 rc = iscsi_prep_ecdb_ahs(task);
344 if (rc)
345 return rc;
346 cmd_len = ISCSI_CDB_SIZE;
347 }
348 memcpy(hdr->cdb, sc->cmnd, cmd_len);
349
350 task->imm_count = 0;
351 if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
352 task->protected = true;
353
354 transfer_length = scsi_transfer_length(sc);
355 hdr->data_length = cpu_to_be32(transfer_length);
356 if (sc->sc_data_direction == DMA_TO_DEVICE) {
357 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
358
359 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
360 /*
361 * Write counters:
362 *
363 * imm_count bytes to be sent right after
364 * SCSI PDU Header
365 *
366 * unsol_count bytes(as Data-Out) to be sent
367 * without R2T ack right after
368 * immediate data
369 *
370 * r2t data_length bytes to be sent via R2T ack's
371 *
372 * pad_count bytes to be sent as zero-padding
373 */
374 memset(r2t, 0, sizeof(*r2t));
375
376 if (session->imm_data_en) {
377 if (transfer_length >= session->first_burst)
378 task->imm_count = min(session->first_burst,
379 conn->max_xmit_dlength);
380 else
381 task->imm_count = min(transfer_length,
382 conn->max_xmit_dlength);
383 hton24(hdr->dlength, task->imm_count);
384 } else
385 zero_data(hdr->dlength);
386
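/*
 * With InitialR2T disabled we may send unsolicited Data-Out PDUs for
 * whatever part of the first burst was not already covered by the
 * immediate data; unsol_r2t tracks that as a pseudo R2T with the
 * reserved TTT.
 */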
387 if (!session->initial_r2t_en) {
388 r2t->data_length = min(session->first_burst,
389 transfer_length) -
390 task->imm_count;
391 r2t->data_offset = task->imm_count;
392 r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
393 r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
394 }
395
396 if (!task->unsol_r2t.data_length)
397 /* No unsolicited Data-Outs */
398 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
399 } else {
400 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
401 zero_data(hdr->dlength);
402
403 if (sc->sc_data_direction == DMA_FROM_DEVICE)
404 hdr->flags |= ISCSI_FLAG_CMD_READ;
405 }
406
407 /* calculate size of additional header segments (AHSs) */
408 hdrlength = task->hdr_len - sizeof(*hdr);
409
410 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
411 hdrlength /= ISCSI_PAD_LEN;
412
413 WARN_ON(hdrlength >= 256);
414 hdr->hlength = hdrlength & 0xFF;
415 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
416
417 if (session->tt->init_task && session->tt->init_task(task))
418 return -EIO;
419
420 task->state = ISCSI_TASK_RUNNING;
421 session->cmdsn++;
422
423 conn->scsicmd_pdus_cnt++;
424 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
425 "itt 0x%x len %d cmdsn %d win %d]\n",
426 sc->sc_data_direction == DMA_TO_DEVICE ?
427 "write" : "read", conn->id, sc, sc->cmnd[0],
428 task->itt, transfer_length,
429 session->cmdsn,
430 session->max_cmdsn - session->exp_cmdsn + 1);
431 return 0;
432 }
433
434 /**
435 * iscsi_free_task - free a task
436 * @task: iscsi cmd task
437 *
438 * Must be called with session back_lock.
439 * This function returns the scsi command to scsi-ml or cleans
440 * up mgmt tasks then returns the task to the pool.
441 */
442 static void iscsi_free_task(struct iscsi_task *task)
443 {
444 struct iscsi_conn *conn = task->conn;
445 struct iscsi_session *session = conn->session;
446 struct scsi_cmnd *sc = task->sc;
447 int oldstate = task->state;
448
449 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
450 task->itt, task->state, task->sc);
451
452 session->tt->cleanup_task(task);
453 task->state = ISCSI_TASK_FREE;
454 task->sc = NULL;
455 /*
456 * login task is preallocated so do not free
457 */
458 if (conn->login_task == task)
459 return;
460
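/*
 * Once the task is back in the pool it can be reused immediately, so
 * only the locally saved sc and oldstate are touched from here on.
 */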
461 kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
462
463 if (sc) {
464 /* SCSI eh reuses commands to verify us */
465 sc->SCp.ptr = NULL;
466 /*
467 * queue command may call this to free the task, so
468 * it will decide how to return sc to scsi-ml.
469 */
470 if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
471 sc->scsi_done(sc);
472 }
473 }
474
475 void __iscsi_get_task(struct iscsi_task *task)
476 {
477 refcount_inc(&task->refcount);
478 }
479 EXPORT_SYMBOL_GPL(__iscsi_get_task);
480
481 void __iscsi_put_task(struct iscsi_task *task)
482 {
483 if (refcount_dec_and_test(&task->refcount))
484 iscsi_free_task(task);
485 }
486 EXPORT_SYMBOL_GPL(__iscsi_put_task);
487
488 void iscsi_put_task(struct iscsi_task *task)
489 {
490 struct iscsi_session *session = task->conn->session;
491
492 /* regular RX path uses back_lock */
493 spin_lock_bh(&session->back_lock);
494 __iscsi_put_task(task);
495 spin_unlock_bh(&session->back_lock);
496 }
497 EXPORT_SYMBOL_GPL(iscsi_put_task);
498
499 /**
500 * iscsi_complete_task - finish a task
501 * @task: iscsi cmd task
502 * @state: state to complete task with
503 *
504 * Must be called with session back_lock.
505 */
506 static void iscsi_complete_task(struct iscsi_task *task, int state)
507 {
508 struct iscsi_conn *conn = task->conn;
509
510 ISCSI_DBG_SESSION(conn->session,
511 "complete task itt 0x%x state %d sc %p\n",
512 task->itt, task->state, task->sc);
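/*
 * The task may already have been failed by TMF/session recovery or
 * completed early by a misbehaving target; don't transition it again
 * or we would drop the queueing reference twice.
 */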
513 if (task->state == ISCSI_TASK_COMPLETED ||
514 task->state == ISCSI_TASK_ABRT_TMF ||
515 task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
516 task->state == ISCSI_TASK_REQUEUE_SCSIQ)
517 return;
518 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
519 task->state = state;
520
521 if (READ_ONCE(conn->ping_task) == task)
522 WRITE_ONCE(conn->ping_task, NULL);
523
524 /* release get from queueing */
525 __iscsi_put_task(task);
526 }
527
528 /**
529 * iscsi_complete_scsi_task - finish scsi task normally
530 * @task: iscsi task for scsi cmd
531 * @exp_cmdsn: expected cmd sn in cpu format
532 * @max_cmdsn: max cmd sn in cpu format
533 *
534 * This is used when drivers do not need or cannot perform
535 * lower level pdu processing.
536 *
537 * Called with session back_lock
538 */
539 void iscsi_complete_scsi_task(struct iscsi_task *task,
540 uint32_t exp_cmdsn, uint32_t max_cmdsn)
541 {
542 struct iscsi_conn *conn = task->conn;
543
544 ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
545
546 conn->last_recv = jiffies;
547 __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
548 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
549 }
550 EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
551
552 /*
553 * Must be called with back and frwd lock
554 */
555 static bool cleanup_queued_task(struct iscsi_task *task)
556 {
557 struct iscsi_conn *conn = task->conn;
558 bool early_complete = false;
559
560 /* Bad target might have completed task while it was still running */
561 if (task->state == ISCSI_TASK_COMPLETED)
562 early_complete = true;
563
564 if (!list_empty(&task->running)) {
565 list_del_init(&task->running);
566 /*
567 * If it's on a list but still running, this could be from
568 * a bad target sending a rsp early, cleanup from a TMF, or
569 * session recovery.
570 */
571 if (task->state == ISCSI_TASK_RUNNING ||
572 task->state == ISCSI_TASK_COMPLETED)
573 __iscsi_put_task(task);
574 }
575
576 if (conn->task == task) {
577 conn->task = NULL;
578 __iscsi_put_task(task);
579 }
580
581 return early_complete;
582 }
583
584 /*
585 * session frwd lock must be held; unless this is called for a task that is
586 * still pending, or called from the xmit thread itself, the xmit thread must be suspended
587 */
588 static void fail_scsi_task(struct iscsi_task *task, int err)
589 {
590 struct iscsi_conn *conn = task->conn;
591 struct scsi_cmnd *sc;
592 int state;
593
594 spin_lock_bh(&conn->session->back_lock);
595 if (cleanup_queued_task(task)) {
596 spin_unlock_bh(&conn->session->back_lock);
597 return;
598 }
599
600 if (task->state == ISCSI_TASK_PENDING) {
601 /*
602 * cmd never made it to the xmit thread, so we should not count
603 * the cmd in the sequencing
604 */
605 conn->session->queued_cmdsn--;
606 /* it was never sent so just complete like normal */
607 state = ISCSI_TASK_COMPLETED;
608 } else if (err == DID_TRANSPORT_DISRUPTED)
609 state = ISCSI_TASK_ABRT_SESS_RECOV;
610 else
611 state = ISCSI_TASK_ABRT_TMF;
612
613 sc = task->sc;
614 sc->result = err << 16;
615 scsi_set_resid(sc, scsi_bufflen(sc));
616 iscsi_complete_task(task, state);
617 spin_unlock_bh(&conn->session->back_lock);
618 }
619
620 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
621 struct iscsi_task *task)
622 {
623 struct iscsi_session *session = conn->session;
624 struct iscsi_hdr *hdr = task->hdr;
625 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
626 uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
627
628 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
629 return -ENOTCONN;
630
631 if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
632 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
633 /*
634 * pre-format CmdSN for outgoing PDU.
635 */
636 nop->cmdsn = cpu_to_be32(session->cmdsn);
637 if (hdr->itt != RESERVED_ITT) {
638 /*
639 * TODO: We always use immediate for normal session pdus.
640 * If we start to send tmfs or nops as non-immediate then
641 * we should start checking the cmdsn numbers for mgmt tasks.
642 *
643 * During discovery sessions iscsid sends TEXT as non immediate,
644 * but we always only send one PDU at a time.
645 */
646 if (conn->c_stage == ISCSI_CONN_STARTED &&
647 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
648 session->queued_cmdsn++;
649 session->cmdsn++;
650 }
651 }
652
653 if (session->tt->init_task && session->tt->init_task(task))
654 return -EIO;
655
656 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
657 session->state = ISCSI_STATE_LOGGING_OUT;
658
659 task->state = ISCSI_TASK_RUNNING;
660 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
661 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
662 hdr->itt, task->data_count);
663 return 0;
664 }
665
666 static struct iscsi_task *
667 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
668 char *data, uint32_t data_size)
669 {
670 struct iscsi_session *session = conn->session;
671 struct iscsi_host *ihost = shost_priv(session->host);
672 uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
673 struct iscsi_task *task;
674 itt_t itt;
675
676 if (session->state == ISCSI_STATE_TERMINATE)
677 return NULL;
678
679 if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
680 /*
681 * Login and Text are sent serially, in
682 * request-followed-by-response sequence.
683 * Same task can be used. Same ITT must be used.
684 * Note that login_task is preallocated at conn_create().
685 */
686 if (conn->login_task->state != ISCSI_TASK_FREE) {
687 iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
688 "progress. Cannot start new task.\n");
689 return NULL;
690 }
691
692 if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
693 iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
694 return NULL;
695 }
696
697 task = conn->login_task;
698 } else {
699 if (session->state != ISCSI_STATE_LOGGED_IN)
700 return NULL;
701
702 if (data_size != 0) {
703 iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
704 return NULL;
705 }
706
707 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
708 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
709
710 if (!kfifo_out(&session->cmdpool.queue,
711 (void*)&task, sizeof(void*)))
712 return NULL;
713 }
714 /*
715 * released in complete pdu for task we expect a response for, and
716 * released by the lld when it has transmitted the task for
717 * pdus we do not expect a response for.
718 */
719 refcount_set(&task->refcount, 1);
720 task->conn = conn;
721 task->sc = NULL;
722 INIT_LIST_HEAD(&task->running);
723 task->state = ISCSI_TASK_PENDING;
724
725 if (data_size) {
726 memcpy(task->data, data, data_size);
727 task->data_count = data_size;
728 } else
729 task->data_count = 0;
730
731 if (conn->session->tt->alloc_pdu) {
732 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
733 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
734 "pdu for mgmt task.\n");
735 goto free_task;
736 }
737 }
738
739 itt = task->hdr->itt;
740 task->hdr_len = sizeof(struct iscsi_hdr);
741 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
742
743 if (hdr->itt != RESERVED_ITT) {
744 if (session->tt->parse_pdu_itt)
745 task->hdr->itt = itt;
746 else
747 task->hdr->itt = build_itt(task->itt,
748 task->conn->session->age);
749 }
750
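/*
 * iscsi_send_nopout() reserved the ping slot with INVALID_SCSI_TASK
 * before calling us; record the real task so the nop-in reply can be
 * matched to this ping.
 */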
751 if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
752 WRITE_ONCE(conn->ping_task, task);
753
754 if (!ihost->workq) {
755 if (iscsi_prep_mgmt_task(conn, task))
756 goto free_task;
757
758 if (session->tt->xmit_task(task))
759 goto free_task;
760 } else {
761 list_add_tail(&task->running, &conn->mgmtqueue);
762 iscsi_conn_queue_work(conn);
763 }
764
765 return task;
766
767 free_task:
768 /* regular RX path uses back_lock */
769 spin_lock(&session->back_lock);
770 __iscsi_put_task(task);
771 spin_unlock(&session->back_lock);
772 return NULL;
773 }
774
775 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
776 char *data, uint32_t data_size)
777 {
778 struct iscsi_conn *conn = cls_conn->dd_data;
779 struct iscsi_session *session = conn->session;
780 int err = 0;
781
782 spin_lock_bh(&session->frwd_lock);
783 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
784 err = -EPERM;
785 spin_unlock_bh(&session->frwd_lock);
786 return err;
787 }
788 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
789
790 /**
791 * iscsi_scsi_cmd_rsp - SCSI Command Response processing
792 * @conn: iscsi connection
793 * @hdr: iscsi header
794 * @task: scsi command task
795 * @data: cmd data buffer
796 * @datalen: len of buffer
797 *
798 * iscsi_scsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
799 * then completes the command and task. called under back_lock
800 **/
801 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
802 struct iscsi_task *task, char *data,
803 int datalen)
804 {
805 struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
806 struct iscsi_session *session = conn->session;
807 struct scsi_cmnd *sc = task->sc;
808
809 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
810 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
811
812 sc->result = (DID_OK << 16) | rhdr->cmd_status;
813
814 if (task->protected) {
815 sector_t sector;
816 u8 ascq;
817
818 /**
819 * Transports that didn't implement check_protection
820 * callback but still published T10-PI support to scsi-mid
821 * deserve this BUG_ON.
822 **/
823 BUG_ON(!session->tt->check_protection);
824
825 ascq = session->tt->check_protection(task, &sector);
826 if (ascq) {
827 sc->result = DRIVER_SENSE << 24 |
828 SAM_STAT_CHECK_CONDITION;
829 scsi_build_sense_buffer(1, sc->sense_buffer,
830 ILLEGAL_REQUEST, 0x10, ascq);
831 scsi_set_sense_information(sc->sense_buffer,
832 SCSI_SENSE_BUFFERSIZE,
833 sector);
834 goto out;
835 }
836 }
837
838 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
839 sc->result = DID_ERROR << 16;
840 goto out;
841 }
842
843 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
844 uint16_t senselen;
845
846 if (datalen < 2) {
847 invalid_datalen:
848 iscsi_conn_printk(KERN_ERR, conn,
849 "Got CHECK_CONDITION but invalid data "
850 "buffer size of %d\n", datalen);
851 sc->result = DID_BAD_TARGET << 16;
852 goto out;
853 }
854
855 senselen = get_unaligned_be16(data);
856 if (datalen < senselen)
857 goto invalid_datalen;
858
859 memcpy(sc->sense_buffer, data + 2,
860 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
861 ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
862 min_t(uint16_t, senselen,
863 SCSI_SENSE_BUFFERSIZE));
864 }
865
866 if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
867 ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
868 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
869 }
870
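/*
 * For an underflow the residual must fit within the buffer we sent;
 * an overflow residual only reports how much more the target wanted,
 * so it is accepted as-is.
 */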
871 if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
872 ISCSI_FLAG_CMD_OVERFLOW)) {
873 int res_count = be32_to_cpu(rhdr->residual_count);
874
875 if (res_count > 0 &&
876 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
877 res_count <= scsi_bufflen(sc)))
878 /* write side for bidi or uni-io set_resid */
879 scsi_set_resid(sc, res_count);
880 else
881 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
882 }
883 out:
884 ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
885 sc, sc->result, task->itt);
886 conn->scsirsp_pdus_cnt++;
887 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
888 }
889
890 /**
891 * iscsi_data_in_rsp - SCSI Data-In Response processing
892 * @conn: iscsi connection
893 * @hdr: iscsi pdu
894 * @task: scsi command task
895 *
896 * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received
897 * then completes the command and task. called under back_lock
898 **/
899 static void
900 iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
901 struct iscsi_task *task)
902 {
903 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
904 struct scsi_cmnd *sc = task->sc;
905
906 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
907 return;
908
909 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
910 sc->result = (DID_OK << 16) | rhdr->cmd_status;
911 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
912 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
913 ISCSI_FLAG_DATA_OVERFLOW)) {
914 int res_count = be32_to_cpu(rhdr->residual_count);
915
916 if (res_count > 0 &&
917 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
918 res_count <= sc->sdb.length))
919 scsi_set_resid(sc, res_count);
920 else
921 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
922 }
923
924 ISCSI_DBG_SESSION(conn->session, "data in with status done "
925 "[sc %p res %d itt 0x%x]\n",
926 sc, sc->result, task->itt);
927 conn->scsirsp_pdus_cnt++;
928 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
929 }
930
931 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
932 {
933 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
934 struct iscsi_session *session = conn->session;
935
936 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
937 conn->tmfrsp_pdus_cnt++;
938
939 if (session->tmf_state != TMF_QUEUED)
940 return;
941
942 if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
943 session->tmf_state = TMF_SUCCESS;
944 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
945 session->tmf_state = TMF_NOT_FOUND;
946 else
947 session->tmf_state = TMF_FAILED;
948 wake_up(&session->ehwait);
949 }
950
951 static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
952 {
953 struct iscsi_nopout hdr;
954 struct iscsi_task *task;
955
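/*
 * rhdr != NULL: we are answering a target's nop-in, so echo its TTT
 * and LUN with a reserved ITT.  rhdr == NULL: this is our own ping;
 * reserve the ping_task slot before building the PDU.
 */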
956 if (!rhdr) {
957 if (READ_ONCE(conn->ping_task))
958 return -EINVAL;
959 WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
960 }
961
962 memset(&hdr, 0, sizeof(struct iscsi_nopout));
963 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
964 hdr.flags = ISCSI_FLAG_CMD_FINAL;
965
966 if (rhdr) {
967 hdr.lun = rhdr->lun;
968 hdr.ttt = rhdr->ttt;
969 hdr.itt = RESERVED_ITT;
970 } else
971 hdr.ttt = RESERVED_ITT;
972
973 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
974 if (!task) {
975 if (!rhdr)
976 WRITE_ONCE(conn->ping_task, NULL);
977 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
978 return -EIO;
979 } else if (!rhdr) {
980 /* only track our nops */
981 conn->last_ping = jiffies;
982 }
983
984 return 0;
985 }
986
987 /**
988 * iscsi_nop_out_rsp - SCSI NOP Response processing
989 * @task: scsi command task
990 * @nop: the nop structure
991 * @data: where to put the data
992 * @datalen: length of data
993 *
994 * iscsi_nop_out_rsp handles a nop response to one of our own nops or
995 * to a nop from user space. called under back_lock
996 **/
997 static int iscsi_nop_out_rsp(struct iscsi_task *task,
998 struct iscsi_nopin *nop, char *data, int datalen)
999 {
1000 struct iscsi_conn *conn = task->conn;
1001 int rc = 0;
1002
1003 if (READ_ONCE(conn->ping_task) != task) {
1004 /*
1005 * If this is not in response to one of our
1006 * nops then it must be from userspace.
1007 */
1008 if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
1009 data, datalen))
1010 rc = ISCSI_ERR_CONN_FAILED;
1011 } else
1012 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
1013 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1014 return rc;
1015 }
1016
1017 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1018 char *data, int datalen)
1019 {
1020 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
1021 struct iscsi_hdr rejected_pdu;
1022 int opcode, rc = 0;
1023
1024 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
1025
1026 if (ntoh24(reject->dlength) > datalen ||
1027 ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
1028 iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
1029 "pdu. Invalid data length (pdu dlength "
1030 "%u, datalen %d)\n", ntoh24(reject->dlength),
1031 datalen);
1032 return ISCSI_ERR_PROTO;
1033 }
1034 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
1035 opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;
1036
1037 switch (reject->reason) {
1038 case ISCSI_REASON_DATA_DIGEST_ERROR:
1039 iscsi_conn_printk(KERN_ERR, conn,
1040 "pdu (op 0x%x itt 0x%x) rejected "
1041 "due to DataDigest error.\n",
1042 opcode, rejected_pdu.itt);
1043 break;
1044 case ISCSI_REASON_IMM_CMD_REJECT:
1045 iscsi_conn_printk(KERN_ERR, conn,
1046 "pdu (op 0x%x itt 0x%x) rejected. Too many "
1047 "immediate commands.\n",
1048 opcode, rejected_pdu.itt);
1049 /*
1050 * We only send one TMF at a time so if the target could not
1051 * handle it, then it should get fixed (RFC mandates that
1052 * a target can handle one immediate TMF per conn).
1053 *
1054 * For nops-outs, we could have sent more than one if
1055 * the target is sending us lots of nop-ins
1056 */
1057 if (opcode != ISCSI_OP_NOOP_OUT)
1058 return 0;
1059
1060 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1061 /*
1062 * nop-out in response to target's nop-out rejected.
1063 * Just resend.
1064 */
1065 /* In RX path we are under back lock */
1066 spin_unlock(&conn->session->back_lock);
1067 spin_lock(&conn->session->frwd_lock);
1068 iscsi_send_nopout(conn,
1069 (struct iscsi_nopin*)&rejected_pdu);
1070 spin_unlock(&conn->session->frwd_lock);
1071 spin_lock(&conn->session->back_lock);
1072 } else {
1073 struct iscsi_task *task;
1074 /*
1075 * Our nop as ping got dropped. We know the target
1076 * and transport are ok so just clean up
1077 */
1078 task = iscsi_itt_to_task(conn, rejected_pdu.itt);
1079 if (!task) {
1080 iscsi_conn_printk(KERN_ERR, conn,
1081 "Invalid pdu reject. Could "
1082 "not lookup rejected task.\n");
1083 rc = ISCSI_ERR_BAD_ITT;
1084 } else
1085 rc = iscsi_nop_out_rsp(task,
1086 (struct iscsi_nopin*)&rejected_pdu,
1087 NULL, 0);
1088 }
1089 break;
1090 default:
1091 iscsi_conn_printk(KERN_ERR, conn,
1092 "pdu (op 0x%x itt 0x%x) rejected. Reason "
1093 "code 0x%x\n", rejected_pdu.opcode,
1094 rejected_pdu.itt, reject->reason);
1095 break;
1096 }
1097 return rc;
1098 }
1099
1100 /**
1101 * iscsi_itt_to_task - look up task by itt
1102 * @conn: iscsi connection
1103 * @itt: itt
1104 *
1105 * This should be used for mgmt tasks like login and nops, or if
1106 * the LLD's itt space does not include the session age.
1107 *
1108 * The session back_lock must be held.
1109 */
1110 struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
1111 {
1112 struct iscsi_session *session = conn->session;
1113 int i;
1114
1115 if (itt == RESERVED_ITT)
1116 return NULL;
1117
1118 if (session->tt->parse_pdu_itt)
1119 session->tt->parse_pdu_itt(conn, itt, &i, NULL);
1120 else
1121 i = get_itt(itt);
1122 if (i >= session->cmds_max)
1123 return NULL;
1124
1125 return session->cmds[i];
1126 }
1127 EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
1128
1129 /**
1130 * __iscsi_complete_pdu - complete pdu
1131 * @conn: iscsi conn
1132 * @hdr: iscsi header
1133 * @data: data buffer
1134 * @datalen: len of data buffer
1135 *
1136 * Completes pdu processing by freeing any resources allocated at
1137 * queuecommand or send generic. session back_lock must be held and verify
1138 * itt must have been called.
1139 */
1140 int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1141 char *data, int datalen)
1142 {
1143 struct iscsi_session *session = conn->session;
1144 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
1145 struct iscsi_task *task;
1146 uint32_t itt;
1147
1148 conn->last_recv = jiffies;
1149 rc = iscsi_verify_itt(conn, hdr->itt);
1150 if (rc)
1151 return rc;
1152
1153 if (hdr->itt != RESERVED_ITT)
1154 itt = get_itt(hdr->itt);
1155 else
1156 itt = ~0U;
1157
1158 ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
1159 opcode, conn->id, itt, datalen);
1160
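/*
 * itt == ~0U means the PDU carried RESERVED_ITT, i.e. it is not bound
 * to one of our outstanding tasks (unsolicited nop-in, reject or
 * async event).
 */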
1161 if (itt == ~0U) {
1162 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1163
1164 switch(opcode) {
1165 case ISCSI_OP_NOOP_IN:
1166 if (datalen) {
1167 rc = ISCSI_ERR_PROTO;
1168 break;
1169 }
1170
1171 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
1172 break;
1173
1174 /* In RX path we are under back lock */
1175 spin_unlock(&session->back_lock);
1176 spin_lock(&session->frwd_lock);
1177 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
1178 spin_unlock(&session->frwd_lock);
1179 spin_lock(&session->back_lock);
1180 break;
1181 case ISCSI_OP_REJECT:
1182 rc = iscsi_handle_reject(conn, hdr, data, datalen);
1183 break;
1184 case ISCSI_OP_ASYNC_EVENT:
1185 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1186 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
1187 rc = ISCSI_ERR_CONN_FAILED;
1188 break;
1189 default:
1190 rc = ISCSI_ERR_BAD_OPCODE;
1191 break;
1192 }
1193 goto out;
1194 }
1195
1196 switch(opcode) {
1197 case ISCSI_OP_SCSI_CMD_RSP:
1198 case ISCSI_OP_SCSI_DATA_IN:
1199 task = iscsi_itt_to_ctask(conn, hdr->itt);
1200 if (!task)
1201 return ISCSI_ERR_BAD_ITT;
1202 task->last_xfer = jiffies;
1203 break;
1204 case ISCSI_OP_R2T:
1205 /*
1206 * LLD handles R2Ts if they need to.
1207 */
1208 return 0;
1209 case ISCSI_OP_LOGOUT_RSP:
1210 case ISCSI_OP_LOGIN_RSP:
1211 case ISCSI_OP_TEXT_RSP:
1212 case ISCSI_OP_SCSI_TMFUNC_RSP:
1213 case ISCSI_OP_NOOP_IN:
1214 task = iscsi_itt_to_task(conn, hdr->itt);
1215 if (!task)
1216 return ISCSI_ERR_BAD_ITT;
1217 break;
1218 default:
1219 return ISCSI_ERR_BAD_OPCODE;
1220 }
1221
1222 switch(opcode) {
1223 case ISCSI_OP_SCSI_CMD_RSP:
1224 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
1225 break;
1226 case ISCSI_OP_SCSI_DATA_IN:
1227 iscsi_data_in_rsp(conn, hdr, task);
1228 break;
1229 case ISCSI_OP_LOGOUT_RSP:
1230 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1231 if (datalen) {
1232 rc = ISCSI_ERR_PROTO;
1233 break;
1234 }
1235 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1236 goto recv_pdu;
1237 case ISCSI_OP_LOGIN_RSP:
1238 case ISCSI_OP_TEXT_RSP:
1239 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1240 /*
1241 * login related PDU's exp_statsn is handled in
1242 * userspace
1243 */
1244 goto recv_pdu;
1245 case ISCSI_OP_SCSI_TMFUNC_RSP:
1246 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1247 if (datalen) {
1248 rc = ISCSI_ERR_PROTO;
1249 break;
1250 }
1251
1252 iscsi_tmf_rsp(conn, hdr);
1253 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1254 break;
1255 case ISCSI_OP_NOOP_IN:
1256 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
1257 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
1258 rc = ISCSI_ERR_PROTO;
1259 break;
1260 }
1261 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1262
1263 rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
1264 data, datalen);
1265 break;
1266 default:
1267 rc = ISCSI_ERR_BAD_OPCODE;
1268 break;
1269 }
1270
1271 out:
1272 return rc;
1273 recv_pdu:
1274 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
1275 rc = ISCSI_ERR_CONN_FAILED;
1276 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1277 return rc;
1278 }
1279 EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
1280
1281 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1282 char *data, int datalen)
1283 {
1284 int rc;
1285
1286 spin_lock(&conn->session->back_lock);
1287 rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
1288 spin_unlock(&conn->session->back_lock);
1289 return rc;
1290 }
1291 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
1292
1293 int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
1294 {
1295 struct iscsi_session *session = conn->session;
1296 int age = 0, i = 0;
1297
1298 if (itt == RESERVED_ITT)
1299 return 0;
1300
1301 if (session->tt->parse_pdu_itt)
1302 session->tt->parse_pdu_itt(conn, itt, &i, &age);
1303 else {
1304 i = get_itt(itt);
1305 age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
1306 }
1307
1308 if (age != session->age) {
1309 iscsi_conn_printk(KERN_ERR, conn,
1310 "received itt %x expected session age (%x)\n",
1311 (__force u32)itt, session->age);
1312 return ISCSI_ERR_BAD_ITT;
1313 }
1314
1315 if (i >= session->cmds_max) {
1316 iscsi_conn_printk(KERN_ERR, conn,
1317 "received invalid itt index %u (max cmds "
1318 "%u).\n", i, session->cmds_max);
1319 return ISCSI_ERR_BAD_ITT;
1320 }
1321 return 0;
1322 }
1323 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
1324
1325 /**
1326 * iscsi_itt_to_ctask - look up ctask by itt
1327 * @conn: iscsi connection
1328 * @itt: itt
1329 *
1330 * This should be used for cmd tasks.
1331 *
1332 * The session back_lock must be held.
1333 */
1334 struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
1335 {
1336 struct iscsi_task *task;
1337
1338 if (iscsi_verify_itt(conn, itt))
1339 return NULL;
1340
1341 task = iscsi_itt_to_task(conn, itt);
1342 if (!task || !task->sc)
1343 return NULL;
1344
1345 if (task->sc->SCp.phase != conn->session->age) {
1346 iscsi_session_printk(KERN_ERR, conn->session,
1347 "task's session age %d, expected %d\n",
1348 task->sc->SCp.phase, conn->session->age);
1349 return NULL;
1350 }
1351
1352 return task;
1353 }
1354 EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
1355
1356 void iscsi_session_failure(struct iscsi_session *session,
1357 enum iscsi_err err)
1358 {
1359 struct iscsi_conn *conn;
1360
1361 spin_lock_bh(&session->frwd_lock);
1362 conn = session->leadconn;
1363 if (session->state == ISCSI_STATE_TERMINATE || !conn) {
1364 spin_unlock_bh(&session->frwd_lock);
1365 return;
1366 }
1367
1368 iscsi_get_conn(conn->cls_conn);
1369 spin_unlock_bh(&session->frwd_lock);
1370 /*
1371 * if the host is being removed bypass the connection
1372 * recovery initialization because we are going to kill
1373 * the session.
1374 */
1375 if (err == ISCSI_ERR_INVALID_HOST)
1376 iscsi_conn_error_event(conn->cls_conn, err);
1377 else
1378 iscsi_conn_failure(conn, err);
1379 iscsi_put_conn(conn->cls_conn);
1380 }
1381 EXPORT_SYMBOL_GPL(iscsi_session_failure);
1382
1383 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
1384 {
1385 struct iscsi_session *session = conn->session;
1386
1387 spin_lock_bh(&session->frwd_lock);
1388 if (session->state == ISCSI_STATE_FAILED) {
1389 spin_unlock_bh(&session->frwd_lock);
1390 return;
1391 }
1392
1393 if (conn->stop_stage == 0)
1394 session->state = ISCSI_STATE_FAILED;
1395 spin_unlock_bh(&session->frwd_lock);
1396
1397 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1398 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1399 iscsi_conn_error_event(conn->cls_conn, err);
1400 }
1401 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
1402
1403 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
1404 {
1405 struct iscsi_session *session = conn->session;
1406
1407 /*
1408 * Check for iSCSI window and take care of CmdSN wrap-around
1409 */
1410 if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
1411 ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
1412 "%u MaxCmdSN %u CmdSN %u/%u\n",
1413 session->exp_cmdsn, session->max_cmdsn,
1414 session->cmdsn, session->queued_cmdsn);
1415 return -ENOSPC;
1416 }
1417 return 0;
1418 }
1419
1420 static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
1421 bool was_requeue)
1422 {
1423 int rc;
1424
1425 spin_lock_bh(&conn->session->back_lock);
1426
1427 if (!conn->task) {
1428 /* Take a ref so we can access it after xmit_task() */
1429 __iscsi_get_task(task);
1430 } else {
1431 /* Already have a ref from when we failed to send it last call */
1432 conn->task = NULL;
1433 }
1434
1435 /*
1436 * If this was a requeue for a R2T we have an extra ref on the task in
1437 * case a bad target sends a cmd rsp before we have handled the task.
1438 */
1439 if (was_requeue)
1440 __iscsi_put_task(task);
1441
1442 /*
1443 * Do this after dropping the extra ref because if this was a requeue
1444 * it's removed from that list and cleanup_queued_task would miss it.
1445 */
1446 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1447 /*
1448 * Save the task and ref in case we weren't cleaning up this
1449 * task and get woken up again.
1450 */
1451 conn->task = task;
1452 spin_unlock_bh(&conn->session->back_lock);
1453 return -ENODATA;
1454 }
1455 spin_unlock_bh(&conn->session->back_lock);
1456
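/*
 * The transport's xmit_task callout is invoked without frwd_lock held,
 * so other frwd_lock users (e.g. queuecommand) are not blocked for the
 * duration of the transmit.
 */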
1457 spin_unlock_bh(&conn->session->frwd_lock);
1458 rc = conn->session->tt->xmit_task(task);
1459 spin_lock_bh(&conn->session->frwd_lock);
1460 if (!rc) {
1461 /* done with this task */
1462 task->last_xfer = jiffies;
1463 }
1464 /* regular RX path uses back_lock */
1465 spin_lock(&conn->session->back_lock);
1466 if (rc && task->state == ISCSI_TASK_RUNNING) {
1467 /*
1468 * get an extra ref that is released next time we access it
1469 * as conn->task above.
1470 */
1471 __iscsi_get_task(task);
1472 conn->task = task;
1473 }
1474
1475 __iscsi_put_task(task);
1476 spin_unlock(&conn->session->back_lock);
1477 return rc;
1478 }
1479
1480 /**
1481 * iscsi_requeue_task - requeue task to run from session workqueue
1482 * @task: task to requeue
1483 *
1484 * Callers must have taken a ref to the task that is going to be requeued.
1485 */
1486 void iscsi_requeue_task(struct iscsi_task *task)
1487 {
1488 struct iscsi_conn *conn = task->conn;
1489
1490 /*
1491 * this may be on the requeue list already if the xmit_task callout
1492 * is handling the r2ts while we are adding new ones
1493 */
1494 spin_lock_bh(&conn->session->frwd_lock);
1495 if (list_empty(&task->running)) {
1496 list_add_tail(&task->running, &conn->requeue);
1497 } else {
1498 /*
1499 * Don't need the extra ref since it's already requeued and
1500 * has a ref.
1501 */
1502 iscsi_put_task(task);
1503 }
1504 iscsi_conn_queue_work(conn);
1505 spin_unlock_bh(&conn->session->frwd_lock);
1506 }
1507 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
1508
1509 /**
1510 * iscsi_data_xmit - xmit any command into the scheduled connection
1511 * @conn: iscsi connection
1512 *
1513 * Notes:
1514 * The function can return -EAGAIN in which case the caller must
1515 * re-schedule it again later or recover. '0' return code means
1516 * successful xmit.
1517 **/
1518 static int iscsi_data_xmit(struct iscsi_conn *conn)
1519 {
1520 struct iscsi_task *task;
1521 int rc = 0;
1522
1523 spin_lock_bh(&conn->session->frwd_lock);
1524 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1525 ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
1526 spin_unlock_bh(&conn->session->frwd_lock);
1527 return -ENODATA;
1528 }
1529
1530 if (conn->task) {
1531 rc = iscsi_xmit_task(conn, conn->task, false);
1532 if (rc)
1533 goto done;
1534 }
1535
1536 /*
1537 * process mgmt pdus like nops before commands since we should
1538 * only have one nop-out as a ping from us and targets should not
1539 * overflow us with nop-ins
1540 */
1541 check_mgmt:
1542 while (!list_empty(&conn->mgmtqueue)) {
1543 task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
1544 running);
1545 list_del_init(&task->running);
1546 if (iscsi_prep_mgmt_task(conn, task)) {
1547 /* regular RX path uses back_lock */
1548 spin_lock_bh(&conn->session->back_lock);
1549 __iscsi_put_task(task);
1550 spin_unlock_bh(&conn->session->back_lock);
1551 continue;
1552 }
1553 rc = iscsi_xmit_task(conn, task, false);
1554 if (rc)
1555 goto done;
1556 }
1557
1558 /* process pending command queue */
1559 while (!list_empty(&conn->cmdqueue)) {
1560 task = list_entry(conn->cmdqueue.next, struct iscsi_task,
1561 running);
1562 list_del_init(&task->running);
1563 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1564 fail_scsi_task(task, DID_IMM_RETRY);
1565 continue;
1566 }
1567 rc = iscsi_prep_scsi_cmd_pdu(task);
1568 if (rc) {
1569 if (rc == -ENOMEM || rc == -EACCES)
1570 fail_scsi_task(task, DID_IMM_RETRY);
1571 else
1572 fail_scsi_task(task, DID_ABORT);
1573 continue;
1574 }
1575 rc = iscsi_xmit_task(conn, task, false);
1576 if (rc)
1577 goto done;
1578 /*
1579 * we could continuously get new task requests so
1580 * we need to check the mgmt queue for nops that need to
1581 * be sent to avoid starvation
1582 */
1583 if (!list_empty(&conn->mgmtqueue))
1584 goto check_mgmt;
1585 }
1586
1587 while (!list_empty(&conn->requeue)) {
1588 /*
1589 * we always do fastlogout - conn stop code will clean up.
1590 */
1591 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1592 break;
1593
1594 task = list_entry(conn->requeue.next, struct iscsi_task,
1595 running);
1596
1597 if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
1598 break;
1599
1600 list_del_init(&task->running);
1601 rc = iscsi_xmit_task(conn, task, true);
1602 if (rc)
1603 goto done;
1604 if (!list_empty(&conn->mgmtqueue))
1605 goto check_mgmt;
1606 }
1607 spin_unlock_bh(&conn->session->frwd_lock);
1608 return -ENODATA;
1609
1610 done:
1611 spin_unlock_bh(&conn->session->frwd_lock);
1612 return rc;
1613 }
1614
1615 static void iscsi_xmitworker(struct work_struct *work)
1616 {
1617 struct iscsi_conn *conn =
1618 container_of(work, struct iscsi_conn, xmitwork);
1619 int rc;
1620 /*
1621 * serialize Xmit worker on a per-connection basis.
1622 */
1623 do {
1624 rc = iscsi_data_xmit(conn);
1625 } while (rc >= 0 || rc == -EAGAIN);
1626 }
1627
1628 static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1629 struct scsi_cmnd *sc)
1630 {
1631 struct iscsi_task *task;
1632
1633 if (!kfifo_out(&conn->session->cmdpool.queue,
1634 (void *) &task, sizeof(void *)))
1635 return NULL;
1636
1637 sc->SCp.phase = conn->session->age;
1638 sc->SCp.ptr = (char *) task;
1639
1640 refcount_set(&task->refcount, 1);
1641 task->state = ISCSI_TASK_PENDING;
1642 task->conn = conn;
1643 task->sc = sc;
1644 task->have_checked_conn = false;
1645 task->last_timeout = jiffies;
1646 task->last_xfer = jiffies;
1647 task->protected = false;
1648 INIT_LIST_HEAD(&task->running);
1649 return task;
1650 }
1651
1652 enum {
1653 FAILURE_BAD_HOST = 1,
1654 FAILURE_SESSION_FAILED,
1655 FAILURE_SESSION_FREED,
1656 FAILURE_WINDOW_CLOSED,
1657 FAILURE_OOM,
1658 FAILURE_SESSION_TERMINATE,
1659 FAILURE_SESSION_IN_RECOVERY,
1660 FAILURE_SESSION_RECOVERY_TIMEOUT,
1661 FAILURE_SESSION_LOGGING_OUT,
1662 FAILURE_SESSION_NOT_READY,
1663 };
1664
1665 int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1666 {
1667 struct iscsi_cls_session *cls_session;
1668 struct iscsi_host *ihost;
1669 int reason = 0;
1670 struct iscsi_session *session;
1671 struct iscsi_conn *conn;
1672 struct iscsi_task *task = NULL;
1673
1674 sc->result = 0;
1675 sc->SCp.ptr = NULL;
1676
1677 ihost = shost_priv(host);
1678
1679 cls_session = starget_to_session(scsi_target(sc->device));
1680 session = cls_session->dd_data;
1681 spin_lock_bh(&session->frwd_lock);
1682
1683 reason = iscsi_session_chkready(cls_session);
1684 if (reason) {
1685 sc->result = reason;
1686 goto fault;
1687 }
1688
1689 if (session->state != ISCSI_STATE_LOGGED_IN) {
1690 /*
1691 * to handle the race between when we set the recovery state
1692 * and block the session we requeue here (commands could
1693 * be entering our queuecommand while a block is starting
1694 * up because the block code is not locked)
1695 */
1696 switch (session->state) {
1697 case ISCSI_STATE_FAILED:
1698 /*
1699 * cmds should fail during shutdown, if the session
1700 * state is bad, allowing completion to happen
1701 */
1702 if (unlikely(system_state != SYSTEM_RUNNING)) {
1703 reason = FAILURE_SESSION_FAILED;
1704 sc->result = DID_NO_CONNECT << 16;
1705 break;
1706 }
1707 fallthrough;
1708 case ISCSI_STATE_IN_RECOVERY:
1709 reason = FAILURE_SESSION_IN_RECOVERY;
1710 sc->result = DID_IMM_RETRY << 16;
1711 break;
1712 case ISCSI_STATE_LOGGING_OUT:
1713 reason = FAILURE_SESSION_LOGGING_OUT;
1714 sc->result = DID_IMM_RETRY << 16;
1715 break;
1716 case ISCSI_STATE_RECOVERY_FAILED:
1717 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1718 sc->result = DID_TRANSPORT_FAILFAST << 16;
1719 break;
1720 case ISCSI_STATE_TERMINATE:
1721 reason = FAILURE_SESSION_TERMINATE;
1722 sc->result = DID_NO_CONNECT << 16;
1723 break;
1724 default:
1725 reason = FAILURE_SESSION_FREED;
1726 sc->result = DID_NO_CONNECT << 16;
1727 }
1728 goto fault;
1729 }
1730
1731 conn = session->leadconn;
1732 if (!conn) {
1733 reason = FAILURE_SESSION_FREED;
1734 sc->result = DID_NO_CONNECT << 16;
1735 goto fault;
1736 }
1737
1738 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1739 reason = FAILURE_SESSION_IN_RECOVERY;
1740 sc->result = DID_REQUEUE << 16;
1741 goto fault;
1742 }
1743
1744 if (iscsi_check_cmdsn_window_closed(conn)) {
1745 reason = FAILURE_WINDOW_CLOSED;
1746 goto reject;
1747 }
1748
1749 task = iscsi_alloc_task(conn, sc);
1750 if (!task) {
1751 reason = FAILURE_OOM;
1752 goto reject;
1753 }
1754
1755 if (!ihost->workq) {
1756 reason = iscsi_prep_scsi_cmd_pdu(task);
1757 if (reason) {
1758 if (reason == -ENOMEM || reason == -EACCES) {
1759 reason = FAILURE_OOM;
1760 goto prepd_reject;
1761 } else {
1762 sc->result = DID_ABORT << 16;
1763 goto prepd_fault;
1764 }
1765 }
1766 if (session->tt->xmit_task(task)) {
1767 session->cmdsn--;
1768 reason = FAILURE_SESSION_NOT_READY;
1769 goto prepd_reject;
1770 }
1771 } else {
1772 list_add_tail(&task->running, &conn->cmdqueue);
1773 iscsi_conn_queue_work(conn);
1774 }
1775
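/*
 * queued_cmdsn counts commands admitted to the CmdSN window; it is
 * what iscsi_check_cmdsn_window_closed() compares against MaxCmdSN,
 * and it is decremented again if the command is failed before being
 * sent.
 */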
1776 session->queued_cmdsn++;
1777 spin_unlock_bh(&session->frwd_lock);
1778 return 0;
1779
1780 prepd_reject:
1781 spin_lock_bh(&session->back_lock);
1782 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1783 spin_unlock_bh(&session->back_lock);
1784 reject:
1785 spin_unlock_bh(&session->frwd_lock);
1786 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
1787 sc->cmnd[0], reason);
1788 return SCSI_MLQUEUE_TARGET_BUSY;
1789
1790 prepd_fault:
1791 spin_lock_bh(&session->back_lock);
1792 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1793 spin_unlock_bh(&session->back_lock);
1794 fault:
1795 spin_unlock_bh(&session->frwd_lock);
1796 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
1797 sc->cmnd[0], reason);
1798 scsi_set_resid(sc, scsi_bufflen(sc));
1799 sc->scsi_done(sc);
1800 return 0;
1801 }
1802 EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1803
1804 int iscsi_target_alloc(struct scsi_target *starget)
1805 {
1806 struct iscsi_cls_session *cls_session = starget_to_session(starget);
1807 struct iscsi_session *session = cls_session->dd_data;
1808
1809 starget->can_queue = session->scsi_cmds_max;
1810 return 0;
1811 }
1812 EXPORT_SYMBOL_GPL(iscsi_target_alloc);
1813
1814 static void iscsi_tmf_timedout(struct timer_list *t)
1815 {
1816 struct iscsi_session *session = from_timer(session, t, tmf_timer);
1817
1818 spin_lock(&session->frwd_lock);
1819 if (session->tmf_state == TMF_QUEUED) {
1820 session->tmf_state = TMF_TIMEDOUT;
1821 ISCSI_DBG_EH(session, "tmf timedout\n");
1822 /* unblock eh_abort() */
1823 wake_up(&session->ehwait);
1824 }
1825 spin_unlock(&session->frwd_lock);
1826 }
1827
1828 static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1829 struct iscsi_tm *hdr, int age,
1830 int timeout)
1831 __must_hold(&session->frwd_lock)
1832 {
1833 struct iscsi_session *session = conn->session;
1834 struct iscsi_task *task;
1835
1836 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1837 NULL, 0);
1838 if (!task) {
1839 spin_unlock_bh(&session->frwd_lock);
1840 iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
1841 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1842 spin_lock_bh(&session->frwd_lock);
1843 return -EPERM;
1844 }
1845 conn->tmfcmd_pdus_cnt++;
1846 session->tmf_timer.expires = timeout * HZ + jiffies;
1847 add_timer(&session->tmf_timer);
1848 ISCSI_DBG_EH(session, "tmf set timeout\n");
1849
1850 spin_unlock_bh(&session->frwd_lock);
1851 mutex_unlock(&session->eh_mutex);
1852
1853 /*
1854 * block eh thread until:
1855 *
1856 * 1) tmf response
1857 * 2) tmf timeout
1858 * 3) session is terminated or restarted or userspace has
1859 * given up on recovery
1860 */
1861 wait_event_interruptible(session->ehwait, age != session->age ||
1862 session->state != ISCSI_STATE_LOGGED_IN ||
1863 session->tmf_state != TMF_QUEUED);
1864 if (signal_pending(current))
1865 flush_signals(current);
1866 del_timer_sync(&session->tmf_timer);
1867
1868 mutex_lock(&session->eh_mutex);
1869 spin_lock_bh(&session->frwd_lock);
1870 /* if the session drops it will clean up the task */
1871 if (age != session->age ||
1872 session->state != ISCSI_STATE_LOGGED_IN)
1873 return -ENOTCONN;
1874 return 0;
1875 }
1876
1877 /*
1878 * Fail commands. The session frwd_lock must be held and the xmit thread flushed.
1879 */
1880 static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
1881 {
1882 struct iscsi_session *session = conn->session;
1883 struct iscsi_task *task;
1884 int i;
1885
1886 spin_lock_bh(&session->back_lock);
1887 for (i = 0; i < session->cmds_max; i++) {
1888 task = session->cmds[i];
1889 if (!task->sc || task->state == ISCSI_TASK_FREE)
1890 continue;
1891
1892 if (lun != -1 && lun != task->sc->device->lun)
1893 continue;
1894
1895 __iscsi_get_task(task);
1896 spin_unlock_bh(&session->back_lock);
1897
1898 ISCSI_DBG_SESSION(session,
1899 "failing sc %p itt 0x%x state %d\n",
1900 task->sc, task->itt, task->state);
1901 fail_scsi_task(task, error);
1902
1903 spin_unlock_bh(&session->frwd_lock);
1904 iscsi_put_task(task);
1905 spin_lock_bh(&session->frwd_lock);
1906
1907 spin_lock_bh(&session->back_lock);
1908 }
1909
1910 spin_unlock_bh(&session->back_lock);
1911 }
1912
1913 /**
1914 * iscsi_suspend_queue - suspend iscsi_queuecommand
1915 * @conn: iscsi conn to stop queueing IO on
1916 *
1917 * This grabs the session frwd_lock to make sure no one is in
1918 * xmit_task/queuecommand, and then sets suspend to prevent
1919 * new commands from being queued. This only needs to be called
1920 * by offload drivers that need to sync a path like ep disconnect
1921 * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
1922 * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
1923 */
1924 void iscsi_suspend_queue(struct iscsi_conn *conn)
1925 {
1926 spin_lock_bh(&conn->session->frwd_lock);
1927 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1928 spin_unlock_bh(&conn->session->frwd_lock);
1929 }
1930 EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
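
/*
 * Illustrative usage sketch: an offload driver that must sync an ep
 * disconnect with queuecommand could pair this helper with its endpoint
 * teardown roughly as below. my_ep_disconnect() and struct my_ep are
 * hypothetical names, not part of this file.
 *
 *	static void my_ep_disconnect(struct iscsi_endpoint *ep)
 *	{
 *		struct my_ep *mep = ep->dd_data;
 *
 *		if (mep->conn)
 *			iscsi_suspend_queue(mep->conn);
 *		// tear down the HW/firmware endpoint here; once the session
 *		// is logged back in, libiscsi restarts IO via
 *		// iscsi_start_tx() and iscsi_unblock_session().
 *	}
 */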
1931
1932 /**
1933 * iscsi_suspend_tx - suspend iscsi_data_xmit
1934 * @conn: iscsi conn to stop processing IO on.
1935 *
1936 * This function sets the suspend bit to prevent iscsi_data_xmit
1937 * from sending new IO, and if work is queued on the xmit thread
1938 * it will wait for it to be completed.
1939 */
1940 void iscsi_suspend_tx(struct iscsi_conn *conn)
1941 {
1942 struct Scsi_Host *shost = conn->session->host;
1943 struct iscsi_host *ihost = shost_priv(shost);
1944
1945 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1946 if (ihost->workq)
1947 flush_workqueue(ihost->workq);
1948 }
1949 EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1950
1951 static void iscsi_start_tx(struct iscsi_conn *conn)
1952 {
1953 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1954 iscsi_conn_queue_work(conn);
1955 }
1956
1957 /*
1958 * We want to make sure a ping is in flight and that it has timed
1959 * out, and that we are not just busy processing a pdu that is making
1960 * progress but got started before the ping and is taking a while
1961 * to complete, so the ping is merely stuck behind it in a queue.
1962 */
1963 static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1964 {
1965 if (READ_ONCE(conn->ping_task) &&
1966 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1967 (conn->ping_timeout * HZ), jiffies))
1968 return 1;
1969 else
1970 return 0;
1971 }
1972
1973 enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1974 {
1975 enum blk_eh_timer_return rc = BLK_EH_DONE;
1976 struct iscsi_task *task = NULL, *running_task;
1977 struct iscsi_cls_session *cls_session;
1978 struct iscsi_session *session;
1979 struct iscsi_conn *conn;
1980 int i;
1981
1982 cls_session = starget_to_session(scsi_target(sc->device));
1983 session = cls_session->dd_data;
1984
1985 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
1986
1987 spin_lock_bh(&session->frwd_lock);
1988 spin_lock(&session->back_lock);
1989 task = (struct iscsi_task *)sc->SCp.ptr;
1990 if (!task) {
1991 /*
1992 * Raced with completion. Blk layer has taken ownership
1993 * so let timeout code complete it now.
1994 */
1995 rc = BLK_EH_DONE;
1996 spin_unlock(&session->back_lock);
1997 goto done;
1998 }
1999 __iscsi_get_task(task);
2000 spin_unlock(&session->back_lock);
2001
2002 if (session->state != ISCSI_STATE_LOGGED_IN) {
2003 /*
2004 * During shutdown, if session is prematurely disconnected,
2005 * recovery won't happen and there will be hung cmds. Not
2006 * handling cmds would trigger EH, also bad in this case.
2007 * Instead, handle the cmd, allow completion to happen and let
2008 * the upper layer deal with the result.
2009 */
2010 if (unlikely(system_state != SYSTEM_RUNNING)) {
2011 sc->result = DID_NO_CONNECT << 16;
2012 ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
2013 rc = BLK_EH_DONE;
2014 goto done;
2015 }
2016 /*
2017 * We are probably in the middle of iscsi recovery so let
2018 * that complete and handle the error.
2019 */
2020 rc = BLK_EH_RESET_TIMER;
2021 goto done;
2022 }
2023
2024 conn = session->leadconn;
2025 if (!conn) {
2026 /* In the middle of shutting down */
2027 rc = BLK_EH_RESET_TIMER;
2028 goto done;
2029 }
2030
2031 /*
2032 * If we have sent (at least queued to the network layer) a pdu or
2033 * received one for the task since the last timeout, ask for
2034 * more time. If on the next timeout we have not made progress
2035 * we can check if it is the task or connection when we send the
2036 * nop as a ping.
2037 */
2038 if (time_after(task->last_xfer, task->last_timeout)) {
2039 ISCSI_DBG_EH(session, "Command making progress. Asking "
2040 "scsi-ml for more time to complete. "
2041 "Last data xfer at %lu. Last timeout was at "
2042 "%lu\n.", task->last_xfer, task->last_timeout);
2043 task->have_checked_conn = false;
2044 rc = BLK_EH_RESET_TIMER;
2045 goto done;
2046 }
2047
2048 if (!conn->recv_timeout && !conn->ping_timeout)
2049 goto done;
2050 /*
2051 * if the ping timed out then we are in the middle of cleaning up
2052 * and can let the iscsi eh handle it
2053 */
2054 if (iscsi_has_ping_timed_out(conn)) {
2055 rc = BLK_EH_RESET_TIMER;
2056 goto done;
2057 }
2058
2059 spin_lock(&session->back_lock);
2060 for (i = 0; i < conn->session->cmds_max; i++) {
2061 running_task = conn->session->cmds[i];
2062 if (!running_task->sc || running_task == task ||
2063 running_task->state != ISCSI_TASK_RUNNING)
2064 continue;
2065
2066 /*
2067 * Only check if cmds started before this one have made
2068 * progress, or this could never fail
2069 */
2070 if (time_after(running_task->sc->jiffies_at_alloc,
2071 task->sc->jiffies_at_alloc))
2072 continue;
2073
2074 if (time_after(running_task->last_xfer, task->last_timeout)) {
2075 /*
2076 * This task has not made progress, but a task
2077 * started before us has transferred data since
2078 * we started/last-checked. We could be queueing
2079 * too many tasks or the LU is bad.
2080 *
2081 * If the device is bad the cmds ahead of us on
2082 * other devs will complete, and this loop will
2083 * eventually fail starting the scsi eh.
2084 */
2085 ISCSI_DBG_EH(session, "Command has not made progress "
2086 "but commands ahead of it have. "
2087 "Asking scsi-ml for more time to "
2088 "complete. Our last xfer vs running task "
2089 "last xfer %lu/%lu. Last check %lu.\n",
2090 task->last_xfer, running_task->last_xfer,
2091 task->last_timeout);
2092 spin_unlock(&session->back_lock);
2093 rc = BLK_EH_RESET_TIMER;
2094 goto done;
2095 }
2096 }
2097 spin_unlock(&session->back_lock);
2098
2099 /* Assumes nop timeout is shorter than scsi cmd timeout */
2100 if (task->have_checked_conn)
2101 goto done;
2102
2103 /*
2104 * Already checking the transport, or a nop from a previous cmd
2105 * timeout is still running.
2106 */
2107 if (READ_ONCE(conn->ping_task)) {
2108 task->have_checked_conn = true;
2109 rc = BLK_EH_RESET_TIMER;
2110 goto done;
2111 }
2112
2113 /* Make sure there is a transport check done */
2114 iscsi_send_nopout(conn, NULL);
2115 task->have_checked_conn = true;
2116 rc = BLK_EH_RESET_TIMER;
2117
2118 done:
2119 spin_unlock_bh(&session->frwd_lock);
2120
2121 if (task) {
2122 task->last_timeout = jiffies;
2123 iscsi_put_task(task);
2124 }
2125 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
2126 "timer reset" : "shutdown or nh");
2127 return rc;
2128 }
2129 EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
2130
2131 static void iscsi_check_transport_timeouts(struct timer_list *t)
2132 {
2133 struct iscsi_conn *conn = from_timer(conn, t, transport_timer);
2134 struct iscsi_session *session = conn->session;
2135 unsigned long recv_timeout, next_timeout = 0, last_recv;
2136
2137 spin_lock(&session->frwd_lock);
2138 if (session->state != ISCSI_STATE_LOGGED_IN)
2139 goto done;
2140
2141 recv_timeout = conn->recv_timeout;
2142 if (!recv_timeout)
2143 goto done;
2144
2145 recv_timeout *= HZ;
2146 last_recv = conn->last_recv;
2147
2148 if (iscsi_has_ping_timed_out(conn)) {
2149 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
2150 "expired, recv timeout %d, last rx %lu, "
2151 "last ping %lu, now %lu\n",
2152 conn->ping_timeout, conn->recv_timeout,
2153 last_recv, conn->last_ping, jiffies);
2154 spin_unlock(&session->frwd_lock);
2155 iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT);
2156 return;
2157 }
2158
2159 if (time_before_eq(last_recv + recv_timeout, jiffies)) {
2160 /* send a ping to try to provoke some traffic */
2161 ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
2162 if (iscsi_send_nopout(conn, NULL))
2163 next_timeout = jiffies + (1 * HZ);
2164 else
2165 next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
2166 } else
2167 next_timeout = last_recv + recv_timeout;
2168
2169 ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
2170 mod_timer(&conn->transport_timer, next_timeout);
2171 done:
2172 spin_unlock(&session->frwd_lock);
2173 }
2174
2175 static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
2176 struct iscsi_tm *hdr)
2177 {
2178 memset(hdr, 0, sizeof(*hdr));
2179 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2180 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
2181 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2182 hdr->lun = task->lun;
2183 hdr->rtt = task->hdr_itt;
2184 hdr->refcmdsn = task->cmdsn;
2185 }
2186
2187 int iscsi_eh_abort(struct scsi_cmnd *sc)
2188 {
2189 struct iscsi_cls_session *cls_session;
2190 struct iscsi_session *session;
2191 struct iscsi_conn *conn;
2192 struct iscsi_task *task;
2193 struct iscsi_tm *hdr;
2194 int age;
2195
2196 cls_session = starget_to_session(scsi_target(sc->device));
2197 session = cls_session->dd_data;
2198
2199 ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
2200
2201 mutex_lock(&session->eh_mutex);
2202 spin_lock_bh(&session->frwd_lock);
2203 /*
2204 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
2205 * got the command.
2206 */
2207 if (!sc->SCp.ptr) {
2208 ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
2209 "it completed.\n");
2210 spin_unlock_bh(&session->frwd_lock);
2211 mutex_unlock(&session->eh_mutex);
2212 return SUCCESS;
2213 }
2214
2215 /*
2216 * If we are not logged in or we have started a new session
2217 * then let the host reset code handle this
2218 */
2219 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
2220 sc->SCp.phase != session->age) {
2221 spin_unlock_bh(&session->frwd_lock);
2222 mutex_unlock(&session->eh_mutex);
2223 ISCSI_DBG_EH(session, "failing abort due to dropped "
2224 "session.\n");
2225 return FAILED;
2226 }
2227
2228 conn = session->leadconn;
2229 conn->eh_abort_cnt++;
2230 age = session->age;
2231
2232 spin_lock(&session->back_lock);
2233 task = (struct iscsi_task *)sc->SCp.ptr;
2234 if (!task || !task->sc) {
2235 /* task completed before time out */
2236 ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
2237
2238 spin_unlock(&session->back_lock);
2239 spin_unlock_bh(&session->frwd_lock);
2240 mutex_unlock(&session->eh_mutex);
2241 return SUCCESS;
2242 }
2243 ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
2244 __iscsi_get_task(task);
2245 spin_unlock(&session->back_lock);
2246
2247 if (task->state == ISCSI_TASK_PENDING) {
2248 fail_scsi_task(task, DID_ABORT);
2249 goto success;
2250 }
2251
2252 /* only have one tmf outstanding at a time */
2253 if (session->tmf_state != TMF_INITIAL)
2254 goto failed;
2255 session->tmf_state = TMF_QUEUED;
2256
2257 hdr = &session->tmhdr;
2258 iscsi_prep_abort_task_pdu(task, hdr);
2259
2260 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
2261 goto failed;
2262
2263 switch (session->tmf_state) {
2264 case TMF_SUCCESS:
2265 spin_unlock_bh(&session->frwd_lock);
2266 /*
2267 * stop tx side in case the target had sent an abort rsp but
2268 * the initiator was still writing out data.
2269 */
2270 iscsi_suspend_tx(conn);
2271 /*
2272 * we do not stop the recv side because targets have been
2273 * well behaved and have never sent us a successful tmf response
2274 * and then sent more data for the cmd.
2275 */
2276 spin_lock_bh(&session->frwd_lock);
2277 fail_scsi_task(task, DID_ABORT);
2278 session->tmf_state = TMF_INITIAL;
2279 memset(hdr, 0, sizeof(*hdr));
2280 spin_unlock_bh(&session->frwd_lock);
2281 iscsi_start_tx(conn);
2282 goto success_unlocked;
2283 case TMF_TIMEDOUT:
2284 spin_unlock_bh(&session->frwd_lock);
2285 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2286 goto failed_unlocked;
2287 case TMF_NOT_FOUND:
2288 if (!sc->SCp.ptr) {
2289 session->tmf_state = TMF_INITIAL;
2290 memset(hdr, 0, sizeof(*hdr));
2291 /* task completed before tmf abort response */
2292 ISCSI_DBG_EH(session, "sc completed while abort in "
2293 "progress\n");
2294 goto success;
2295 }
2296 fallthrough;
2297 default:
2298 session->tmf_state = TMF_INITIAL;
2299 goto failed;
2300 }
2301
2302 success:
2303 spin_unlock_bh(&session->frwd_lock);
2304 success_unlocked:
2305 ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
2306 sc, task->itt);
2307 iscsi_put_task(task);
2308 mutex_unlock(&session->eh_mutex);
2309 return SUCCESS;
2310
2311 failed:
2312 spin_unlock_bh(&session->frwd_lock);
2313 failed_unlocked:
2314 ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
2315 task ? task->itt : 0);
2316 iscsi_put_task(task);
2317 mutex_unlock(&session->eh_mutex);
2318 return FAILED;
2319 }
2320 EXPORT_SYMBOL_GPL(iscsi_eh_abort);
2321
2322 static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2323 {
2324 memset(hdr, 0, sizeof(*hdr));
2325 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2326 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2327 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2328 int_to_scsilun(sc->device->lun, &hdr->lun);
2329 hdr->rtt = RESERVED_ITT;
2330 }
2331
2332 int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2333 {
2334 struct iscsi_cls_session *cls_session;
2335 struct iscsi_session *session;
2336 struct iscsi_conn *conn;
2337 struct iscsi_tm *hdr;
2338 int rc = FAILED;
2339
2340 cls_session = starget_to_session(scsi_target(sc->device));
2341 session = cls_session->dd_data;
2342
2343 ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc,
2344 sc->device->lun);
2345
2346 mutex_lock(&session->eh_mutex);
2347 spin_lock_bh(&session->frwd_lock);
2348 /*
2349 * Just check if we are not logged in. We cannot check for
2350 * the phase because the reset could come from an ioctl.
2351 */
2352 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2353 goto unlock;
2354 conn = session->leadconn;
2355
2356 /* only have one tmf outstanding at a time */
2357 if (session->tmf_state != TMF_INITIAL)
2358 goto unlock;
2359 session->tmf_state = TMF_QUEUED;
2360
2361 hdr = &session->tmhdr;
2362 iscsi_prep_lun_reset_pdu(sc, hdr);
2363
2364 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2365 session->lu_reset_timeout)) {
2366 rc = FAILED;
2367 goto unlock;
2368 }
2369
2370 switch (session->tmf_state) {
2371 case TMF_SUCCESS:
2372 break;
2373 case TMF_TIMEDOUT:
2374 spin_unlock_bh(&session->frwd_lock);
2375 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2376 goto done;
2377 default:
2378 session->tmf_state = TMF_INITIAL;
2379 goto unlock;
2380 }
2381
2382 rc = SUCCESS;
2383 spin_unlock_bh(&session->frwd_lock);
2384
2385 iscsi_suspend_tx(conn);
2386
2387 spin_lock_bh(&session->frwd_lock);
2388 memset(hdr, 0, sizeof(*hdr));
2389 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
2390 session->tmf_state = TMF_INITIAL;
2391 spin_unlock_bh(&session->frwd_lock);
2392
2393 iscsi_start_tx(conn);
2394 goto done;
2395
2396 unlock:
2397 spin_unlock_bh(&session->frwd_lock);
2398 done:
2399 ISCSI_DBG_EH(session, "dev reset result = %s\n",
2400 rc == SUCCESS ? "SUCCESS" : "FAILED");
2401 mutex_unlock(&session->eh_mutex);
2402 return rc;
2403 }
2404 EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
2405
2406 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
2407 {
2408 struct iscsi_session *session = cls_session->dd_data;
2409
2410 spin_lock_bh(&session->frwd_lock);
2411 if (session->state != ISCSI_STATE_LOGGED_IN) {
2412 session->state = ISCSI_STATE_RECOVERY_FAILED;
2413 wake_up(&session->ehwait);
2414 }
2415 spin_unlock_bh(&session->frwd_lock);
2416 }
2417 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
2418
2419 /**
2420 * iscsi_eh_session_reset - drop session and attempt relogin
2421 * @sc: scsi command
2422 *
2423 * This function will wait for a relogin, session termination from
2424 * userspace, or a recovery/replacement timeout.
2425 */
2426 int iscsi_eh_session_reset(struct scsi_cmnd *sc)
2427 {
2428 struct iscsi_cls_session *cls_session;
2429 struct iscsi_session *session;
2430 struct iscsi_conn *conn;
2431
2432 cls_session = starget_to_session(scsi_target(sc->device));
2433 session = cls_session->dd_data;
2434 conn = session->leadconn;
2435
2436 mutex_lock(&session->eh_mutex);
2437 spin_lock_bh(&session->frwd_lock);
2438 if (session->state == ISCSI_STATE_TERMINATE) {
2439 failed:
2440 ISCSI_DBG_EH(session,
2441 "failing session reset: Could not log back into "
2442 "%s [age %d]\n", session->targetname,
2443 session->age);
2444 spin_unlock_bh(&session->frwd_lock);
2445 mutex_unlock(&session->eh_mutex);
2446 return FAILED;
2447 }
2448
2449 spin_unlock_bh(&session->frwd_lock);
2450 mutex_unlock(&session->eh_mutex);
2451 /*
2452 * we drop the lock here but the leadconn cannot be destroyed while
2453 * we are in the scsi eh
2454 */
2455 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2456
2457 ISCSI_DBG_EH(session, "wait for relogin\n");
2458 wait_event_interruptible(session->ehwait,
2459 session->state == ISCSI_STATE_TERMINATE ||
2460 session->state == ISCSI_STATE_LOGGED_IN ||
2461 session->state == ISCSI_STATE_RECOVERY_FAILED);
2462 if (signal_pending(current))
2463 flush_signals(current);
2464
2465 mutex_lock(&session->eh_mutex);
2466 spin_lock_bh(&session->frwd_lock);
2467 if (session->state == ISCSI_STATE_LOGGED_IN) {
2468 ISCSI_DBG_EH(session,
2469 "session reset succeeded for %s,%s\n",
2470 session->targetname, conn->persistent_address);
2471 } else
2472 goto failed;
2473 spin_unlock_bh(&session->frwd_lock);
2474 mutex_unlock(&session->eh_mutex);
2475 return SUCCESS;
2476 }
2477 EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
2478
2479 static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2480 {
2481 memset(hdr, 0, sizeof(*hdr));
2482 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2483 hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2484 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2485 hdr->rtt = RESERVED_ITT;
2486 }
2487
2488 /**
2489 * iscsi_eh_target_reset - reset target
2490 * @sc: scsi command
2491 *
2492 * This will attempt to send a warm target reset.
2493 */
2494 static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2495 {
2496 struct iscsi_cls_session *cls_session;
2497 struct iscsi_session *session;
2498 struct iscsi_conn *conn;
2499 struct iscsi_tm *hdr;
2500 int rc = FAILED;
2501
2502 cls_session = starget_to_session(scsi_target(sc->device));
2503 session = cls_session->dd_data;
2504
2505 ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc,
2506 session->targetname);
2507
2508 mutex_lock(&session->eh_mutex);
2509 spin_lock_bh(&session->frwd_lock);
2510 /*
2511 * Just check if we are not logged in. We cannot check for
2512 * the phase because the reset could come from an ioctl.
2513 */
2514 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
2515 goto unlock;
2516 conn = session->leadconn;
2517
2518 /* only have one tmf outstanding at a time */
2519 if (session->tmf_state != TMF_INITIAL)
2520 goto unlock;
2521 session->tmf_state = TMF_QUEUED;
2522
2523 hdr = &session->tmhdr;
2524 iscsi_prep_tgt_reset_pdu(sc, hdr);
2525
2526 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
2527 session->tgt_reset_timeout)) {
2528 rc = FAILED;
2529 goto unlock;
2530 }
2531
2532 switch (session->tmf_state) {
2533 case TMF_SUCCESS:
2534 break;
2535 case TMF_TIMEDOUT:
2536 spin_unlock_bh(&session->frwd_lock);
2537 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2538 goto done;
2539 default:
2540 session->tmf_state = TMF_INITIAL;
2541 goto unlock;
2542 }
2543
2544 rc = SUCCESS;
2545 spin_unlock_bh(&session->frwd_lock);
2546
2547 iscsi_suspend_tx(conn);
2548
2549 spin_lock_bh(&session->frwd_lock);
2550 memset(hdr, 0, sizeof(*hdr));
2551 fail_scsi_tasks(conn, -1, DID_ERROR);
2552 session->tmf_state = TMF_INITIAL;
2553 spin_unlock_bh(&session->frwd_lock);
2554
2555 iscsi_start_tx(conn);
2556 goto done;
2557
2558 unlock:
2559 spin_unlock_bh(&session->frwd_lock);
2560 done:
2561 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
2562 rc == SUCCESS ? "SUCCESS" : "FAILED");
2563 mutex_unlock(&session->eh_mutex);
2564 return rc;
2565 }
2566
2567 /**
2568 * iscsi_eh_recover_target - reset target and possibly the session
2569 * @sc: scsi command
2570 *
2571 * This will attempt to send a warm target reset. If that fails,
2572 * we will escalate to ERL0 session recovery.
2573 */
2574 int iscsi_eh_recover_target(struct scsi_cmnd *sc)
2575 {
2576 int rc;
2577
2578 rc = iscsi_eh_target_reset(sc);
2579 if (rc == FAILED)
2580 rc = iscsi_eh_session_reset(sc);
2581 return rc;
2582 }
2583 EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
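
/*
 * Illustrative wiring sketch: a software iscsi LLD typically plugs the
 * error handlers above straight into its scsi_host_template. The template
 * name my_iscsi_sht and the driver name are hypothetical; the field
 * choices mirror what software transports commonly do.
 *
 *	static struct scsi_host_template my_iscsi_sht = {
 *		.module			 = THIS_MODULE,
 *		.name			 = "my_iscsi",
 *		.queuecommand		 = iscsi_queuecommand,
 *		.target_alloc		 = iscsi_target_alloc,
 *		.eh_timed_out		 = iscsi_eh_cmd_timed_out,
 *		.eh_abort_handler	 = iscsi_eh_abort,
 *		.eh_device_reset_handler = iscsi_eh_device_reset,
 *		.eh_target_reset_handler = iscsi_eh_recover_target,
 *	};
 */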
2584
2585 /*
2586 * Pre-allocate a pool of @max items of @item_size. By default, the pool
2587 * should be accessed via kfifo_{out,in} on q->queue.
2588 * Optionally, the caller can obtain the array of object pointers
2589 * by passing in a non-NULL @items pointer.
2590 */
2591 int
2592 iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2593 {
2594 int i, num_arrays = 1;
2595
2596 memset(q, 0, sizeof(*q));
2597
2598 q->max = max;
2599
2600 /* If the caller passed an items pointer, they want a copy of
2601 * the array. */
2602 if (items)
2603 num_arrays++;
2604 q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL);
2605 if (q->pool == NULL)
2606 return -ENOMEM;
2607
2608 kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
2609
2610 for (i = 0; i < max; i++) {
2611 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
2612 if (q->pool[i] == NULL) {
2613 q->max = i;
2614 goto enomem;
2615 }
2616 kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
2617 }
2618
2619 if (items) {
2620 *items = q->pool + max;
2621 memcpy(*items, q->pool, max * sizeof(void *));
2622 }
2623
2624 return 0;
2625
2626 enomem:
2627 iscsi_pool_free(q);
2628 return -ENOMEM;
2629 }
2630 EXPORT_SYMBOL_GPL(iscsi_pool_init);
2631
2632 void iscsi_pool_free(struct iscsi_pool *q)
2633 {
2634 int i;
2635
2636 for (i = 0; i < q->max; i++)
2637 kfree(q->pool[i]);
2638 kvfree(q->pool);
2639 }
2640 EXPORT_SYMBOL_GPL(iscsi_pool_free);
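
/*
 * Illustrative usage sketch: a transport can reuse the pool helpers above
 * for its own per-connection objects, taking and returning entries with
 * kfifo_out()/kfifo_in() on q->queue. struct my_r2t and my_r2t_pool are
 * hypothetical names, not part of this file.
 *
 *	struct iscsi_pool my_r2t_pool;
 *	struct my_r2t *r2t;
 *
 *	if (iscsi_pool_init(&my_r2t_pool, 8, NULL, sizeof(struct my_r2t)))
 *		return -ENOMEM;
 *
 *	if (kfifo_out(&my_r2t_pool.queue, (void *)&r2t, sizeof(void *)))
 *		... use r2t, then return it ...
 *	kfifo_in(&my_r2t_pool.queue, (void *)&r2t, sizeof(void *));
 *
 *	iscsi_pool_free(&my_r2t_pool);
 */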
2641
2642 int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
2643 uint16_t requested_cmds_max)
2644 {
2645 int scsi_cmds, total_cmds = requested_cmds_max;
2646
2647 check:
2648 if (!total_cmds)
2649 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
2650 /*
2651 * The iscsi layer needs some tasks for nop handling and tmfs,
2652 * so cmds_max must be at least ISCSI_MGMT_CMDS_MAX plus
2653 * 1 command for scsi IO.
2654 */
2655 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
2656 printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n",
2657 total_cmds, ISCSI_TOTAL_CMDS_MIN);
2658 return -EINVAL;
2659 }
2660
2661 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
2662 printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n",
2663 requested_cmds_max, ISCSI_TOTAL_CMDS_MAX,
2664 ISCSI_TOTAL_CMDS_MAX);
2665 total_cmds = ISCSI_TOTAL_CMDS_MAX;
2666 }
2667
2668 if (!is_power_of_2(total_cmds)) {
2669 total_cmds = rounddown_pow_of_two(total_cmds);
2670 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
2671 printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN);
2672 return -EINVAL;
2673 }
2674
2675 printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n",
2676 requested_cmds_max, total_cmds);
2677 }
2678
2679 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
2680 if (shost->can_queue && scsi_cmds > shost->can_queue) {
2681 total_cmds = shost->can_queue;
2682
2683 printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n",
2684 requested_cmds_max, shost->can_queue);
2685 goto check;
2686 }
2687
2688 return scsi_cmds;
2689 }
2690 EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds);
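
/*
 * Worked example (illustrative, assuming shost->can_queue does not clamp
 * the result further): a requested cmds_max of 1000 is not a power of two,
 * so it is rounded down before the mgmt slots are reserved:
 *
 *	total_cmds = rounddown_pow_of_two(1000);	// -> 512
 *	scsi_cmds  = 512 - ISCSI_MGMT_CMDS_MAX;		// returned value
 *
 * The ISCSI_MGMT_CMDS_MAX slots stay reserved for nops, TMFs and
 * login/logout tasks.
 */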
2691
2692 /**
2693 * iscsi_host_add - add host to system
2694 * @shost: scsi host
2695 * @pdev: parent device
2696 *
2697 * This should be called by partial offload and software iscsi drivers
2698 * to add a host to the system.
2699 */
2700 int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
2701 {
2702 if (!shost->can_queue)
2703 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
2704
2705 if (!shost->cmd_per_lun)
2706 shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
2707
2708 return scsi_add_host(shost, pdev);
2709 }
2710 EXPORT_SYMBOL_GPL(iscsi_host_add);
2711
2712 /**
2713 * iscsi_host_alloc - allocate a host and driver data
2714 * @sht: scsi host template
2715 * @dd_data_size: driver host data size
2716 * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue
2717 *
2718 * This should be called by partial offload and software iscsi drivers.
2719 * To access the driver specific memory use the iscsi_host_priv() macro.
2720 */
2721 struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
2722 int dd_data_size, bool xmit_can_sleep)
2723 {
2724 struct Scsi_Host *shost;
2725 struct iscsi_host *ihost;
2726
2727 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
2728 if (!shost)
2729 return NULL;
2730 ihost = shost_priv(shost);
2731
2732 if (xmit_can_sleep) {
2733 snprintf(ihost->workq_name, sizeof(ihost->workq_name),
2734 "iscsi_q_%d", shost->host_no);
2735 ihost->workq = alloc_workqueue("%s",
2736 WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
2737 1, ihost->workq_name);
2738 if (!ihost->workq)
2739 goto free_host;
2740 }
2741
2742 spin_lock_init(&ihost->lock);
2743 ihost->state = ISCSI_HOST_SETUP;
2744 ihost->num_sessions = 0;
2745 init_waitqueue_head(&ihost->session_removal_wq);
2746 return shost;
2747
2748 free_host:
2749 scsi_host_put(shost);
2750 return NULL;
2751 }
2752 EXPORT_SYMBOL_GPL(iscsi_host_alloc);
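
/*
 * Illustrative probe-time sketch: a software iscsi driver allocates the
 * host and then registers it. my_iscsi_sht and struct my_host are the
 * hypothetical names used in the template sketch earlier in this file.
 *
 *	struct Scsi_Host *shost;
 *
 *	shost = iscsi_host_alloc(&my_iscsi_sht, sizeof(struct my_host), true);
 *	if (!shost)
 *		return -ENOMEM;
 *	if (iscsi_host_add(shost, NULL))
 *		goto free_host;
 *	...
 * free_host:
 *	iscsi_host_free(shost);
 */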
2753
2754 static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
2755 {
2756 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST);
2757 }
2758
2759 /**
2760 * iscsi_host_remove - remove host and sessions
2761 * @shost: scsi host
2762 *
2763 * If there are any sessions left, this will initiate the removal and wait
2764 * for the completion.
2765 */
2766 void iscsi_host_remove(struct Scsi_Host *shost)
2767 {
2768 struct iscsi_host *ihost = shost_priv(shost);
2769 unsigned long flags;
2770
2771 spin_lock_irqsave(&ihost->lock, flags);
2772 ihost->state = ISCSI_HOST_REMOVED;
2773 spin_unlock_irqrestore(&ihost->lock, flags);
2774
2775 iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
2776 wait_event_interruptible(ihost->session_removal_wq,
2777 ihost->num_sessions == 0);
2778 if (signal_pending(current))
2779 flush_signals(current);
2780
2781 scsi_remove_host(shost);
2782 }
2783 EXPORT_SYMBOL_GPL(iscsi_host_remove);
2784
2785 void iscsi_host_free(struct Scsi_Host *shost)
2786 {
2787 struct iscsi_host *ihost = shost_priv(shost);
2788
2789 if (ihost->workq)
2790 destroy_workqueue(ihost->workq);
2791
2792 kfree(ihost->netdev);
2793 kfree(ihost->hwaddress);
2794 kfree(ihost->initiatorname);
2795 scsi_host_put(shost);
2796 }
2797 EXPORT_SYMBOL_GPL(iscsi_host_free);
2798
2799 static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
2800 {
2801 struct iscsi_host *ihost = shost_priv(shost);
2802 unsigned long flags;
2803
2804 shost = scsi_host_get(shost);
2805 if (!shost) {
2806 printk(KERN_ERR "Invalid state. Cannot notify host removal "
2807 "of session teardown event because host already "
2808 "removed.\n");
2809 return;
2810 }
2811
2812 spin_lock_irqsave(&ihost->lock, flags);
2813 ihost->num_sessions--;
2814 if (ihost->num_sessions == 0)
2815 wake_up(&ihost->session_removal_wq);
2816 spin_unlock_irqrestore(&ihost->lock, flags);
2817 scsi_host_put(shost);
2818 }
2819
2820 /**
2821 * iscsi_session_setup - create iscsi cls session and host and session
2822 * @iscsit: iscsi transport template
2823 * @shost: scsi host
2824 * @cmds_max: total cmds (mgmt + scsi) the session can queue
2825 * @dd_size: private driver data size, added to session allocation size
2826 * @cmd_task_size: LLD task private data size
2827 * @initial_cmdsn: initial CmdSN
2828 * @id: target ID to add to this session
2829 *
2830 * This can be used by software iscsi_transports that allocate
2831 * a session per scsi host.
2832 *
2833 * Callers should set cmds_max to the largest total number (mgmt + scsi) of
2834 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
2835 * for nop handling and login/logout requests.
2836 */
2837 struct iscsi_cls_session *
2838 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2839 uint16_t cmds_max, int dd_size, int cmd_task_size,
2840 uint32_t initial_cmdsn, unsigned int id)
2841 {
2842 struct iscsi_host *ihost = shost_priv(shost);
2843 struct iscsi_session *session;
2844 struct iscsi_cls_session *cls_session;
2845 int cmd_i, scsi_cmds;
2846 unsigned long flags;
2847
2848 spin_lock_irqsave(&ihost->lock, flags);
2849 if (ihost->state == ISCSI_HOST_REMOVED) {
2850 spin_unlock_irqrestore(&ihost->lock, flags);
2851 return NULL;
2852 }
2853 ihost->num_sessions++;
2854 spin_unlock_irqrestore(&ihost->lock, flags);
2855
2856 scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
2857 if (scsi_cmds < 0)
2858 goto dec_session_count;
2859
2860 cls_session = iscsi_alloc_session(shost, iscsit,
2861 sizeof(struct iscsi_session) +
2862 dd_size);
2863 if (!cls_session)
2864 goto dec_session_count;
2865 session = cls_session->dd_data;
2866 session->cls_session = cls_session;
2867 session->host = shost;
2868 session->state = ISCSI_STATE_FREE;
2869 session->fast_abort = 1;
2870 session->tgt_reset_timeout = 30;
2871 session->lu_reset_timeout = 15;
2872 session->abort_timeout = 10;
2873 session->scsi_cmds_max = scsi_cmds;
2874 session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX;
2875 session->queued_cmdsn = session->cmdsn = initial_cmdsn;
2876 session->exp_cmdsn = initial_cmdsn + 1;
2877 session->max_cmdsn = initial_cmdsn + 1;
2878 session->max_r2t = 1;
2879 session->tt = iscsit;
2880 session->dd_data = cls_session->dd_data + sizeof(*session);
2881
2882 session->tmf_state = TMF_INITIAL;
2883 timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
2884 mutex_init(&session->eh_mutex);
2885
2886 spin_lock_init(&session->frwd_lock);
2887 spin_lock_init(&session->back_lock);
2888
2889 /* initialize SCSI PDU commands pool */
2890 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
2891 (void***)&session->cmds,
2892 cmd_task_size + sizeof(struct iscsi_task)))
2893 goto cmdpool_alloc_fail;
2894
2895 /* pre-format cmds pool with ITT */
2896 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
2897 struct iscsi_task *task = session->cmds[cmd_i];
2898
2899 if (cmd_task_size)
2900 task->dd_data = &task[1];
2901 task->itt = cmd_i;
2902 task->state = ISCSI_TASK_FREE;
2903 INIT_LIST_HEAD(&task->running);
2904 }
2905
2906 if (!try_module_get(iscsit->owner))
2907 goto module_get_fail;
2908
2909 if (iscsi_add_session(cls_session, id))
2910 goto cls_session_fail;
2911
2912 return cls_session;
2913
2914 cls_session_fail:
2915 module_put(iscsit->owner);
2916 module_get_fail:
2917 iscsi_pool_free(&session->cmdpool);
2918 cmdpool_alloc_fail:
2919 iscsi_free_session(cls_session);
2920 dec_session_count:
2921 iscsi_host_dec_session_cnt(shost);
2922 return NULL;
2923 }
2924 EXPORT_SYMBOL_GPL(iscsi_session_setup);
2925
2926 /**
2927 * iscsi_session_teardown - destroy session, host, and cls_session
2928 * @cls_session: iscsi session
2929 */
2930 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2931 {
2932 struct iscsi_session *session = cls_session->dd_data;
2933 struct module *owner = cls_session->transport->owner;
2934 struct Scsi_Host *shost = session->host;
2935
2936 iscsi_pool_free(&session->cmdpool);
2937
2938 iscsi_remove_session(cls_session);
2939
2940 kfree(session->password);
2941 kfree(session->password_in);
2942 kfree(session->username);
2943 kfree(session->username_in);
2944 kfree(session->targetname);
2945 kfree(session->targetalias);
2946 kfree(session->initiatorname);
2947 kfree(session->boot_root);
2948 kfree(session->boot_nic);
2949 kfree(session->boot_target);
2950 kfree(session->ifacename);
2951 kfree(session->portal_type);
2952 kfree(session->discovery_parent_type);
2953
2954 iscsi_free_session(cls_session);
2955
2956 iscsi_host_dec_session_cnt(shost);
2957 module_put(owner);
2958 }
2959 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
2960
2961 /**
2962 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
2963 * @cls_session: iscsi_cls_session
2964 * @dd_size: private driver data size
2965 * @conn_idx: cid
2966 */
2967 struct iscsi_cls_conn *
2968 iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2969 uint32_t conn_idx)
2970 {
2971 struct iscsi_session *session = cls_session->dd_data;
2972 struct iscsi_conn *conn;
2973 struct iscsi_cls_conn *cls_conn;
2974 char *data;
2975
2976 cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
2977 conn_idx);
2978 if (!cls_conn)
2979 return NULL;
2980 conn = cls_conn->dd_data;
2981 memset(conn, 0, sizeof(*conn) + dd_size);
2982
2983 conn->dd_data = cls_conn->dd_data + sizeof(*conn);
2984 conn->session = session;
2985 conn->cls_conn = cls_conn;
2986 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2987 conn->id = conn_idx;
2988 conn->exp_statsn = 0;
2989
2990 timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
2991
2992 INIT_LIST_HEAD(&conn->mgmtqueue);
2993 INIT_LIST_HEAD(&conn->cmdqueue);
2994 INIT_LIST_HEAD(&conn->requeue);
2995 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2996
2997 /* allocate login_task used for the login/text sequences */
2998 spin_lock_bh(&session->frwd_lock);
2999 if (!kfifo_out(&session->cmdpool.queue,
3000 (void*)&conn->login_task,
3001 sizeof(void*))) {
3002 spin_unlock_bh(&session->frwd_lock);
3003 goto login_task_alloc_fail;
3004 }
3005 spin_unlock_bh(&session->frwd_lock);
3006
3007 data = (char *) __get_free_pages(GFP_KERNEL,
3008 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
3009 if (!data)
3010 goto login_task_data_alloc_fail;
3011 conn->login_task->data = conn->data = data;
3012
3013 init_waitqueue_head(&session->ehwait);
3014
3015 return cls_conn;
3016
3017 login_task_data_alloc_fail:
3018 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
3019 sizeof(void*));
3020 login_task_alloc_fail:
3021 iscsi_destroy_conn(cls_conn);
3022 return NULL;
3023 }
3024 EXPORT_SYMBOL_GPL(iscsi_conn_setup);
3025
3026 /**
3027 * iscsi_conn_teardown - teardown iscsi connection
3028 * @cls_conn: iscsi class connection
3029 *
3030 * TODO: we may need to make this into a two step process
3031 * like scsi-ml's remove + put host
3032 */
3033 void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
3034 {
3035 struct iscsi_conn *conn = cls_conn->dd_data;
3036 struct iscsi_session *session = conn->session;
3037 char *tmp_persistent_address = conn->persistent_address;
3038 char *tmp_local_ipaddr = conn->local_ipaddr;
3039
3040 del_timer_sync(&conn->transport_timer);
3041
3042 mutex_lock(&session->eh_mutex);
3043 spin_lock_bh(&session->frwd_lock);
3044 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
3045 if (session->leadconn == conn) {
3046 /*
3047 * If this is the leading connection, then give up on recovery.
3048 */
3049 session->state = ISCSI_STATE_TERMINATE;
3050 wake_up(&session->ehwait);
3051 }
3052 spin_unlock_bh(&session->frwd_lock);
3053
3054 /* flush queued up work because we free the connection below */
3055 iscsi_suspend_tx(conn);
3056
3057 spin_lock_bh(&session->frwd_lock);
3058 free_pages((unsigned long) conn->data,
3059 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
3060 /* regular RX path uses back_lock */
3061 spin_lock_bh(&session->back_lock);
3062 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
3063 sizeof(void*));
3064 spin_unlock_bh(&session->back_lock);
3065 if (session->leadconn == conn)
3066 session->leadconn = NULL;
3067 spin_unlock_bh(&session->frwd_lock);
3068 mutex_unlock(&session->eh_mutex);
3069
3070 iscsi_destroy_conn(cls_conn);
3071 kfree(tmp_persistent_address);
3072 kfree(tmp_local_ipaddr);
3073 }
3074 EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
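
/*
 * Illustrative teardown sketch: destruction mirrors setup, with the
 * connection torn down before the session that owns it. cls_conn and
 * cls_session are whatever the driver saved from iscsi_conn_setup() and
 * iscsi_session_setup().
 *
 *	iscsi_conn_teardown(cls_conn);
 *	iscsi_session_teardown(cls_session);
 */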
3075
3076 int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
3077 {
3078 struct iscsi_conn *conn = cls_conn->dd_data;
3079 struct iscsi_session *session = conn->session;
3080
3081 if (!session) {
3082 iscsi_conn_printk(KERN_ERR, conn,
3083 "can't start unbound connection\n");
3084 return -EPERM;
3085 }
3086
3087 if ((session->imm_data_en || !session->initial_r2t_en) &&
3088 session->first_burst > session->max_burst) {
3089 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
3090 "first_burst %d max_burst %d\n",
3091 session->first_burst, session->max_burst);
3092 return -EINVAL;
3093 }
3094
3095 if (conn->ping_timeout && !conn->recv_timeout) {
3096 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
3097 "zero. Using 5 seconds\n.");
3098 conn->recv_timeout = 5;
3099 }
3100
3101 if (conn->recv_timeout && !conn->ping_timeout) {
3102 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
3103 "zero. Using 5 seconds.\n");
3104 conn->ping_timeout = 5;
3105 }
3106
3107 spin_lock_bh(&session->frwd_lock);
3108 conn->c_stage = ISCSI_CONN_STARTED;
3109 session->state = ISCSI_STATE_LOGGED_IN;
3110 session->queued_cmdsn = session->cmdsn;
3111
3112 conn->last_recv = jiffies;
3113 conn->last_ping = jiffies;
3114 if (conn->recv_timeout && conn->ping_timeout)
3115 mod_timer(&conn->transport_timer,
3116 jiffies + (conn->recv_timeout * HZ));
3117
3118 switch(conn->stop_stage) {
3119 case STOP_CONN_RECOVER:
3120 /*
3121 * unblock eh_abort() if it is blocked. Re-try all
3122 * commands after successful recovery.
3123 */
3124 conn->stop_stage = 0;
3125 session->tmf_state = TMF_INITIAL;
3126 session->age++;
3127 if (session->age == 16)
3128 session->age = 0;
3129 break;
3130 case STOP_CONN_TERM:
3131 conn->stop_stage = 0;
3132 break;
3133 default:
3134 break;
3135 }
3136 spin_unlock_bh(&session->frwd_lock);
3137
3138 iscsi_unblock_session(session->cls_session);
3139 wake_up(&session->ehwait);
3140 return 0;
3141 }
3142 EXPORT_SYMBOL_GPL(iscsi_conn_start);
3143
3144 static void
3145 fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
3146 {
3147 struct iscsi_task *task;
3148 int i, state;
3149
3150 for (i = 0; i < conn->session->cmds_max; i++) {
3151 task = conn->session->cmds[i];
3152 if (task->sc)
3153 continue;
3154
3155 if (task->state == ISCSI_TASK_FREE)
3156 continue;
3157
3158 ISCSI_DBG_SESSION(conn->session,
3159 "failing mgmt itt 0x%x state %d\n",
3160 task->itt, task->state);
3161
3162 spin_lock_bh(&session->back_lock);
3163 if (cleanup_queued_task(task)) {
3164 spin_unlock_bh(&session->back_lock);
3165 continue;
3166 }
3167
3168 state = ISCSI_TASK_ABRT_SESS_RECOV;
3169 if (task->state == ISCSI_TASK_PENDING)
3170 state = ISCSI_TASK_COMPLETED;
3171 iscsi_complete_task(task, state);
3172 spin_unlock_bh(&session->back_lock);
3173 }
3174 }
3175
3176 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
3177 {
3178 struct iscsi_conn *conn = cls_conn->dd_data;
3179 struct iscsi_session *session = conn->session;
3180 int old_stop_stage;
3181
3182 mutex_lock(&session->eh_mutex);
3183 spin_lock_bh(&session->frwd_lock);
3184 if (conn->stop_stage == STOP_CONN_TERM) {
3185 spin_unlock_bh(&session->frwd_lock);
3186 mutex_unlock(&session->eh_mutex);
3187 return;
3188 }
3189
3190 /*
3191 * When this is called for the in_login state, we only want to clean
3192 * up the login task and connection. We do not need to block and set
3193 * the recovery state again.
3194 */
3195 if (flag == STOP_CONN_TERM)
3196 session->state = ISCSI_STATE_TERMINATE;
3197 else if (conn->stop_stage != STOP_CONN_RECOVER)
3198 session->state = ISCSI_STATE_IN_RECOVERY;
3199
3200 old_stop_stage = conn->stop_stage;
3201 conn->stop_stage = flag;
3202 spin_unlock_bh(&session->frwd_lock);
3203
3204 del_timer_sync(&conn->transport_timer);
3205 iscsi_suspend_tx(conn);
3206
3207 spin_lock_bh(&session->frwd_lock);
3208 conn->c_stage = ISCSI_CONN_STOPPED;
3209 spin_unlock_bh(&session->frwd_lock);
3210
3211 /*
3212 * for connection level recovery we should not calculate
3213 * header digest. conn->hdr_size is used for optimization
3214 * in hdr_extract() and will be re-negotiated at
3215 * set_param() time.
3216 */
3217 if (flag == STOP_CONN_RECOVER) {
3218 conn->hdrdgst_en = 0;
3219 conn->datadgst_en = 0;
3220 if (session->state == ISCSI_STATE_IN_RECOVERY &&
3221 old_stop_stage != STOP_CONN_RECOVER) {
3222 ISCSI_DBG_SESSION(session, "blocking session\n");
3223 iscsi_block_session(session->cls_session);
3224 }
3225 }
3226
3227 /*
3228 * flush queues.
3229 */
3230 spin_lock_bh(&session->frwd_lock);
3231 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
3232 fail_mgmt_tasks(session, conn);
3233 memset(&session->tmhdr, 0, sizeof(session->tmhdr));
3234 spin_unlock_bh(&session->frwd_lock);
3235 mutex_unlock(&session->eh_mutex);
3236 }
3237 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
3238
3239 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
3240 struct iscsi_cls_conn *cls_conn, int is_leading)
3241 {
3242 struct iscsi_session *session = cls_session->dd_data;
3243 struct iscsi_conn *conn = cls_conn->dd_data;
3244
3245 spin_lock_bh(&session->frwd_lock);
3246 if (is_leading)
3247 session->leadconn = conn;
3248 spin_unlock_bh(&session->frwd_lock);
3249
3250 /*
3251 * The target could have reduced its window size between logins, so
3252 * we have to reset max/exp cmdsn so we can see the new values.
3253 */
3254 spin_lock_bh(&session->back_lock);
3255 session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1;
3256 spin_unlock_bh(&session->back_lock);
3257 /*
3258 * Unblock xmitworker() so the Login Phase will pass through.
3259 */
3260 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
3261 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
3262 return 0;
3263 }
3264 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
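
/*
 * Illustrative per-session call-order sketch, as seen from an LLD's
 * iscsi_transport callbacks (.create_session/.create_conn/.bind_conn/
 * .start_conn). my_transport, struct my_conn and the local variables are
 * hypothetical names.
 *
 *	cls_session = iscsi_session_setup(&my_transport, shost, cmds_max,
 *					  dd_size, task_size, initial_cmdsn,
 *					  target_id);
 *	cls_conn = iscsi_conn_setup(cls_session, sizeof(struct my_conn), cid);
 *	// userspace performs the login negotiation, then:
 *	iscsi_conn_bind(cls_session, cls_conn, is_leading);
 *	iscsi_conn_start(cls_conn);
 */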
3265
3266 int iscsi_switch_str_param(char **param, char *new_val_buf)
3267 {
3268 char *new_val;
3269
3270 if (*param) {
3271 if (!strcmp(*param, new_val_buf))
3272 return 0;
3273 }
3274
3275 new_val = kstrdup(new_val_buf, GFP_NOIO);
3276 if (!new_val)
3277 return -ENOMEM;
3278
3279 kfree(*param);
3280 *param = new_val;
3281 return 0;
3282 }
3283 EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
3284
3285 int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3286 enum iscsi_param param, char *buf, int buflen)
3287 {
3288 struct iscsi_conn *conn = cls_conn->dd_data;
3289 struct iscsi_session *session = conn->session;
3290 int val;
3291
3292 switch(param) {
3293 case ISCSI_PARAM_FAST_ABORT:
3294 sscanf(buf, "%d", &session->fast_abort);
3295 break;
3296 case ISCSI_PARAM_ABORT_TMO:
3297 sscanf(buf, "%d", &session->abort_timeout);
3298 break;
3299 case ISCSI_PARAM_LU_RESET_TMO:
3300 sscanf(buf, "%d", &session->lu_reset_timeout);
3301 break;
3302 case ISCSI_PARAM_TGT_RESET_TMO:
3303 sscanf(buf, "%d", &session->tgt_reset_timeout);
3304 break;
3305 case ISCSI_PARAM_PING_TMO:
3306 sscanf(buf, "%d", &conn->ping_timeout);
3307 break;
3308 case ISCSI_PARAM_RECV_TMO:
3309 sscanf(buf, "%d", &conn->recv_timeout);
3310 break;
3311 case ISCSI_PARAM_MAX_RECV_DLENGTH:
3312 sscanf(buf, "%d", &conn->max_recv_dlength);
3313 break;
3314 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3315 sscanf(buf, "%d", &conn->max_xmit_dlength);
3316 break;
3317 case ISCSI_PARAM_HDRDGST_EN:
3318 sscanf(buf, "%d", &conn->hdrdgst_en);
3319 break;
3320 case ISCSI_PARAM_DATADGST_EN:
3321 sscanf(buf, "%d", &conn->datadgst_en);
3322 break;
3323 case ISCSI_PARAM_INITIAL_R2T_EN:
3324 sscanf(buf, "%d", &session->initial_r2t_en);
3325 break;
3326 case ISCSI_PARAM_MAX_R2T:
3327 sscanf(buf, "%hu", &session->max_r2t);
3328 break;
3329 case ISCSI_PARAM_IMM_DATA_EN:
3330 sscanf(buf, "%d", &session->imm_data_en);
3331 break;
3332 case ISCSI_PARAM_FIRST_BURST:
3333 sscanf(buf, "%d", &session->first_burst);
3334 break;
3335 case ISCSI_PARAM_MAX_BURST:
3336 sscanf(buf, "%d", &session->max_burst);
3337 break;
3338 case ISCSI_PARAM_PDU_INORDER_EN:
3339 sscanf(buf, "%d", &session->pdu_inorder_en);
3340 break;
3341 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3342 sscanf(buf, "%d", &session->dataseq_inorder_en);
3343 break;
3344 case ISCSI_PARAM_ERL:
3345 sscanf(buf, "%d", &session->erl);
3346 break;
3347 case ISCSI_PARAM_EXP_STATSN:
3348 sscanf(buf, "%u", &conn->exp_statsn);
3349 break;
3350 case ISCSI_PARAM_USERNAME:
3351 return iscsi_switch_str_param(&session->username, buf);
3352 case ISCSI_PARAM_USERNAME_IN:
3353 return iscsi_switch_str_param(&session->username_in, buf);
3354 case ISCSI_PARAM_PASSWORD:
3355 return iscsi_switch_str_param(&session->password, buf);
3356 case ISCSI_PARAM_PASSWORD_IN:
3357 return iscsi_switch_str_param(&session->password_in, buf);
3358 case ISCSI_PARAM_TARGET_NAME:
3359 return iscsi_switch_str_param(&session->targetname, buf);
3360 case ISCSI_PARAM_TARGET_ALIAS:
3361 return iscsi_switch_str_param(&session->targetalias, buf);
3362 case ISCSI_PARAM_TPGT:
3363 sscanf(buf, "%d", &session->tpgt);
3364 break;
3365 case ISCSI_PARAM_PERSISTENT_PORT:
3366 sscanf(buf, "%d", &conn->persistent_port);
3367 break;
3368 case ISCSI_PARAM_PERSISTENT_ADDRESS:
3369 return iscsi_switch_str_param(&conn->persistent_address, buf);
3370 case ISCSI_PARAM_IFACE_NAME:
3371 return iscsi_switch_str_param(&session->ifacename, buf);
3372 case ISCSI_PARAM_INITIATOR_NAME:
3373 return iscsi_switch_str_param(&session->initiatorname, buf);
3374 case ISCSI_PARAM_BOOT_ROOT:
3375 return iscsi_switch_str_param(&session->boot_root, buf);
3376 case ISCSI_PARAM_BOOT_NIC:
3377 return iscsi_switch_str_param(&session->boot_nic, buf);
3378 case ISCSI_PARAM_BOOT_TARGET:
3379 return iscsi_switch_str_param(&session->boot_target, buf);
3380 case ISCSI_PARAM_PORTAL_TYPE:
3381 return iscsi_switch_str_param(&session->portal_type, buf);
3382 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
3383 return iscsi_switch_str_param(&session->discovery_parent_type,
3384 buf);
3385 case ISCSI_PARAM_DISCOVERY_SESS:
3386 sscanf(buf, "%d", &val);
3387 session->discovery_sess = !!val;
3388 break;
3389 case ISCSI_PARAM_LOCAL_IPADDR:
3390 return iscsi_switch_str_param(&conn->local_ipaddr, buf);
3391 default:
3392 return -ENOSYS;
3393 }
3394
3395 return 0;
3396 }
3397 EXPORT_SYMBOL_GPL(iscsi_set_param);
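
/*
 * Illustrative sketch: an LLD's .set_param callback usually handles the
 * parameters it implements specially and delegates the rest to
 * iscsi_set_param(). my_conn_set_param() is a hypothetical name.
 *
 *	static int my_conn_set_param(struct iscsi_cls_conn *cls_conn,
 *				     enum iscsi_param param, char *buf,
 *				     int buflen)
 *	{
 *		switch (param) {
 *		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
 *			// clamp buf to transport limits here, then
 *			fallthrough;
 *		default:
 *			return iscsi_set_param(cls_conn, param, buf, buflen);
 *		}
 *	}
 */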
3398
3399 int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3400 enum iscsi_param param, char *buf)
3401 {
3402 struct iscsi_session *session = cls_session->dd_data;
3403 int len;
3404
3405 switch(param) {
3406 case ISCSI_PARAM_FAST_ABORT:
3407 len = sysfs_emit(buf, "%d\n", session->fast_abort);
3408 break;
3409 case ISCSI_PARAM_ABORT_TMO:
3410 len = sysfs_emit(buf, "%d\n", session->abort_timeout);
3411 break;
3412 case ISCSI_PARAM_LU_RESET_TMO:
3413 len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
3414 break;
3415 case ISCSI_PARAM_TGT_RESET_TMO:
3416 len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
3417 break;
3418 case ISCSI_PARAM_INITIAL_R2T_EN:
3419 len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
3420 break;
3421 case ISCSI_PARAM_MAX_R2T:
3422 len = sysfs_emit(buf, "%hu\n", session->max_r2t);
3423 break;
3424 case ISCSI_PARAM_IMM_DATA_EN:
3425 len = sysfs_emit(buf, "%d\n", session->imm_data_en);
3426 break;
3427 case ISCSI_PARAM_FIRST_BURST:
3428 len = sysfs_emit(buf, "%u\n", session->first_burst);
3429 break;
3430 case ISCSI_PARAM_MAX_BURST:
3431 len = sysfs_emit(buf, "%u\n", session->max_burst);
3432 break;
3433 case ISCSI_PARAM_PDU_INORDER_EN:
3434 len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
3435 break;
3436 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3437 len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
3438 break;
3439 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
3440 len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
3441 break;
3442 case ISCSI_PARAM_ERL:
3443 len = sysfs_emit(buf, "%d\n", session->erl);
3444 break;
3445 case ISCSI_PARAM_TARGET_NAME:
3446 len = sysfs_emit(buf, "%s\n", session->targetname);
3447 break;
3448 case ISCSI_PARAM_TARGET_ALIAS:
3449 len = sysfs_emit(buf, "%s\n", session->targetalias);
3450 break;
3451 case ISCSI_PARAM_TPGT:
3452 len = sysfs_emit(buf, "%d\n", session->tpgt);
3453 break;
3454 case ISCSI_PARAM_USERNAME:
3455 len = sysfs_emit(buf, "%s\n", session->username);
3456 break;
3457 case ISCSI_PARAM_USERNAME_IN:
3458 len = sysfs_emit(buf, "%s\n", session->username_in);
3459 break;
3460 case ISCSI_PARAM_PASSWORD:
3461 len = sysfs_emit(buf, "%s\n", session->password);
3462 break;
3463 case ISCSI_PARAM_PASSWORD_IN:
3464 len = sysfs_emit(buf, "%s\n", session->password_in);
3465 break;
3466 case ISCSI_PARAM_IFACE_NAME:
3467 len = sysfs_emit(buf, "%s\n", session->ifacename);
3468 break;
3469 case ISCSI_PARAM_INITIATOR_NAME:
3470 len = sysfs_emit(buf, "%s\n", session->initiatorname);
3471 break;
3472 case ISCSI_PARAM_BOOT_ROOT:
3473 len = sysfs_emit(buf, "%s\n", session->boot_root);
3474 break;
3475 case ISCSI_PARAM_BOOT_NIC:
3476 len = sysfs_emit(buf, "%s\n", session->boot_nic);
3477 break;
3478 case ISCSI_PARAM_BOOT_TARGET:
3479 len = sysfs_emit(buf, "%s\n", session->boot_target);
3480 break;
3481 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
3482 len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
3483 break;
3484 case ISCSI_PARAM_DISCOVERY_SESS:
3485 len = sysfs_emit(buf, "%u\n", session->discovery_sess);
3486 break;
3487 case ISCSI_PARAM_PORTAL_TYPE:
3488 len = sysfs_emit(buf, "%s\n", session->portal_type);
3489 break;
3490 case ISCSI_PARAM_CHAP_AUTH_EN:
3491 len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
3492 break;
3493 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
3494 len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
3495 break;
3496 case ISCSI_PARAM_BIDI_CHAP_EN:
3497 len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
3498 break;
3499 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
3500 len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
3501 break;
3502 case ISCSI_PARAM_DEF_TIME2WAIT:
3503 len = sysfs_emit(buf, "%d\n", session->time2wait);
3504 break;
3505 case ISCSI_PARAM_DEF_TIME2RETAIN:
3506 len = sysfs_emit(buf, "%d\n", session->time2retain);
3507 break;
3508 case ISCSI_PARAM_TSID:
3509 len = sysfs_emit(buf, "%u\n", session->tsid);
3510 break;
3511 case ISCSI_PARAM_ISID:
3512 len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
3513 session->isid[0], session->isid[1],
3514 session->isid[2], session->isid[3],
3515 session->isid[4], session->isid[5]);
3516 break;
3517 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
3518 len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
3519 break;
3520 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
3521 if (session->discovery_parent_type)
3522 len = sysfs_emit(buf, "%s\n",
3523 session->discovery_parent_type);
3524 else
3525 len = sysfs_emit(buf, "\n");
3526 break;
3527 default:
3528 return -ENOSYS;
3529 }
3530
3531 return len;
3532 }
3533 EXPORT_SYMBOL_GPL(iscsi_session_get_param);
3534
3535 int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
3536 enum iscsi_param param, char *buf)
3537 {
3538 struct sockaddr_in6 *sin6 = NULL;
3539 struct sockaddr_in *sin = NULL;
3540 int len;
3541
3542 switch (addr->ss_family) {
3543 case AF_INET:
3544 sin = (struct sockaddr_in *)addr;
3545 break;
3546 case AF_INET6:
3547 sin6 = (struct sockaddr_in6 *)addr;
3548 break;
3549 default:
3550 return -EINVAL;
3551 }
3552
3553 switch (param) {
3554 case ISCSI_PARAM_CONN_ADDRESS:
3555 case ISCSI_HOST_PARAM_IPADDRESS:
3556 if (sin)
3557 len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr);
3558 else
3559 len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr);
3560 break;
3561 case ISCSI_PARAM_CONN_PORT:
3562 case ISCSI_PARAM_LOCAL_PORT:
3563 if (sin)
3564 len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port));
3565 else
3566 len = sysfs_emit(buf, "%hu\n",
3567 be16_to_cpu(sin6->sin6_port));
3568 break;
3569 default:
3570 return -EINVAL;
3571 }
3572
3573 return len;
3574 }
3575 EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
3576
3577 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
3578 enum iscsi_param param, char *buf)
3579 {
3580 struct iscsi_conn *conn = cls_conn->dd_data;
3581 int len;
3582
3583 switch(param) {
3584 case ISCSI_PARAM_PING_TMO:
3585 len = sysfs_emit(buf, "%u\n", conn->ping_timeout);
3586 break;
3587 case ISCSI_PARAM_RECV_TMO:
3588 len = sysfs_emit(buf, "%u\n", conn->recv_timeout);
3589 break;
3590 case ISCSI_PARAM_MAX_RECV_DLENGTH:
3591 len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength);
3592 break;
3593 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3594 len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength);
3595 break;
3596 case ISCSI_PARAM_HDRDGST_EN:
3597 len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en);
3598 break;
3599 case ISCSI_PARAM_DATADGST_EN:
3600 len = sysfs_emit(buf, "%d\n", conn->datadgst_en);
3601 break;
3602 case ISCSI_PARAM_IFMARKER_EN:
3603 len = sysfs_emit(buf, "%d\n", conn->ifmarker_en);
3604 break;
3605 case ISCSI_PARAM_OFMARKER_EN:
3606 len = sysfs_emit(buf, "%d\n", conn->ofmarker_en);
3607 break;
3608 case ISCSI_PARAM_EXP_STATSN:
3609 len = sysfs_emit(buf, "%u\n", conn->exp_statsn);
3610 break;
3611 case ISCSI_PARAM_PERSISTENT_PORT:
3612 len = sysfs_emit(buf, "%d\n", conn->persistent_port);
3613 break;
3614 case ISCSI_PARAM_PERSISTENT_ADDRESS:
3615 len = sysfs_emit(buf, "%s\n", conn->persistent_address);
3616 break;
3617 case ISCSI_PARAM_STATSN:
3618 len = sysfs_emit(buf, "%u\n", conn->statsn);
3619 break;
3620 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
3621 len = sysfs_emit(buf, "%u\n", conn->max_segment_size);
3622 break;
3623 case ISCSI_PARAM_KEEPALIVE_TMO:
3624 len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo);
3625 break;
3626 case ISCSI_PARAM_LOCAL_PORT:
3627 len = sysfs_emit(buf, "%u\n", conn->local_port);
3628 break;
3629 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
3630 len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat);
3631 break;
3632 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
3633 len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable);
3634 break;
3635 case ISCSI_PARAM_TCP_WSF_DISABLE:
3636 len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable);
3637 break;
3638 case ISCSI_PARAM_TCP_TIMER_SCALE:
3639 len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale);
3640 break;
3641 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
3642 len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en);
3643 break;
3644 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
3645 len = sysfs_emit(buf, "%u\n", conn->fragment_disable);
3646 break;
3647 case ISCSI_PARAM_IPV4_TOS:
3648 len = sysfs_emit(buf, "%u\n", conn->ipv4_tos);
3649 break;
3650 case ISCSI_PARAM_IPV6_TC:
3651 len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class);
3652 break;
3653 case ISCSI_PARAM_IPV6_FLOW_LABEL:
3654 len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label);
3655 break;
3656 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
3657 len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6);
3658 break;
3659 case ISCSI_PARAM_TCP_XMIT_WSF:
3660 len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf);
3661 break;
3662 case ISCSI_PARAM_TCP_RECV_WSF:
3663 len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf);
3664 break;
3665 case ISCSI_PARAM_LOCAL_IPADDR:
3666 len = sysfs_emit(buf, "%s\n", conn->local_ipaddr);
3667 break;
3668 default:
3669 return -ENOSYS;
3670 }
3671
3672 return len;
3673 }
3674 EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
3675
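/**
 * iscsi_host_get_param - copy an iscsi host parameter to a sysfs buffer
 * @shost: Scsi_Host whose private data is the libiscsi host
 * @param: host parameter (netdev name, HW address or initiator name)
 * @buf: sysfs buffer the value is formatted into
 *
 * Returns the number of bytes written to @buf, or -ENOSYS if the
 * parameter is not handled here.
 */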
int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf)
{
	struct iscsi_host *ihost = shost_priv(shost);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sysfs_emit(buf, "%s\n", ihost->netdev);
		break;
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_emit(buf, "%s\n", ihost->hwaddress);
		break;
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		len = sysfs_emit(buf, "%s\n", ihost->initiatorname);
		break;
	default:
		return -ENOSYS;
	}

	return len;
}
EXPORT_SYMBOL_GPL(iscsi_host_get_param);

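/**
 * iscsi_host_set_param - update an iscsi host string parameter
 * @shost: Scsi_Host whose private data is the libiscsi host
 * @param: host parameter (netdev name, HW address or initiator name)
 * @buf: new value string
 * @buflen: length of @buf (not used here)
 *
 * Returns the result of iscsi_switch_str_param() for known parameters,
 * or -ENOSYS if the parameter is not handled here.
 */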
int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf, int buflen)
{
	struct iscsi_host *ihost = shost_priv(shost);

	switch (param) {
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return iscsi_switch_str_param(&ihost->netdev, buf);
	case ISCSI_HOST_PARAM_HWADDRESS:
		return iscsi_switch_str_param(&ihost->hwaddress, buf);
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		return iscsi_switch_str_param(&ihost->initiatorname, buf);
	default:
		return -ENOSYS;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_host_set_param);

MODULE_AUTHOR("Mike Christie");
MODULE_DESCRIPTION("iSCSI library functions");
MODULE_LICENSE("GPL");