1 /*******************************************************************************
2 * This file contains main functions related to the iSCSI Target Core Driver.
3 *
4 * (c) Copyright 2007-2013 Datera, Inc.
5 *
6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ******************************************************************************/
18
19 #include <crypto/hash.h>
20 #include <linux/string.h>
21 #include <linux/kthread.h>
22 #include <linux/completion.h>
23 #include <linux/module.h>
24 #include <linux/vmalloc.h>
25 #include <linux/idr.h>
26 #include <linux/delay.h>
27 #include <linux/sched/signal.h>
28 #include <asm/unaligned.h>
29 #include <net/ipv6.h>
30 #include <scsi/scsi_proto.h>
31 #include <scsi/iscsi_proto.h>
32 #include <scsi/scsi_tcq.h>
33 #include <target/target_core_base.h>
34 #include <target/target_core_fabric.h>
35
36 #include <target/iscsi/iscsi_target_core.h>
37 #include "iscsi_target_parameters.h"
38 #include "iscsi_target_seq_pdu_list.h"
39 #include "iscsi_target_datain_values.h"
40 #include "iscsi_target_erl0.h"
41 #include "iscsi_target_erl1.h"
42 #include "iscsi_target_erl2.h"
43 #include "iscsi_target_login.h"
44 #include "iscsi_target_tmr.h"
45 #include "iscsi_target_tpg.h"
46 #include "iscsi_target_util.h"
47 #include "iscsi_target.h"
48 #include "iscsi_target_device.h"
49 #include <target/iscsi/iscsi_target_stat.h>
50
51 #include <target/iscsi/iscsi_transport.h>
52
53 static LIST_HEAD(g_tiqn_list);
54 static LIST_HEAD(g_np_list);
55 static DEFINE_SPINLOCK(tiqn_lock);
56 static DEFINE_MUTEX(np_lock);
57
58 static struct idr tiqn_idr;
59 struct idr sess_idr;
60 struct mutex auth_id_lock;
61 spinlock_t sess_idr_lock;
62
63 struct iscsit_global *iscsit_global;
64
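/*
 * Slab caches for per-connection queue requests, DataIN requests,
 * out-of-order CmdSN tracking and R2T descriptors; created in
 * iscsi_target_init_module() and destroyed on module exit.
 */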
65 struct kmem_cache *lio_qr_cache;
66 struct kmem_cache *lio_dr_cache;
67 struct kmem_cache *lio_ooo_cache;
68 struct kmem_cache *lio_r2t_cache;
69
70 static int iscsit_handle_immediate_data(struct iscsi_cmd *,
71 struct iscsi_scsi_req *, u32);
72
73 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
74 {
75 struct iscsi_tiqn *tiqn = NULL;
76
77 spin_lock(&tiqn_lock);
78 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
79 if (!strcmp(tiqn->tiqn, buf)) {
80
81 spin_lock(&tiqn->tiqn_state_lock);
82 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
83 tiqn->tiqn_access_count++;
84 spin_unlock(&tiqn->tiqn_state_lock);
85 spin_unlock(&tiqn_lock);
86 return tiqn;
87 }
88 spin_unlock(&tiqn->tiqn_state_lock);
89 }
90 }
91 spin_unlock(&tiqn_lock);
92
93 return NULL;
94 }
95
96 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
97 {
98 spin_lock(&tiqn->tiqn_state_lock);
99 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
100 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
101 spin_unlock(&tiqn->tiqn_state_lock);
102 return 0;
103 }
104 spin_unlock(&tiqn->tiqn_state_lock);
105
106 return -1;
107 }
108
109 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
110 {
111 spin_lock(&tiqn->tiqn_state_lock);
112 tiqn->tiqn_access_count--;
113 spin_unlock(&tiqn->tiqn_state_lock);
114 }
115
116 /*
117 * Note that IQN formatting is expected to be done in userspace, and
118 * no explicit IQN format checks are done here.
119 */
120 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
121 {
122 struct iscsi_tiqn *tiqn = NULL;
123 int ret;
124
125 if (strlen(buf) >= ISCSI_IQN_LEN) {
126 pr_err("Target IQN exceeds %d bytes\n",
127 ISCSI_IQN_LEN);
128 return ERR_PTR(-EINVAL);
129 }
130
131 tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
132 if (!tiqn)
133 return ERR_PTR(-ENOMEM);
134
135 sprintf(tiqn->tiqn, "%s", buf);
136 INIT_LIST_HEAD(&tiqn->tiqn_list);
137 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
138 spin_lock_init(&tiqn->tiqn_state_lock);
139 spin_lock_init(&tiqn->tiqn_tpg_lock);
140 spin_lock_init(&tiqn->sess_err_stats.lock);
141 spin_lock_init(&tiqn->login_stats.lock);
142 spin_lock_init(&tiqn->logout_stats.lock);
143
144 tiqn->tiqn_state = TIQN_STATE_ACTIVE;
145
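	/*
	 * idr_preload()/GFP_NOWAIT idiom: preallocate idr nodes while we can
	 * still sleep, then allocate the index atomically under tiqn_lock.
	 */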
146 idr_preload(GFP_KERNEL);
147 spin_lock(&tiqn_lock);
148
149 ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
150 if (ret < 0) {
151 pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
152 spin_unlock(&tiqn_lock);
153 idr_preload_end();
154 kfree(tiqn);
155 return ERR_PTR(ret);
156 }
157 tiqn->tiqn_index = ret;
158 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
159
160 spin_unlock(&tiqn_lock);
161 idr_preload_end();
162
163 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
164
165 return tiqn;
166
167 }
168
169 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
170 {
171 /*
172 * Wait for accesses to said struct iscsi_tiqn to end.
173 */
174 spin_lock(&tiqn->tiqn_state_lock);
175 while (tiqn->tiqn_access_count != 0) {
176 spin_unlock(&tiqn->tiqn_state_lock);
177 msleep(10);
178 spin_lock(&tiqn->tiqn_state_lock);
179 }
180 spin_unlock(&tiqn->tiqn_state_lock);
181 }
182
183 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
184 {
185 /*
186 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
187 * while holding tiqn->tiqn_state_lock. This means that all subsequent
188 * attempts to access this struct iscsi_tiqn will fail from both transport
189 * fabric and control code paths.
190 */
191 if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
192 pr_err("iscsit_set_tiqn_shutdown() failed\n");
193 return;
194 }
195
196 iscsit_wait_for_tiqn(tiqn);
197
198 spin_lock(&tiqn_lock);
199 list_del(&tiqn->tiqn_list);
200 idr_remove(&tiqn_idr, tiqn->tiqn_index);
201 spin_unlock(&tiqn_lock);
202
203 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
204 tiqn->tiqn);
205 kfree(tiqn);
206 }
207
208 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
209 {
210 int ret;
211 /*
212 * Determine if the network portal is accepting storage traffic.
213 */
214 spin_lock_bh(&np->np_thread_lock);
215 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
216 spin_unlock_bh(&np->np_thread_lock);
217 return -1;
218 }
219 spin_unlock_bh(&np->np_thread_lock);
220 /*
221 * Determine if the portal group is accepting storage traffic.
222 */
223 spin_lock_bh(&tpg->tpg_state_lock);
224 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
225 spin_unlock_bh(&tpg->tpg_state_lock);
226 return -1;
227 }
228 spin_unlock_bh(&tpg->tpg_state_lock);
229
230 /*
231 * Here we serialize access across the TIQN+TPG Tuple.
232 */
233 ret = down_interruptible(&tpg->np_login_sem);
234 if (ret != 0)
235 return -1;
236
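	/*
	 * Re-check the TPG state after acquiring np_login_sem; the portal
	 * group may have been disabled while this login slept in
	 * down_interruptible() above.
	 */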
237 spin_lock_bh(&tpg->tpg_state_lock);
238 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
239 spin_unlock_bh(&tpg->tpg_state_lock);
240 up(&tpg->np_login_sem);
241 return -1;
242 }
243 spin_unlock_bh(&tpg->tpg_state_lock);
244
245 return 0;
246 }
247
248 void iscsit_login_kref_put(struct kref *kref)
249 {
250 struct iscsi_tpg_np *tpg_np = container_of(kref,
251 struct iscsi_tpg_np, tpg_np_kref);
252
253 complete(&tpg_np->tpg_np_comp);
254 }
255
256 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
257 struct iscsi_tpg_np *tpg_np)
258 {
259 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
260
261 up(&tpg->np_login_sem);
262
263 if (tpg_np)
264 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
265
266 if (tiqn)
267 iscsit_put_tiqn_for_login(tiqn);
268
269 return 0;
270 }
271
272 bool iscsit_check_np_match(
273 struct sockaddr_storage *sockaddr,
274 struct iscsi_np *np,
275 int network_transport)
276 {
277 struct sockaddr_in *sock_in, *sock_in_e;
278 struct sockaddr_in6 *sock_in6, *sock_in6_e;
279 bool ip_match = false;
280 u16 port, port_e;
281
282 if (sockaddr->ss_family == AF_INET6) {
283 sock_in6 = (struct sockaddr_in6 *)sockaddr;
284 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
285
286 if (!memcmp(&sock_in6->sin6_addr.in6_u,
287 &sock_in6_e->sin6_addr.in6_u,
288 sizeof(struct in6_addr)))
289 ip_match = true;
290
291 port = ntohs(sock_in6->sin6_port);
292 port_e = ntohs(sock_in6_e->sin6_port);
293 } else {
294 sock_in = (struct sockaddr_in *)sockaddr;
295 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
296
297 if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
298 ip_match = true;
299
300 port = ntohs(sock_in->sin_port);
301 port_e = ntohs(sock_in_e->sin_port);
302 }
303
304 if (ip_match && (port_e == port) &&
305 (np->np_network_transport == network_transport))
306 return true;
307
308 return false;
309 }
310
311 /*
312 * Called with mutex np_lock held
313 */
314 static struct iscsi_np *iscsit_get_np(
315 struct sockaddr_storage *sockaddr,
316 int network_transport)
317 {
318 struct iscsi_np *np;
319 bool match;
320
321 list_for_each_entry(np, &g_np_list, np_list) {
322 spin_lock_bh(&np->np_thread_lock);
323 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
324 spin_unlock_bh(&np->np_thread_lock);
325 continue;
326 }
327
328 match = iscsit_check_np_match(sockaddr, np, network_transport);
329 if (match) {
330 /*
331 * Increment the np_exports reference count now to
332 * prevent iscsit_del_np() below from being called
333 * while iscsi_tpg_add_network_portal() is called.
334 */
335 np->np_exports++;
336 spin_unlock_bh(&np->np_thread_lock);
337 return np;
338 }
339 spin_unlock_bh(&np->np_thread_lock);
340 }
341
342 return NULL;
343 }
344
345 struct iscsi_np *iscsit_add_np(
346 struct sockaddr_storage *sockaddr,
347 int network_transport)
348 {
349 struct iscsi_np *np;
350 int ret;
351
352 mutex_lock(&np_lock);
353
354 /*
355 * Locate the existing struct iscsi_np if already active..
356 */
357 np = iscsit_get_np(sockaddr, network_transport);
358 if (np) {
359 mutex_unlock(&np_lock);
360 return np;
361 }
362
363 np = kzalloc(sizeof(*np), GFP_KERNEL);
364 if (!np) {
365 mutex_unlock(&np_lock);
366 return ERR_PTR(-ENOMEM);
367 }
368
369 np->np_flags |= NPF_IP_NETWORK;
370 np->np_network_transport = network_transport;
371 spin_lock_init(&np->np_thread_lock);
372 init_completion(&np->np_restart_comp);
373 INIT_LIST_HEAD(&np->np_list);
374
375 ret = iscsi_target_setup_login_socket(np, sockaddr);
376 if (ret != 0) {
377 kfree(np);
378 mutex_unlock(&np_lock);
379 return ERR_PTR(ret);
380 }
381
382 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
383 if (IS_ERR(np->np_thread)) {
384 pr_err("Unable to create kthread: iscsi_np\n");
385 ret = PTR_ERR(np->np_thread);
386 kfree(np);
387 mutex_unlock(&np_lock);
388 return ERR_PTR(ret);
389 }
390 /*
391 * Increment the np_exports reference count now to prevent
392 * iscsit_del_np() below from being run while a new call to
393 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
394 * active. We don't need to hold np->np_thread_lock at this
395 * point because iscsi_np has not been added to g_np_list yet.
396 */
397 np->np_exports = 1;
398 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
399
400 list_add_tail(&np->np_list, &g_np_list);
401 mutex_unlock(&np_lock);
402
403 pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
404 &np->np_sockaddr, np->np_transport->name);
405
406 return np;
407 }
408
409 int iscsit_reset_np_thread(
410 struct iscsi_np *np,
411 struct iscsi_tpg_np *tpg_np,
412 struct iscsi_portal_group *tpg,
413 bool shutdown)
414 {
415 spin_lock_bh(&np->np_thread_lock);
416 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
417 spin_unlock_bh(&np->np_thread_lock);
418 return 0;
419 }
420 np->np_thread_state = ISCSI_NP_THREAD_RESET;
421 atomic_inc(&np->np_reset_count);
422
423 if (np->np_thread) {
424 spin_unlock_bh(&np->np_thread_lock);
425 send_sig(SIGINT, np->np_thread, 1);
426 wait_for_completion(&np->np_restart_comp);
427 spin_lock_bh(&np->np_thread_lock);
428 }
429 spin_unlock_bh(&np->np_thread_lock);
430
431 if (tpg_np && shutdown) {
432 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
433
434 wait_for_completion(&tpg_np->tpg_np_comp);
435 }
436
437 return 0;
438 }
439
440 static void iscsit_free_np(struct iscsi_np *np)
441 {
442 if (np->np_socket)
443 sock_release(np->np_socket);
444 }
445
446 int iscsit_del_np(struct iscsi_np *np)
447 {
448 spin_lock_bh(&np->np_thread_lock);
449 np->np_exports--;
450 if (np->np_exports) {
451 np->enabled = true;
452 spin_unlock_bh(&np->np_thread_lock);
453 return 0;
454 }
455 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
456 spin_unlock_bh(&np->np_thread_lock);
457
458 if (np->np_thread) {
459 /*
460 * We need to send the signal to wake up Linux/Net
461 * which may be sleeping in sock_accept()..
462 */
463 send_sig(SIGINT, np->np_thread, 1);
464 kthread_stop(np->np_thread);
465 np->np_thread = NULL;
466 }
467
468 np->np_transport->iscsit_free_np(np);
469
470 mutex_lock(&np_lock);
471 list_del(&np->np_list);
472 mutex_unlock(&np_lock);
473
474 pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
475 &np->np_sockaddr, np->np_transport->name);
476
477 iscsit_put_transport(np->np_transport);
478 kfree(np);
479 return 0;
480 }
481
482 static void iscsit_get_rx_pdu(struct iscsi_conn *);
483
484 int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
485 {
486 return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
487 }
488 EXPORT_SYMBOL(iscsit_queue_rsp);
489
490 void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
491 {
492 spin_lock_bh(&conn->cmd_lock);
493 if (!list_empty(&cmd->i_conn_node) &&
494 !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
495 list_del_init(&cmd->i_conn_node);
496 spin_unlock_bh(&conn->cmd_lock);
497
498 __iscsit_free_cmd(cmd, true);
499 }
500 EXPORT_SYMBOL(iscsit_aborted_task);
501
502 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
503 u32, u32, u8 *, u8 *);
504 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
505
506 static int
507 iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
508 const void *data_buf, u32 data_buf_len)
509 {
510 struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
511 struct kvec *iov;
512 u32 niov = 0, tx_size = ISCSI_HDR_LEN;
513 int ret;
514
515 iov = &cmd->iov_misc[0];
516 iov[niov].iov_base = cmd->pdu;
517 iov[niov++].iov_len = ISCSI_HDR_LEN;
518
519 if (conn->conn_ops->HeaderDigest) {
520 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
521
522 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
523 ISCSI_HDR_LEN, 0, NULL,
524 (u8 *)header_digest);
525
526 iov[0].iov_len += ISCSI_CRC_LEN;
527 tx_size += ISCSI_CRC_LEN;
528 pr_debug("Attaching CRC32C HeaderDigest"
529 " to opcode 0x%x 0x%08x\n",
530 hdr->opcode, *header_digest);
531 }
532
533 if (data_buf_len) {
534 u32 padding = ((-data_buf_len) & 3);
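		/*
		 * iSCSI data segments are padded to a 4-byte boundary, so
		 * ((-len) & 3) yields the pad size, e.g. len = 5 -> 3 pad
		 * bytes, len = 8 -> 0.
		 */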
535
536 iov[niov].iov_base = (void *)data_buf;
537 iov[niov++].iov_len = data_buf_len;
538 tx_size += data_buf_len;
539
540 if (padding != 0) {
541 iov[niov].iov_base = &cmd->pad_bytes;
542 iov[niov++].iov_len = padding;
543 tx_size += padding;
544 pr_debug("Attaching %u additional"
545 " padding bytes.\n", padding);
546 }
547
548 if (conn->conn_ops->DataDigest) {
549 iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
550 data_buf, data_buf_len,
551 padding,
552 (u8 *)&cmd->pad_bytes,
553 (u8 *)&cmd->data_crc);
554
555 iov[niov].iov_base = &cmd->data_crc;
556 iov[niov++].iov_len = ISCSI_CRC_LEN;
557 tx_size += ISCSI_CRC_LEN;
558 pr_debug("Attached DataDigest for %u"
559 " bytes opcode 0x%x, CRC 0x%08x\n",
560 data_buf_len, hdr->opcode, cmd->data_crc);
561 }
562 }
563
564 cmd->iov_misc_count = niov;
565 cmd->tx_size = tx_size;
566
567 ret = iscsit_send_tx_data(cmd, conn, 1);
568 if (ret < 0) {
569 iscsit_tx_thread_wait_for_tcp(conn);
570 return ret;
571 }
572
573 return 0;
574 }
575
576 static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32);
577 static void iscsit_unmap_iovec(struct iscsi_cmd *);
578 static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
579 u32, u32, u32, u8 *);
580 static int
581 iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
582 const struct iscsi_datain *datain)
583 {
584 struct kvec *iov;
585 u32 iov_count = 0, tx_size = 0;
586 int ret, iov_ret;
587
588 iov = &cmd->iov_data[0];
589 iov[iov_count].iov_base = cmd->pdu;
590 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
591 tx_size += ISCSI_HDR_LEN;
592
593 if (conn->conn_ops->HeaderDigest) {
594 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
595
596 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
597 ISCSI_HDR_LEN, 0, NULL,
598 (u8 *)header_digest);
599
600 iov[0].iov_len += ISCSI_CRC_LEN;
601 tx_size += ISCSI_CRC_LEN;
602
603 pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
604 *header_digest);
605 }
606
607 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
608 datain->offset, datain->length);
609 if (iov_ret < 0)
610 return -1;
611
612 iov_count += iov_ret;
613 tx_size += datain->length;
614
615 cmd->padding = ((-datain->length) & 3);
616 if (cmd->padding) {
617 iov[iov_count].iov_base = cmd->pad_bytes;
618 iov[iov_count++].iov_len = cmd->padding;
619 tx_size += cmd->padding;
620
621 pr_debug("Attaching %u padding bytes\n", cmd->padding);
622 }
623
624 if (conn->conn_ops->DataDigest) {
625 cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
626 cmd, datain->offset,
627 datain->length,
628 cmd->padding,
629 cmd->pad_bytes);
630
631 iov[iov_count].iov_base = &cmd->data_crc;
632 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
633 tx_size += ISCSI_CRC_LEN;
634
635 pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
636 datain->length + cmd->padding, cmd->data_crc);
637 }
638
639 cmd->iov_data_count = iov_count;
640 cmd->tx_size = tx_size;
641
642 ret = iscsit_fe_sendpage_sg(cmd, conn);
643
644 iscsit_unmap_iovec(cmd);
645
646 if (ret < 0) {
647 iscsit_tx_thread_wait_for_tcp(conn);
648 return ret;
649 }
650
651 return 0;
652 }
653
654 static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
655 struct iscsi_datain_req *dr, const void *buf,
656 u32 buf_len)
657 {
658 if (dr)
659 return iscsit_xmit_datain_pdu(conn, cmd, buf);
660 else
661 return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
662 }
663
664 static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
665 {
666 return TARGET_PROT_NORMAL;
667 }
668
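/*
 * Built-in transport for traditional iSCSI over TCP sockets.  Other
 * transports (e.g. iSER) register their own struct iscsit_transport via
 * iscsit_register_transport().
 */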
669 static struct iscsit_transport iscsi_target_transport = {
670 .name = "iSCSI/TCP",
671 .transport_type = ISCSI_TCP,
672 .rdma_shutdown = false,
673 .owner = NULL,
674 .iscsit_setup_np = iscsit_setup_np,
675 .iscsit_accept_np = iscsit_accept_np,
676 .iscsit_free_np = iscsit_free_np,
677 .iscsit_get_login_rx = iscsit_get_login_rx,
678 .iscsit_put_login_tx = iscsit_put_login_tx,
679 .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
680 .iscsit_immediate_queue = iscsit_immediate_queue,
681 .iscsit_response_queue = iscsit_response_queue,
682 .iscsit_queue_data_in = iscsit_queue_rsp,
683 .iscsit_queue_status = iscsit_queue_rsp,
684 .iscsit_aborted_task = iscsit_aborted_task,
685 .iscsit_xmit_pdu = iscsit_xmit_pdu,
686 .iscsit_get_rx_pdu = iscsit_get_rx_pdu,
687 .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
688 };
689
690 static int __init iscsi_target_init_module(void)
691 {
692 int ret = 0, size;
693
694 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
695 iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
696 if (!iscsit_global)
697 return -1;
698
699 spin_lock_init(&iscsit_global->ts_bitmap_lock);
700 mutex_init(&auth_id_lock);
701 spin_lock_init(&sess_idr_lock);
702 idr_init(&tiqn_idr);
703 idr_init(&sess_idr);
704
705 ret = target_register_template(&iscsi_ops);
706 if (ret)
707 goto out;
708
709 size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
710 iscsit_global->ts_bitmap = vzalloc(size);
711 if (!iscsit_global->ts_bitmap)
712 goto configfs_out;
713
714 lio_qr_cache = kmem_cache_create("lio_qr_cache",
715 sizeof(struct iscsi_queue_req),
716 __alignof__(struct iscsi_queue_req), 0, NULL);
717 if (!lio_qr_cache) {
718 pr_err("nable to kmem_cache_create() for"
719 " lio_qr_cache\n");
720 goto bitmap_out;
721 }
722
723 lio_dr_cache = kmem_cache_create("lio_dr_cache",
724 sizeof(struct iscsi_datain_req),
725 __alignof__(struct iscsi_datain_req), 0, NULL);
726 if (!lio_dr_cache) {
727 pr_err("Unable to kmem_cache_create() for"
728 " lio_dr_cache\n");
729 goto qr_out;
730 }
731
732 lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
733 sizeof(struct iscsi_ooo_cmdsn),
734 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
735 if (!lio_ooo_cache) {
736 pr_err("Unable to kmem_cache_create() for"
737 " lio_ooo_cache\n");
738 goto dr_out;
739 }
740
741 lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
742 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
743 0, NULL);
744 if (!lio_r2t_cache) {
745 pr_err("Unable to kmem_cache_create() for"
746 " lio_r2t_cache\n");
747 goto ooo_out;
748 }
749
750 iscsit_register_transport(&iscsi_target_transport);
751
752 if (iscsit_load_discovery_tpg() < 0)
753 goto r2t_out;
754
755 return ret;
756 r2t_out:
757 iscsit_unregister_transport(&iscsi_target_transport);
758 kmem_cache_destroy(lio_r2t_cache);
759 ooo_out:
760 kmem_cache_destroy(lio_ooo_cache);
761 dr_out:
762 kmem_cache_destroy(lio_dr_cache);
763 qr_out:
764 kmem_cache_destroy(lio_qr_cache);
765 bitmap_out:
766 vfree(iscsit_global->ts_bitmap);
767 configfs_out:
768 /* XXX: this probably wants to be its own unwind step.. */
769 if (iscsit_global->discovery_tpg)
770 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
771 target_unregister_template(&iscsi_ops);
772 out:
773 kfree(iscsit_global);
774 return -ENOMEM;
775 }
776
777 static void __exit iscsi_target_cleanup_module(void)
778 {
779 iscsit_release_discovery_tpg();
780 iscsit_unregister_transport(&iscsi_target_transport);
781 kmem_cache_destroy(lio_qr_cache);
782 kmem_cache_destroy(lio_dr_cache);
783 kmem_cache_destroy(lio_ooo_cache);
784 kmem_cache_destroy(lio_r2t_cache);
785
786 /*
787 * Shutdown discovery sessions and disable discovery TPG
788 */
789 if (iscsit_global->discovery_tpg)
790 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
791
792 target_unregister_template(&iscsi_ops);
793
794 vfree(iscsit_global->ts_bitmap);
795 kfree(iscsit_global);
796 }
797
798 int iscsit_add_reject(
799 struct iscsi_conn *conn,
800 u8 reason,
801 unsigned char *buf)
802 {
803 struct iscsi_cmd *cmd;
804
805 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
806 if (!cmd)
807 return -1;
808
809 cmd->iscsi_opcode = ISCSI_OP_REJECT;
810 cmd->reject_reason = reason;
811
812 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
813 if (!cmd->buf_ptr) {
814 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
815 iscsit_free_cmd(cmd, false);
816 return -1;
817 }
818
819 spin_lock_bh(&conn->cmd_lock);
820 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
821 spin_unlock_bh(&conn->cmd_lock);
822
823 cmd->i_state = ISTATE_SEND_REJECT;
824 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
825
826 return -1;
827 }
828 EXPORT_SYMBOL(iscsit_add_reject);
829
830 static int iscsit_add_reject_from_cmd(
831 struct iscsi_cmd *cmd,
832 u8 reason,
833 bool add_to_conn,
834 unsigned char *buf)
835 {
836 struct iscsi_conn *conn;
837 const bool do_put = cmd->se_cmd.se_tfo != NULL;
838
839 if (!cmd->conn) {
840 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
841 cmd->init_task_tag);
842 return -1;
843 }
844 conn = cmd->conn;
845
846 cmd->iscsi_opcode = ISCSI_OP_REJECT;
847 cmd->reject_reason = reason;
848
849 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
850 if (!cmd->buf_ptr) {
851 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
852 iscsit_free_cmd(cmd, false);
853 return -1;
854 }
855
856 if (add_to_conn) {
857 spin_lock_bh(&conn->cmd_lock);
858 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
859 spin_unlock_bh(&conn->cmd_lock);
860 }
861
862 cmd->i_state = ISTATE_SEND_REJECT;
863 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
864 /*
865 * Perform the kref_put now if se_cmd has already been setup by
866 * iscsit_setup_scsi_cmd()
867 */
868 if (do_put) {
869 pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
870 target_put_sess_cmd(&cmd->se_cmd);
871 }
872 return -1;
873 }
874
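/*
 * iscsit_add_reject_cmd() links the command onto conn->conn_cmd_list before
 * queueing the REJECT, while iscsit_reject_cmd() assumes the command is
 * already tracked on the connection.
 */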
875 static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
876 unsigned char *buf)
877 {
878 return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
879 }
880
881 int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
882 {
883 return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
884 }
885 EXPORT_SYMBOL(iscsit_reject_cmd);
886
887 /*
888 * Map some portion of the allocated scatterlist to an iovec, suitable for
889 * kernel sockets to copy data in/out.
890 */
891 static int iscsit_map_iovec(
892 struct iscsi_cmd *cmd,
893 struct kvec *iov,
894 u32 data_offset,
895 u32 data_length)
896 {
897 u32 i = 0;
898 struct scatterlist *sg;
899 unsigned int page_off;
900
901 /*
902 * We know each entry in t_data_sg contains a page.
903 */
904 u32 ent = data_offset / PAGE_SIZE;
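	/*
	 * Illustration, assuming 4k pages: data_offset = 8192 selects
	 * t_data_sg entry 2 with page_off = 0, while data_offset = 9000
	 * selects entry 2 with page_off = 808.
	 */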
905
906 if (ent >= cmd->se_cmd.t_data_nents) {
907 pr_err("Initial page entry out-of-bounds\n");
908 return -1;
909 }
910
911 sg = &cmd->se_cmd.t_data_sg[ent];
912 page_off = (data_offset % PAGE_SIZE);
913
914 cmd->first_data_sg = sg;
915 cmd->first_data_sg_off = page_off;
916
917 while (data_length) {
918 u32 cur_len = min_t(u32, data_length, sg->length - page_off);
919
920 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
921 iov[i].iov_len = cur_len;
922
923 data_length -= cur_len;
924 page_off = 0;
925 sg = sg_next(sg);
926 i++;
927 }
928
929 cmd->kmapped_nents = i;
930
931 return i;
932 }
933
934 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
935 {
936 u32 i;
937 struct scatterlist *sg;
938
939 sg = cmd->first_data_sg;
940
941 for (i = 0; i < cmd->kmapped_nents; i++)
942 kunmap(sg_page(&sg[i]));
943 }
944
945 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
946 {
947 LIST_HEAD(ack_list);
948 struct iscsi_cmd *cmd, *cmd_p;
949
950 conn->exp_statsn = exp_statsn;
951
952 if (conn->sess->sess_ops->RDMAExtensions)
953 return;
954
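	/*
	 * ExpStatSN acknowledges every response with StatSN < ExpStatSN
	 * (RFC 3720 serial number arithmetic via iscsi_sna_lt()).  Collect
	 * the acknowledged commands on a local list under cmd_lock and free
	 * them after the lock is dropped.
	 */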
955 spin_lock_bh(&conn->cmd_lock);
956 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
957 spin_lock(&cmd->istate_lock);
958 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
959 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
960 cmd->i_state = ISTATE_REMOVE;
961 spin_unlock(&cmd->istate_lock);
962 list_move_tail(&cmd->i_conn_node, &ack_list);
963 continue;
964 }
965 spin_unlock(&cmd->istate_lock);
966 }
967 spin_unlock_bh(&conn->cmd_lock);
968
969 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
970 list_del_init(&cmd->i_conn_node);
971 iscsit_free_cmd(cmd, false);
972 }
973 }
974
975 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
976 {
977 u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
978
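	/*
	 * One kvec per page of the data buffer, plus ISCSI_IOV_DATA_BUFFER
	 * spare slots to cover the header, pad bytes and digests.
	 */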
979 iov_count += ISCSI_IOV_DATA_BUFFER;
980 cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
981 if (!cmd->iov_data)
982 return -ENOMEM;
983
984 cmd->orig_iov_data_count = iov_count;
985 return 0;
986 }
987
988 int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
989 unsigned char *buf)
990 {
991 int data_direction, payload_length;
992 struct iscsi_scsi_req *hdr;
993 int iscsi_task_attr;
994 int sam_task_attr;
995
996 atomic_long_inc(&conn->sess->cmd_pdus);
997
998 hdr = (struct iscsi_scsi_req *) buf;
999 payload_length = ntoh24(hdr->dlength);
1000
1001 /* FIXME: Add checks for AdditionalHeaderSegment */
1002
1003 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
1004 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1005 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
1006 " not set. Bad iSCSI Initiator.\n");
1007 return iscsit_add_reject_cmd(cmd,
1008 ISCSI_REASON_BOOKMARK_INVALID, buf);
1009 }
1010
1011 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
1012 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
1013 /*
1014 * From RFC-3720 Section 10.3.1:
1015 *
1016 * "Either or both of R and W MAY be 1 when either the
1017 * Expected Data Transfer Length and/or Bidirectional Read
1018 * Expected Data Transfer Length are 0"
1019 *
1020 * For this case, go ahead and clear the unnecessary bits
1021 * to avoid any confusion with ->data_direction.
1022 */
1023 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
1024 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1025
1026 pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1027 " set when Expected Data Transfer Length is 0 for"
1028 " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
1029 }
1030
1031 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
1032 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
1033 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
1034 " MUST be set if Expected Data Transfer Length is not 0."
1035 " Bad iSCSI Initiator\n");
1036 return iscsit_add_reject_cmd(cmd,
1037 ISCSI_REASON_BOOKMARK_INVALID, buf);
1038 }
1039
1040 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
1041 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
1042 pr_err("Bidirectional operations not supported!\n");
1043 return iscsit_add_reject_cmd(cmd,
1044 ISCSI_REASON_BOOKMARK_INVALID, buf);
1045 }
1046
1047 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1048 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
1049 " Scsi Command PDU.\n");
1050 return iscsit_add_reject_cmd(cmd,
1051 ISCSI_REASON_BOOKMARK_INVALID, buf);
1052 }
1053
1054 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
1055 pr_err("ImmediateData=No but DataSegmentLength=%u,"
1056 " protocol error.\n", payload_length);
1057 return iscsit_add_reject_cmd(cmd,
1058 ISCSI_REASON_PROTOCOL_ERROR, buf);
1059 }
1060
1061 if ((be32_to_cpu(hdr->data_length) == payload_length) &&
1062 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
1063 pr_err("Expected Data Transfer Length and Length of"
1064 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
1065 " bit is not set protocol error\n");
1066 return iscsit_add_reject_cmd(cmd,
1067 ISCSI_REASON_PROTOCOL_ERROR, buf);
1068 }
1069
1070 if (payload_length > be32_to_cpu(hdr->data_length)) {
1071 pr_err("DataSegmentLength: %u is greater than"
1072 " EDTL: %u, protocol error.\n", payload_length,
1073 hdr->data_length);
1074 return iscsit_add_reject_cmd(cmd,
1075 ISCSI_REASON_PROTOCOL_ERROR, buf);
1076 }
1077
1078 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1079 pr_err("DataSegmentLength: %u is greater than"
1080 " MaxXmitDataSegmentLength: %u, protocol error.\n",
1081 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1082 return iscsit_add_reject_cmd(cmd,
1083 ISCSI_REASON_PROTOCOL_ERROR, buf);
1084 }
1085
1086 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
1087 pr_err("DataSegmentLength: %u is greater than"
1088 " FirstBurstLength: %u, protocol error.\n",
1089 payload_length, conn->sess->sess_ops->FirstBurstLength);
1090 return iscsit_add_reject_cmd(cmd,
1091 ISCSI_REASON_BOOKMARK_INVALID, buf);
1092 }
1093
1094 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
1095 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
1096 DMA_NONE;
1097
1098 cmd->data_direction = data_direction;
1099 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
1100 /*
1101 * Figure out the SAM Task Attribute for the incoming SCSI CDB
1102 */
1103 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
1104 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
1105 sam_task_attr = TCM_SIMPLE_TAG;
1106 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
1107 sam_task_attr = TCM_ORDERED_TAG;
1108 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
1109 sam_task_attr = TCM_HEAD_TAG;
1110 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
1111 sam_task_attr = TCM_ACA_TAG;
1112 else {
1113 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
1114 " TCM_SIMPLE_TAG\n", iscsi_task_attr);
1115 sam_task_attr = TCM_SIMPLE_TAG;
1116 }
1117
1118 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
1119 cmd->i_state = ISTATE_NEW_CMD;
1120 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1121 cmd->immediate_data = (payload_length) ? 1 : 0;
1122 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
1123 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
1124 if (cmd->unsolicited_data)
1125 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
1126
1127 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1128 if (hdr->flags & ISCSI_FLAG_CMD_READ)
1129 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
1130 else
1131 cmd->targ_xfer_tag = 0xFFFFFFFF;
1132 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1133 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1134 cmd->first_burst_len = payload_length;
1135
1136 if (!conn->sess->sess_ops->RDMAExtensions &&
1137 cmd->data_direction == DMA_FROM_DEVICE) {
1138 struct iscsi_datain_req *dr;
1139
1140 dr = iscsit_allocate_datain_req();
1141 if (!dr)
1142 return iscsit_add_reject_cmd(cmd,
1143 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1144
1145 iscsit_attach_datain_req(cmd, dr);
1146 }
1147
1148 /*
1149 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
1150 */
1151 transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
1152 conn->sess->se_sess, be32_to_cpu(hdr->data_length),
1153 cmd->data_direction, sam_task_attr,
1154 cmd->sense_buffer + 2);
1155
1156 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
1157 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
1158 hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
1159 conn->cid);
1160
1161 target_get_sess_cmd(&cmd->se_cmd, true);
1162
1163 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
1164 scsilun_to_int(&hdr->lun));
1165 if (cmd->sense_reason)
1166 goto attach_cmd;
1167
1168 /* only used for printks or comparing with ->ref_task_tag */
1169 cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
1170 cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
1171 if (cmd->sense_reason) {
1172 if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
1173 return iscsit_add_reject_cmd(cmd,
1174 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1175 }
1176
1177 goto attach_cmd;
1178 }
1179
1180 if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
1181 return iscsit_add_reject_cmd(cmd,
1182 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1183 }
1184
1185 attach_cmd:
1186 spin_lock_bh(&conn->cmd_lock);
1187 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1188 spin_unlock_bh(&conn->cmd_lock);
1189 /*
1190 * Check if we need to delay processing because of ALUA
1191 * Active/NonOptimized primary access state..
1192 */
1193 core_alua_check_nonop_delay(&cmd->se_cmd);
1194
1195 return 0;
1196 }
1197 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1198
1199 void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
1200 {
1201 iscsit_set_dataout_sequence_values(cmd);
1202
1203 spin_lock_bh(&cmd->dataout_timeout_lock);
1204 iscsit_start_dataout_timer(cmd, cmd->conn);
1205 spin_unlock_bh(&cmd->dataout_timeout_lock);
1206 }
1207 EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
1208
1209 int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1210 struct iscsi_scsi_req *hdr)
1211 {
1212 int cmdsn_ret = 0;
1213 /*
1214 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1215 * the Immediate Bit is not set, and no Immediate
1216 * Data is attached.
1217 *
1218 * A PDU/CmdSN carrying Immediate Data can only
1219 * be processed after the DataCRC has passed.
1220 * If the DataCRC fails, the CmdSN MUST NOT
1221 * be acknowledged. (See below)
1222 */
1223 if (!cmd->immediate_data) {
1224 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1225 (unsigned char *)hdr, hdr->cmdsn);
1226 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1227 return -1;
1228 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1229 target_put_sess_cmd(&cmd->se_cmd);
1230 return 0;
1231 }
1232 }
1233
1234 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1235
1236 /*
1237 * If no Immediate Data is attached, it's OK to return now.
1238 */
1239 if (!cmd->immediate_data) {
1240 if (!cmd->sense_reason && cmd->unsolicited_data)
1241 iscsit_set_unsoliticed_dataout(cmd);
1242 if (!cmd->sense_reason)
1243 return 0;
1244
1245 target_put_sess_cmd(&cmd->se_cmd);
1246 return 0;
1247 }
1248
1249 /*
1250 * Early CHECK_CONDITIONs with ImmediateData never make it to command
1251 * execution. These exceptions are processed in CmdSN order using
1252 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
1253 */
1254 if (cmd->sense_reason)
1255 return 1;
1256 /*
1257 * Call directly into transport_generic_new_cmd() to perform
1258 * the backend memory allocation.
1259 */
1260 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
1261 if (cmd->sense_reason)
1262 return 1;
1263
1264 return 0;
1265 }
1266 EXPORT_SYMBOL(iscsit_process_scsi_cmd);
1267
1268 static int
1269 iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
1270 bool dump_payload)
1271 {
1272 int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1273 /*
1274 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
1275 */
1276 if (dump_payload)
1277 goto after_immediate_data;
1278 /*
1279 * Check for underflow case where both EDTL and immediate data payload
1280 * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
1281 * already been set in target_cmd_size_check() as se_cmd->data_length.
1282 *
1283 * For this special case, fail the command and dump the immediate data
1284 * payload.
1285 */
1286 if (cmd->first_burst_len > cmd->se_cmd.data_length) {
1287 cmd->sense_reason = TCM_INVALID_CDB_FIELD;
1288 goto after_immediate_data;
1289 }
1290
1291 immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1292 cmd->first_burst_len);
1293 after_immediate_data:
1294 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1295 /*
1296 * A PDU/CmdSN carrying Immediate Data passed
1297 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1298 * Immediate Bit is not set.
1299 */
1300 cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
1301 (unsigned char *)hdr, hdr->cmdsn);
1302 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1303 return -1;
1304
1305 if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1306 int rc;
1307
1308 rc = iscsit_dump_data_payload(cmd->conn,
1309 cmd->first_burst_len, 1);
1310 target_put_sess_cmd(&cmd->se_cmd);
1311 return rc;
1312 } else if (cmd->unsolicited_data)
1313 iscsit_set_unsoliticed_dataout(cmd);
1314
1315 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1316 /*
1317 * Immediate Data failed DataCRC and ERL>=1,
1318 * silently drop this PDU and let the initiator
1319 * plug the CmdSN gap.
1320 *
1321 * FIXME: Send Unsolicited NOPIN with reserved
1322 * TTT here to help the initiator figure out
1323 * the missing CmdSN, although they should be
1324 * intelligent enough to determine the missing
1325 * CmdSN and issue a retry to plug the sequence.
1326 */
1327 cmd->i_state = ISTATE_REMOVE;
1328 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
1329 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1330 return -1;
1331
1332 return 0;
1333 }
1334
1335 static int
1336 iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1337 unsigned char *buf)
1338 {
1339 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1340 int rc, immed_data;
1341 bool dump_payload = false;
1342
1343 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1344 if (rc < 0)
1345 return 0;
1346 /*
1347 * Allocate iovecs needed for struct socket operations for
1348 * traditional iSCSI block I/O.
1349 */
1350 if (iscsit_allocate_iovecs(cmd) < 0) {
1351 return iscsit_reject_cmd(cmd,
1352 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1353 }
1354 immed_data = cmd->immediate_data;
1355
1356 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1357 if (rc < 0)
1358 return rc;
1359 else if (rc > 0)
1360 dump_payload = true;
1361
1362 if (!immed_data)
1363 return 0;
1364
1365 return iscsit_get_immediate_data(cmd, hdr, dump_payload);
1366 }
1367
1368 static u32 iscsit_do_crypto_hash_sg(
1369 struct ahash_request *hash,
1370 struct iscsi_cmd *cmd,
1371 u32 data_offset,
1372 u32 data_length,
1373 u32 padding,
1374 u8 *pad_bytes)
1375 {
1376 u32 data_crc;
1377 struct scatterlist *sg;
1378 unsigned int page_off;
1379
1380 crypto_ahash_init(hash);
1381
1382 sg = cmd->first_data_sg;
1383 page_off = cmd->first_data_sg_off;
1384
1385 while (data_length) {
1386 u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
1387
1388 ahash_request_set_crypt(hash, sg, NULL, cur_len);
1389 crypto_ahash_update(hash);
1390
1391 data_length -= cur_len;
1392 page_off = 0;
1393 /* iscsit_map_iovec has already checked for invalid sg pointers */
1394 sg = sg_next(sg);
1395 }
1396
1397 if (padding) {
1398 struct scatterlist pad_sg;
1399
1400 sg_init_one(&pad_sg, pad_bytes, padding);
1401 ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
1402 padding);
1403 crypto_ahash_finup(hash);
1404 } else {
1405 ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
1406 crypto_ahash_final(hash);
1407 }
1408
1409 return data_crc;
1410 }
1411
1412 static void iscsit_do_crypto_hash_buf(
1413 struct ahash_request *hash,
1414 const void *buf,
1415 u32 payload_length,
1416 u32 padding,
1417 u8 *pad_bytes,
1418 u8 *data_crc)
1419 {
1420 struct scatterlist sg[2];
1421
1422 sg_init_table(sg, ARRAY_SIZE(sg));
1423 sg_set_buf(sg, buf, payload_length);
1424 if (padding)
1425 sg_set_buf(sg + 1, pad_bytes, padding);
1426
1427 ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
1428
1429 crypto_ahash_digest(hash);
1430 }
1431
1432 int
1433 __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
1434 struct iscsi_cmd *cmd, u32 payload_length,
1435 bool *success)
1436 {
1437 struct iscsi_data *hdr = buf;
1438 struct se_cmd *se_cmd;
1439 int rc;
1440
1441 /* iSCSI write */
1442 atomic_long_add(payload_length, &conn->sess->rx_data_octets);
1443
1444 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1445 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1446 hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
1447 payload_length, conn->cid);
1448
1449 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1450 pr_err("Command ITT: 0x%08x received DataOUT after"
1451 " last DataOUT received, dumping payload\n",
1452 cmd->init_task_tag);
1453 return iscsit_dump_data_payload(conn, payload_length, 1);
1454 }
1455
1456 if (cmd->data_direction != DMA_TO_DEVICE) {
1457 pr_err("Command ITT: 0x%08x received DataOUT for a"
1458 " NON-WRITE command.\n", cmd->init_task_tag);
1459 return iscsit_dump_data_payload(conn, payload_length, 1);
1460 }
1461 se_cmd = &cmd->se_cmd;
1462 iscsit_mod_dataout_timer(cmd);
1463
1464 if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1465 pr_err("DataOut Offset: %u, Length %u greater than"
1466 " iSCSI Command EDTL %u, protocol error.\n",
1467 hdr->offset, payload_length, cmd->se_cmd.data_length);
1468 return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
1469 }
1470
1471 if (cmd->unsolicited_data) {
1472 int dump_unsolicited_data = 0;
1473
1474 if (conn->sess->sess_ops->InitialR2T) {
1475 pr_err("Received unexpected unsolicited data"
1476 " while InitialR2T=Yes, protocol error.\n");
1477 transport_send_check_condition_and_sense(&cmd->se_cmd,
1478 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
1479 return -1;
1480 }
1481 /*
1482 * Special case for dealing with Unsolicited DataOUT
1483 * and Unsupported SAM WRITE Opcodes and SE resource allocation
1484 * failures;
1485 */
1486
1487 /* Something's amiss if we're not in WRITE_PENDING state... */
1488 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1489 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
1490 dump_unsolicited_data = 1;
1491
1492 if (dump_unsolicited_data) {
1493 /*
1494 * Check if a delayed TASK_ABORTED status needs to
1495 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1496 * received with the unsolicited data out.
1497 */
1498 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1499 iscsit_stop_dataout_timer(cmd);
1500
1501 transport_check_aborted_status(se_cmd,
1502 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
1503 return iscsit_dump_data_payload(conn, payload_length, 1);
1504 }
1505 } else {
1506 /*
1507 * For the normal solicited data path:
1508 *
1509 * Check for a delayed TASK_ABORTED status and dump any
1510 * incoming data out payload if one exists. Also, when the
1511 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1512 * data out sequence, we decrement outstanding_r2ts. Once
1513 * outstanding_r2ts reaches zero, go ahead and send the delayed
1514 * TASK_ABORTED status.
1515 */
1516 if (se_cmd->transport_state & CMD_T_ABORTED) {
1517 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1518 if (--cmd->outstanding_r2ts < 1) {
1519 iscsit_stop_dataout_timer(cmd);
1520 transport_check_aborted_status(
1521 se_cmd, 1);
1522 }
1523
1524 return iscsit_dump_data_payload(conn, payload_length, 1);
1525 }
1526 }
1527 /*
1528 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1529 * within-command recovery checks before receiving the payload.
1530 */
1531 rc = iscsit_check_pre_dataout(cmd, buf);
1532 if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
1533 return 0;
1534 else if (rc == DATAOUT_CANNOT_RECOVER)
1535 return -1;
1536 *success = true;
1537 return 0;
1538 }
1539 EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
1540
1541 int
1542 iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
1543 struct iscsi_cmd **out_cmd)
1544 {
1545 struct iscsi_data *hdr = buf;
1546 struct iscsi_cmd *cmd;
1547 u32 payload_length = ntoh24(hdr->dlength);
1548 int rc;
1549 bool success = false;
1550
1551 if (!payload_length) {
1552 pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
1553 return 0;
1554 }
1555
1556 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1557 pr_err_ratelimited("DataSegmentLength: %u is greater than"
1558 " MaxXmitDataSegmentLength: %u\n", payload_length,
1559 conn->conn_ops->MaxXmitDataSegmentLength);
1560 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
1561 }
1562
1563 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
1564 if (!cmd)
1565 return 0;
1566
1567 rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
1568
1569 if (success)
1570 *out_cmd = cmd;
1571
1572 return rc;
1573 }
1574 EXPORT_SYMBOL(iscsit_check_dataout_hdr);
1575
1576 static int
1577 iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1578 struct iscsi_data *hdr)
1579 {
1580 struct kvec *iov;
1581 u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
1582 u32 payload_length = ntoh24(hdr->dlength);
1583 int iov_ret, data_crc_failed = 0;
1584
1585 rx_size += payload_length;
1586 iov = &cmd->iov_data[0];
1587
1588 iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
1589 payload_length);
1590 if (iov_ret < 0)
1591 return -1;
1592
1593 iov_count += iov_ret;
1594
1595 padding = ((-payload_length) & 3);
1596 if (padding != 0) {
1597 iov[iov_count].iov_base = cmd->pad_bytes;
1598 iov[iov_count++].iov_len = padding;
1599 rx_size += padding;
1600 pr_debug("Receiving %u padding bytes.\n", padding);
1601 }
1602
1603 if (conn->conn_ops->DataDigest) {
1604 iov[iov_count].iov_base = &checksum;
1605 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1606 rx_size += ISCSI_CRC_LEN;
1607 }
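	/*
	 * rx_size now covers the DataOUT payload, any pad bytes and the
	 * optional DataDigest; receive everything in one pass below.
	 */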
1608
1609 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1610
1611 iscsit_unmap_iovec(cmd);
1612
1613 if (rx_got != rx_size)
1614 return -1;
1615
1616 if (conn->conn_ops->DataDigest) {
1617 u32 data_crc;
1618
1619 data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
1620 be32_to_cpu(hdr->offset),
1621 payload_length, padding,
1622 cmd->pad_bytes);
1623
1624 if (checksum != data_crc) {
1625 pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1626 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1627 " does not match computed 0x%08x\n",
1628 hdr->itt, hdr->offset, payload_length,
1629 hdr->datasn, checksum, data_crc);
1630 data_crc_failed = 1;
1631 } else {
1632 pr_debug("Got CRC32C DataDigest 0x%08x for"
1633 " %u bytes of Data Out\n", checksum,
1634 payload_length);
1635 }
1636 }
1637
1638 return data_crc_failed;
1639 }
1640
1641 int
1642 iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
1643 bool data_crc_failed)
1644 {
1645 struct iscsi_conn *conn = cmd->conn;
1646 int rc, ooo_cmdsn;
1647 /*
1648 * Increment post receive data and CRC values or perform
1649 * within-command recovery.
1650 */
1651 rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
1652 if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
1653 return 0;
1654 else if (rc == DATAOUT_SEND_R2T) {
1655 iscsit_set_dataout_sequence_values(cmd);
1656 conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
1657 } else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
1658 /*
1659 * Handle extra special case for out of order
1660 * Unsolicited Data Out.
1661 */
1662 spin_lock_bh(&cmd->istate_lock);
1663 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1664 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1665 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1666 spin_unlock_bh(&cmd->istate_lock);
1667
1668 iscsit_stop_dataout_timer(cmd);
1669 if (ooo_cmdsn)
1670 return 0;
1671 target_execute_cmd(&cmd->se_cmd);
1672 return 0;
1673 } else /* DATAOUT_CANNOT_RECOVER */
1674 return -1;
1675
1676 return 0;
1677 }
1678 EXPORT_SYMBOL(iscsit_check_dataout_payload);
1679
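/*
 * DataOUT fast path for traditional iSCSI/TCP: validate the header, receive
 * the payload (verifying DataCRC when DataDigest is enabled), then either
 * continue the DataOUT sequence or hand the completed write to the backend.
 */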
1680 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1681 {
1682 struct iscsi_cmd *cmd = NULL;
1683 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1684 int rc;
1685 bool data_crc_failed = false;
1686
1687 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1688 if (rc < 0)
1689 return 0;
1690 else if (!cmd)
1691 return 0;
1692
1693 rc = iscsit_get_dataout(conn, cmd, hdr);
1694 if (rc < 0)
1695 return rc;
1696 else if (rc > 0)
1697 data_crc_failed = true;
1698
1699 return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
1700 }
1701
1702 int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1703 struct iscsi_nopout *hdr)
1704 {
1705 u32 payload_length = ntoh24(hdr->dlength);
1706
1707 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1708 pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
1709 if (!cmd)
1710 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1711 (unsigned char *)hdr);
1712
1713 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1714 (unsigned char *)hdr);
1715 }
1716
1717 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1718 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1719 " not set, protocol error.\n");
1720 if (!cmd)
1721 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1722 (unsigned char *)hdr);
1723
1724 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1725 (unsigned char *)hdr);
1726 }
1727
1728 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1729 pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1730 " greater than MaxXmitDataSegmentLength: %u, protocol"
1731 " error.\n", payload_length,
1732 conn->conn_ops->MaxXmitDataSegmentLength);
1733 if (!cmd)
1734 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1735 (unsigned char *)hdr);
1736
1737 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1738 (unsigned char *)hdr);
1739 }
1740
1741 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
1742 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1743 hdr->itt == RESERVED_ITT ? "Response" : "Request",
1744 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1745 payload_length);
1746 /*
1747 * This is not a response to an Unsolicited NopIN, which means
1748 * it can either be a NOPOUT ping request (with a valid ITT),
1749 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1750 * Either way, make sure we allocate a struct iscsi_cmd, as both
1751 * can contain ping data.
1752 */
1753 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1754 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
1755 cmd->i_state = ISTATE_SEND_NOPIN;
1756 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1757 1 : 0);
1758 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1759 cmd->targ_xfer_tag = 0xFFFFFFFF;
1760 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1761 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1762 cmd->data_direction = DMA_NONE;
1763 }
1764
1765 return 0;
1766 }
1767 EXPORT_SYMBOL(iscsit_setup_nop_out);
1768
1769 int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1770 struct iscsi_nopout *hdr)
1771 {
1772 struct iscsi_cmd *cmd_p = NULL;
1773 int cmdsn_ret = 0;
1774 /*
1775 * Initiator is expecting a NopIN ping reply..
1776 */
1777 if (hdr->itt != RESERVED_ITT) {
1778 if (!cmd)
1779 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1780 (unsigned char *)hdr);
1781
1782 spin_lock_bh(&conn->cmd_lock);
1783 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1784 spin_unlock_bh(&conn->cmd_lock);
1785
1786 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1787
1788 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1789 iscsit_add_cmd_to_response_queue(cmd, conn,
1790 cmd->i_state);
1791 return 0;
1792 }
1793
1794 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1795 (unsigned char *)hdr, hdr->cmdsn);
1796 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1797 return 0;
1798 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1799 return -1;
1800
1801 return 0;
1802 }
1803 /*
1804 	 * This was a response to an unsolicited NOPIN ping.
1805 */
1806 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1807 cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1808 if (!cmd_p)
1809 return -EINVAL;
1810
1811 iscsit_stop_nopin_response_timer(conn);
1812
1813 cmd_p->i_state = ISTATE_REMOVE;
1814 iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
1815
1816 iscsit_start_nopin_timer(conn);
1817 return 0;
1818 }
1819 /*
1820 	 * Otherwise, the initiator is not expecting a NOPIN in response.
1821 * Just ignore for now.
1822 */
1823
1824 if (cmd)
1825 iscsit_free_cmd(cmd, false);
1826
1827 return 0;
1828 }
1829 EXPORT_SYMBOL(iscsit_process_nop_out);
1830
1831 static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1832 unsigned char *buf)
1833 {
1834 unsigned char *ping_data = NULL;
1835 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1836 struct kvec *iov = NULL;
1837 u32 payload_length = ntoh24(hdr->dlength);
1838 int ret;
1839
1840 ret = iscsit_setup_nop_out(conn, cmd, hdr);
1841 if (ret < 0)
1842 return 0;
1843 /*
1844 * Handle NOP-OUT payload for traditional iSCSI sockets
1845 */
1846 if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1847 u32 checksum, data_crc, padding = 0;
1848 int niov = 0, rx_got, rx_size = payload_length;
1849
1850 ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1851 if (!ping_data) {
1852 ret = -1;
1853 goto out;
1854 }
1855
1856 iov = &cmd->iov_misc[0];
1857 iov[niov].iov_base = ping_data;
1858 iov[niov++].iov_len = payload_length;
1859
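		/*
		 * iSCSI data segments are padded to a 4-byte boundary
		 * (RFC 3720); (-payload_length) & 3 yields the 0-3 pad
		 * bytes that must also be received.
		 */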
1860 padding = ((-payload_length) & 3);
1861 if (padding != 0) {
1862 pr_debug("Receiving %u additional bytes"
1863 " for padding.\n", padding);
1864 iov[niov].iov_base = &cmd->pad_bytes;
1865 iov[niov++].iov_len = padding;
1866 rx_size += padding;
1867 }
1868 if (conn->conn_ops->DataDigest) {
1869 iov[niov].iov_base = &checksum;
1870 iov[niov++].iov_len = ISCSI_CRC_LEN;
1871 rx_size += ISCSI_CRC_LEN;
1872 }
1873
1874 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1875 if (rx_got != rx_size) {
1876 ret = -1;
1877 goto out;
1878 }
1879
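		/*
		 * When DataDigest was negotiated, recompute the CRC32C over
		 * the received ping data (plus any pad bytes) and compare it
		 * against the digest the initiator appended to the PDU.
		 */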
1880 if (conn->conn_ops->DataDigest) {
1881 iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
1882 ping_data, payload_length,
1883 padding, cmd->pad_bytes,
1884 (u8 *)&data_crc);
1885
1886 if (checksum != data_crc) {
1887 pr_err("Ping data CRC32C DataDigest"
1888 " 0x%08x does not match computed 0x%08x\n",
1889 checksum, data_crc);
1890 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1891 pr_err("Unable to recover from"
1892 " NOPOUT Ping DataCRC failure while in"
1893 " ERL=0.\n");
1894 ret = -1;
1895 goto out;
1896 } else {
1897 /*
1898 * Silently drop this PDU and let the
1899 * initiator plug the CmdSN gap.
1900 */
1901 pr_debug("Dropping NOPOUT"
1902 " Command CmdSN: 0x%08x due to"
1903 " DataCRC error.\n", hdr->cmdsn);
1904 ret = 0;
1905 goto out;
1906 }
1907 } else {
1908 pr_debug("Got CRC32C DataDigest"
1909 " 0x%08x for %u bytes of ping data.\n",
1910 checksum, payload_length);
1911 }
1912 }
1913
1914 ping_data[payload_length] = '\0';
1915 /*
1916 * Attach ping data to struct iscsi_cmd->buf_ptr.
1917 */
1918 cmd->buf_ptr = ping_data;
1919 cmd->buf_ptr_size = payload_length;
1920
1921 pr_debug("Got %u bytes of NOPOUT ping"
1922 " data.\n", payload_length);
1923 pr_debug("Ping Data: \"%s\"\n", ping_data);
1924 }
1925
1926 return iscsit_process_nop_out(conn, cmd, hdr);
1927 out:
1928 if (cmd)
1929 iscsit_free_cmd(cmd, false);
1930
1931 kfree(ping_data);
1932 return ret;
1933 }
1934
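/*
 * Map an iSCSI Task Management Function code onto the generic TCM TMR
 * type used by the target core.  TASK_REASSIGN is intentionally absent;
 * it is handled entirely within the iSCSI fabric code.
 */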
1935 static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
1936 {
1937 switch (iscsi_tmf) {
1938 case ISCSI_TM_FUNC_ABORT_TASK:
1939 return TMR_ABORT_TASK;
1940 case ISCSI_TM_FUNC_ABORT_TASK_SET:
1941 return TMR_ABORT_TASK_SET;
1942 case ISCSI_TM_FUNC_CLEAR_ACA:
1943 return TMR_CLEAR_ACA;
1944 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1945 return TMR_CLEAR_TASK_SET;
1946 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1947 return TMR_LUN_RESET;
1948 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
1949 return TMR_TARGET_WARM_RESET;
1950 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
1951 return TMR_TARGET_COLD_RESET;
1952 default:
1953 return TMR_UNKNOWN;
1954 }
1955 }
1956
1957 int
1958 iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1959 unsigned char *buf)
1960 {
1961 struct se_tmr_req *se_tmr;
1962 struct iscsi_tmr_req *tmr_req;
1963 struct iscsi_tm *hdr;
1964 int out_of_order_cmdsn = 0, ret;
1965 u8 function, tcm_function = TMR_UNKNOWN;
1966
1967 hdr = (struct iscsi_tm *) buf;
1968 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
1969 function = hdr->flags;
1970
1971 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
1972 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
1973 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
1974 hdr->rtt, hdr->refcmdsn, conn->cid);
1975
1976 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1977 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1978 hdr->rtt != RESERVED_ITT)) {
1979 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
1980 hdr->rtt = RESERVED_ITT;
1981 }
1982
1983 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
1984 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1985 pr_err("Task Management Request TASK_REASSIGN not"
1986 " issued as immediate command, bad iSCSI Initiator"
1987 			" implementation\n");
1988 return iscsit_add_reject_cmd(cmd,
1989 ISCSI_REASON_PROTOCOL_ERROR, buf);
1990 }
1991 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1992 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
1993 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
1994
1995 cmd->data_direction = DMA_NONE;
1996 cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
1997 if (!cmd->tmr_req) {
1998 return iscsit_add_reject_cmd(cmd,
1999 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2000 buf);
2001 }
2002
2003 transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
2004 conn->sess->se_sess, 0, DMA_NONE,
2005 TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
2006
2007 target_get_sess_cmd(&cmd->se_cmd, true);
2008
2009 /*
2010 * TASK_REASSIGN for ERL=2 / connection stays inside of
2011 * LIO-Target $FABRIC_MOD
2012 */
2013 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2014 tcm_function = iscsit_convert_tmf(function);
2015 if (tcm_function == TMR_UNKNOWN) {
2016 pr_err("Unknown iSCSI TMR Function:"
2017 " 0x%02x\n", function);
2018 return iscsit_add_reject_cmd(cmd,
2019 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2020 }
2021 }
2022 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
2023 GFP_KERNEL);
2024 if (ret < 0)
2025 return iscsit_add_reject_cmd(cmd,
2026 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2027
2028 cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
2029
2030 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
2031 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
2032 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2033 cmd->init_task_tag = hdr->itt;
2034 cmd->targ_xfer_tag = 0xFFFFFFFF;
2035 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2036 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2037 se_tmr = cmd->se_cmd.se_tmr_req;
2038 tmr_req = cmd->tmr_req;
2039 /*
2040 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
2041 */
2042 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2043 ret = transport_lookup_tmr_lun(&cmd->se_cmd,
2044 scsilun_to_int(&hdr->lun));
2045 if (ret < 0) {
2046 se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
2047 goto attach;
2048 }
2049 }
2050
2051 switch (function) {
2052 case ISCSI_TM_FUNC_ABORT_TASK:
2053 se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
2054 if (se_tmr->response)
2055 goto attach;
2056 break;
2057 case ISCSI_TM_FUNC_ABORT_TASK_SET:
2058 case ISCSI_TM_FUNC_CLEAR_ACA:
2059 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
2060 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
2061 break;
2062 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2063 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
2064 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2065 goto attach;
2066 }
2067 break;
2068 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2069 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
2070 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2071 goto attach;
2072 }
2073 break;
2074 case ISCSI_TM_FUNC_TASK_REASSIGN:
2075 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
2076 /*
2077 * Perform sanity checks on the ExpDataSN only if the
2078 * TASK_REASSIGN was successful.
2079 */
2080 if (se_tmr->response)
2081 break;
2082
2083 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
2084 return iscsit_add_reject_cmd(cmd,
2085 ISCSI_REASON_BOOKMARK_INVALID, buf);
2086 break;
2087 default:
2088 pr_err("Unknown TMR function: 0x%02x, protocol"
2089 " error.\n", function);
2090 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
2091 goto attach;
2092 }
2093
2094 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2095 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
2096 se_tmr->call_transport = 1;
2097 attach:
2098 spin_lock_bh(&conn->cmd_lock);
2099 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2100 spin_unlock_bh(&conn->cmd_lock);
2101
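	/*
	 * Non-immediate TMRs must obey CmdSN ordering: a higher-than-expected
	 * CmdSN defers the response until the CmdSN gap is filled, while a
	 * lower-than-expected CmdSN (a retry) is dropped here.
	 */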
2102 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2103 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2104 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
2105 out_of_order_cmdsn = 1;
2106 } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2107 target_put_sess_cmd(&cmd->se_cmd);
2108 return 0;
2109 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2110 return -1;
2111 }
2112 }
2113 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2114
2115 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2116 return 0;
2117 /*
2118 * Found the referenced task, send to transport for processing.
2119 */
2120 if (se_tmr->call_transport)
2121 return transport_generic_handle_tmr(&cmd->se_cmd);
2122
2123 /*
2124 	 * The referenced LUN or task could not be found, or the Task
2125 	 * Management command was not authorized or supported.  Change
2126 	 * state and let the tx_thread send the response.
2127 *
2128 * For connection recovery, this is also the default action for
2129 * TMR TASK_REASSIGN.
2130 */
2131 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2132 target_put_sess_cmd(&cmd->se_cmd);
2133 return 0;
2134 }
2135 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
2136
2137 /* #warning FIXME: Support Text Command parameters besides SendTargets */
2138 int
2139 iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2140 struct iscsi_text *hdr)
2141 {
2142 u32 payload_length = ntoh24(hdr->dlength);
2143
2144 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
2145 pr_err("Unable to accept text parameter length: %u"
2146 			" greater than MaxXmitDataSegmentLength %u.\n",
2147 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
2148 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2149 (unsigned char *)hdr);
2150 }
2151
2152 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
2153 (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
2154 pr_err("Multi sequence text commands currently not supported\n");
2155 return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
2156 (unsigned char *)hdr);
2157 }
2158
2159 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
2160 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
2161 hdr->exp_statsn, payload_length);
2162
2163 cmd->iscsi_opcode = ISCSI_OP_TEXT;
2164 cmd->i_state = ISTATE_SEND_TEXTRSP;
2165 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2166 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2167 cmd->targ_xfer_tag = 0xFFFFFFFF;
2168 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2169 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2170 cmd->data_direction = DMA_NONE;
2171 kfree(cmd->text_in_ptr);
2172 cmd->text_in_ptr = NULL;
2173
2174 return 0;
2175 }
2176 EXPORT_SYMBOL(iscsit_setup_text_cmd);
2177
2178 int
2179 iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2180 struct iscsi_text *hdr)
2181 {
2182 unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
2183 int cmdsn_ret;
2184
2185 if (!text_in) {
2186 cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
2187 if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
2188 pr_err("Unable to locate text_in buffer for sendtargets"
2189 " discovery\n");
2190 goto reject;
2191 }
2192 goto empty_sendtargets;
2193 }
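	/*
	 * Only the SendTargets=<All|iqn.|eui.> text key is currently
	 * supported (see the FIXME above); any other key is rejected.
	 */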
2194 if (strncmp("SendTargets", text_in, 11) != 0) {
2195 pr_err("Received Text Data that is not"
2196 " SendTargets, cannot continue.\n");
2197 goto reject;
2198 }
2199 text_ptr = strchr(text_in, '=');
2200 if (!text_ptr) {
2201 pr_err("No \"=\" separator found in Text Data,"
2202 " cannot continue.\n");
2203 goto reject;
2204 }
2205 if (!strncmp("=All", text_ptr, 4)) {
2206 cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
2207 } else if (!strncmp("=iqn.", text_ptr, 5) ||
2208 !strncmp("=eui.", text_ptr, 5)) {
2209 cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
2210 } else {
2211 pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
2212 goto reject;
2213 }
2214
2215 spin_lock_bh(&conn->cmd_lock);
2216 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2217 spin_unlock_bh(&conn->cmd_lock);
2218
2219 empty_sendtargets:
2220 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2221
2222 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2223 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
2224 (unsigned char *)hdr, hdr->cmdsn);
2225 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2226 return -1;
2227
2228 return 0;
2229 }
2230
2231 return iscsit_execute_cmd(cmd, 0);
2232
2233 reject:
2234 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2235 (unsigned char *)hdr);
2236 }
2237 EXPORT_SYMBOL(iscsit_process_text_cmd);
2238
2239 static int
2240 iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2241 unsigned char *buf)
2242 {
2243 struct iscsi_text *hdr = (struct iscsi_text *)buf;
2244 char *text_in = NULL;
2245 u32 payload_length = ntoh24(hdr->dlength);
2246 int rx_size, rc;
2247
2248 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
2249 if (rc < 0)
2250 return 0;
2251
2252 rx_size = payload_length;
2253 if (payload_length) {
2254 u32 checksum = 0, data_crc = 0;
2255 u32 padding = 0, pad_bytes = 0;
2256 int niov = 0, rx_got;
2257 struct kvec iov[3];
2258
2259 text_in = kzalloc(payload_length, GFP_KERNEL);
2260 if (!text_in)
2261 goto reject;
2262
2263 cmd->text_in_ptr = text_in;
2264
2265 memset(iov, 0, 3 * sizeof(struct kvec));
2266 iov[niov].iov_base = text_in;
2267 iov[niov++].iov_len = payload_length;
2268
2269 padding = ((-payload_length) & 3);
2270 if (padding != 0) {
2271 iov[niov].iov_base = &pad_bytes;
2272 iov[niov++].iov_len = padding;
2273 rx_size += padding;
2274 pr_debug("Receiving %u additional bytes"
2275 " for padding.\n", padding);
2276 }
2277 if (conn->conn_ops->DataDigest) {
2278 iov[niov].iov_base = &checksum;
2279 iov[niov++].iov_len = ISCSI_CRC_LEN;
2280 rx_size += ISCSI_CRC_LEN;
2281 }
2282
2283 rx_got = rx_data(conn, &iov[0], niov, rx_size);
2284 if (rx_got != rx_size)
2285 goto reject;
2286
2287 if (conn->conn_ops->DataDigest) {
2288 iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
2289 text_in, payload_length,
2290 padding, (u8 *)&pad_bytes,
2291 (u8 *)&data_crc);
2292
2293 if (checksum != data_crc) {
2294 pr_err("Text data CRC32C DataDigest"
2295 " 0x%08x does not match computed"
2296 " 0x%08x\n", checksum, data_crc);
2297 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2298 pr_err("Unable to recover from"
2299 " Text Data digest failure while in"
2300 " ERL=0.\n");
2301 goto reject;
2302 } else {
2303 /*
2304 * Silently drop this PDU and let the
2305 * initiator plug the CmdSN gap.
2306 */
2307 pr_debug("Dropping Text"
2308 " Command CmdSN: 0x%08x due to"
2309 " DataCRC error.\n", hdr->cmdsn);
2310 kfree(text_in);
2311 return 0;
2312 }
2313 } else {
2314 pr_debug("Got CRC32C DataDigest"
2315 " 0x%08x for %u bytes of text data.\n",
2316 checksum, payload_length);
2317 }
2318 }
2319 text_in[payload_length - 1] = '\0';
2320 pr_debug("Successfully read %d bytes of text"
2321 " data.\n", payload_length);
2322 }
2323
2324 return iscsit_process_text_cmd(conn, cmd, hdr);
2325
2326 reject:
2327 kfree(cmd->text_in_ptr);
2328 cmd->text_in_ptr = NULL;
2329 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
2330 }
2331
2332 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2333 {
2334 struct iscsi_conn *conn_p;
2335 struct iscsi_session *sess = conn->sess;
2336
2337 pr_debug("Received logout request CLOSESESSION on CID: %hu"
2338 " for SID: %u.\n", conn->cid, conn->sess->sid);
2339
2340 atomic_set(&sess->session_logout, 1);
2341 atomic_set(&conn->conn_logout_remove, 1);
2342 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2343
2344 iscsit_inc_conn_usage_count(conn);
2345 iscsit_inc_session_usage_count(sess);
2346
2347 spin_lock_bh(&sess->conn_lock);
2348 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2349 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2350 continue;
2351
2352 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2353 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2354 }
2355 spin_unlock_bh(&sess->conn_lock);
2356
2357 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2358
2359 return 0;
2360 }
2361
2362 int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2363 {
2364 struct iscsi_conn *l_conn;
2365 struct iscsi_session *sess = conn->sess;
2366
2367 pr_debug("Received logout request CLOSECONNECTION for CID:"
2368 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2369
2370 /*
2371 * A Logout Request with a CLOSECONNECTION reason code for a CID
2372 * can arrive on a connection with a differing CID.
2373 */
2374 if (conn->cid == cmd->logout_cid) {
2375 spin_lock_bh(&conn->state_lock);
2376 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2377 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2378
2379 atomic_set(&conn->conn_logout_remove, 1);
2380 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2381 iscsit_inc_conn_usage_count(conn);
2382
2383 spin_unlock_bh(&conn->state_lock);
2384 } else {
2385 /*
2386 		 * CLOSECONNECTION requests for a different CID are handled in
2387 		 * iscsit_logout_post_handler_diffcid(), so as to give any
2388 		 * non-immediate command's CmdSN enough time to be
2389 		 * acknowledged on the connection in question.
2390 *
2391 * Here we simply make sure the CID is still around.
2392 */
2393 l_conn = iscsit_get_conn_from_cid(sess,
2394 cmd->logout_cid);
2395 if (!l_conn) {
2396 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2397 iscsit_add_cmd_to_response_queue(cmd, conn,
2398 cmd->i_state);
2399 return 0;
2400 }
2401
2402 iscsit_dec_conn_usage_count(l_conn);
2403 }
2404
2405 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2406
2407 return 0;
2408 }
2409
2410 int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2411 {
2412 struct iscsi_session *sess = conn->sess;
2413
2414 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2415 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2416
2417 if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2418 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2419 " while ERL!=2.\n");
2420 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2421 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2422 return 0;
2423 }
2424
2425 if (conn->cid == cmd->logout_cid) {
2426 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2427 " with CID: %hu on CID: %hu, implementation error.\n",
2428 cmd->logout_cid, conn->cid);
2429 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2430 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2431 return 0;
2432 }
2433
2434 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2435
2436 return 0;
2437 }
2438
2439 int
2440 iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2441 unsigned char *buf)
2442 {
2443 int cmdsn_ret, logout_remove = 0;
2444 u8 reason_code = 0;
2445 struct iscsi_logout *hdr;
2446 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2447
2448 hdr = (struct iscsi_logout *) buf;
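	/* The logout reason code occupies the low 7 bits of the flags byte. */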
2449 reason_code = (hdr->flags & 0x7f);
2450
2451 if (tiqn) {
2452 spin_lock(&tiqn->logout_stats.lock);
2453 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2454 tiqn->logout_stats.normal_logouts++;
2455 else
2456 tiqn->logout_stats.abnormal_logouts++;
2457 spin_unlock(&tiqn->logout_stats.lock);
2458 }
2459
2460 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2461 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2462 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2463 hdr->cid, conn->cid);
2464
2465 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2466 pr_err("Received logout request on connection that"
2467 " is not in logged in state, ignoring request.\n");
2468 iscsit_free_cmd(cmd, false);
2469 return 0;
2470 }
2471
2472 cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
2473 cmd->i_state = ISTATE_SEND_LOGOUTRSP;
2474 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2475 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2476 cmd->targ_xfer_tag = 0xFFFFFFFF;
2477 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2478 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2479 cmd->logout_cid = be16_to_cpu(hdr->cid);
2480 cmd->logout_reason = reason_code;
2481 cmd->data_direction = DMA_NONE;
2482
2483 /*
2484 * We need to sleep in these cases (by returning 1) until the Logout
2485 * Response gets sent in the tx thread.
2486 */
2487 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2488 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2489 be16_to_cpu(hdr->cid) == conn->cid))
2490 logout_remove = 1;
2491
2492 spin_lock_bh(&conn->cmd_lock);
2493 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2494 spin_unlock_bh(&conn->cmd_lock);
2495
2496 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2497 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2498
2499 /*
2500 * Immediate commands are executed, well, immediately.
2501 * Non-Immediate Logout Commands are executed in CmdSN order.
2502 */
2503 if (cmd->immediate_cmd) {
2504 int ret = iscsit_execute_cmd(cmd, 0);
2505
2506 if (ret < 0)
2507 return ret;
2508 } else {
2509 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2510 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
2511 logout_remove = 0;
2512 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2513 return -1;
2514 }
2515
2516 return logout_remove;
2517 }
2518 EXPORT_SYMBOL(iscsit_handle_logout_cmd);
2519
2520 int iscsit_handle_snack(
2521 struct iscsi_conn *conn,
2522 unsigned char *buf)
2523 {
2524 struct iscsi_snack *hdr;
2525
2526 hdr = (struct iscsi_snack *) buf;
2527 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2528
2529 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2530 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2531 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2532 hdr->begrun, hdr->runlength, conn->cid);
2533
2534 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2535 pr_err("Initiator sent SNACK request while in"
2536 " ErrorRecoveryLevel=0.\n");
2537 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2538 buf);
2539 }
2540 /*
2541 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
2542 * call from inside iscsi_send_recovery_datain_or_r2t().
2543 */
2544 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2545 case 0:
2546 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2547 hdr->itt,
2548 be32_to_cpu(hdr->ttt),
2549 be32_to_cpu(hdr->begrun),
2550 be32_to_cpu(hdr->runlength));
2551 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2552 return iscsit_handle_status_snack(conn, hdr->itt,
2553 be32_to_cpu(hdr->ttt),
2554 be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2555 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2556 return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2557 be32_to_cpu(hdr->begrun),
2558 be32_to_cpu(hdr->runlength));
2559 case ISCSI_FLAG_SNACK_TYPE_RDATA:
2560 /* FIXME: Support R-Data SNACK */
2561 pr_err("R-Data SNACK Not Supported.\n");
2562 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2563 buf);
2564 default:
2565 pr_err("Unknown SNACK type 0x%02x, protocol"
2566 " error.\n", hdr->flags & 0x0f);
2567 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2568 buf);
2569 }
2570
2571 return 0;
2572 }
2573 EXPORT_SYMBOL(iscsit_handle_snack);
2574
2575 static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
2576 {
2577 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2578 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2579 wait_for_completion_interruptible_timeout(
2580 &conn->rx_half_close_comp,
2581 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2582 }
2583 }
2584
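/*
 * Receive the unsolicited immediate data that arrived attached to a
 * SCSI WRITE, map it into the command's data scatterlist via kvecs,
 * and verify the optional pad bytes and DataDigest.
 */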
2585 static int iscsit_handle_immediate_data(
2586 struct iscsi_cmd *cmd,
2587 struct iscsi_scsi_req *hdr,
2588 u32 length)
2589 {
2590 int iov_ret, rx_got = 0, rx_size = 0;
2591 u32 checksum, iov_count = 0, padding = 0;
2592 struct iscsi_conn *conn = cmd->conn;
2593 struct kvec *iov;
2594
2595 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
2596 if (iov_ret < 0)
2597 return IMMEDIATE_DATA_CANNOT_RECOVER;
2598
2599 rx_size = length;
2600 iov_count = iov_ret;
2601 iov = &cmd->iov_data[0];
2602
2603 padding = ((-length) & 3);
2604 if (padding != 0) {
2605 iov[iov_count].iov_base = cmd->pad_bytes;
2606 iov[iov_count++].iov_len = padding;
2607 rx_size += padding;
2608 }
2609
2610 if (conn->conn_ops->DataDigest) {
2611 iov[iov_count].iov_base = &checksum;
2612 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2613 rx_size += ISCSI_CRC_LEN;
2614 }
2615
2616 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
2617
2618 iscsit_unmap_iovec(cmd);
2619
2620 if (rx_got != rx_size) {
2621 iscsit_rx_thread_wait_for_tcp(conn);
2622 return IMMEDIATE_DATA_CANNOT_RECOVER;
2623 }
2624
2625 if (conn->conn_ops->DataDigest) {
2626 u32 data_crc;
2627
2628 data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
2629 cmd->write_data_done, length, padding,
2630 cmd->pad_bytes);
2631
2632 if (checksum != data_crc) {
2633 pr_err("ImmediateData CRC32C DataDigest 0x%08x"
2634 " does not match computed 0x%08x\n", checksum,
2635 data_crc);
2636
2637 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2638 pr_err("Unable to recover from"
2639 " Immediate Data digest failure while"
2640 " in ERL=0.\n");
2641 iscsit_reject_cmd(cmd,
2642 ISCSI_REASON_DATA_DIGEST_ERROR,
2643 (unsigned char *)hdr);
2644 return IMMEDIATE_DATA_CANNOT_RECOVER;
2645 } else {
2646 iscsit_reject_cmd(cmd,
2647 ISCSI_REASON_DATA_DIGEST_ERROR,
2648 (unsigned char *)hdr);
2649 return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2650 }
2651 } else {
2652 pr_debug("Got CRC32C DataDigest 0x%08x for"
2653 " %u bytes of Immediate Data\n", checksum,
2654 length);
2655 }
2656 }
2657
2658 cmd->write_data_done += length;
2659
2660 if (cmd->write_data_done == cmd->se_cmd.data_length) {
2661 spin_lock_bh(&cmd->istate_lock);
2662 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
2663 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
2664 spin_unlock_bh(&cmd->istate_lock);
2665 }
2666
2667 return IMMEDIATE_DATA_NORMAL_OPERATION;
2668 }
2669
2670 /*
2671 * Called with sess->conn_lock held.
2672 */
2673 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections
2674 with active network interface */
2675 static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
2676 {
2677 struct iscsi_cmd *cmd;
2678 struct iscsi_conn *conn_p;
2679 bool found = false;
2680
2681 /*
2682 	 * Only send an Asynchronous Message on connections whose network
2683 * interface is still functional.
2684 */
2685 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2686 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2687 iscsit_inc_conn_usage_count(conn_p);
2688 found = true;
2689 break;
2690 }
2691 }
2692
2693 if (!found)
2694 return;
2695
2696 cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
2697 if (!cmd) {
2698 iscsit_dec_conn_usage_count(conn_p);
2699 return;
2700 }
2701
2702 cmd->logout_cid = conn->cid;
2703 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2704 cmd->i_state = ISTATE_SEND_ASYNCMSG;
2705
2706 spin_lock_bh(&conn_p->cmd_lock);
2707 list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
2708 spin_unlock_bh(&conn_p->cmd_lock);
2709
2710 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2711 iscsit_dec_conn_usage_count(conn_p);
2712 }
2713
2714 static int iscsit_send_conn_drop_async_message(
2715 struct iscsi_cmd *cmd,
2716 struct iscsi_conn *conn)
2717 {
2718 struct iscsi_async *hdr;
2719
2720 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2721
2722 hdr = (struct iscsi_async *) cmd->pdu;
2723 hdr->opcode = ISCSI_OP_ASYNC_EVENT;
2724 hdr->flags = ISCSI_FLAG_CMD_FINAL;
2725 cmd->init_task_tag = RESERVED_ITT;
2726 cmd->targ_xfer_tag = 0xFFFFFFFF;
2727 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2728 cmd->stat_sn = conn->stat_sn++;
2729 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2730 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2731 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2732 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2733 hdr->param1 = cpu_to_be16(cmd->logout_cid);
2734 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2735 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2736
2737 pr_debug("Sending Connection Dropped Async Message StatSN:"
2738 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2739 cmd->logout_cid, conn->cid);
2740
2741 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2742 }
2743
2744 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
2745 {
2746 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2747 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2748 wait_for_completion_interruptible_timeout(
2749 &conn->tx_half_close_comp,
2750 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
2751 }
2752 }
2753
2754 void
2755 iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2756 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2757 bool set_statsn)
2758 {
2759 hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
2760 hdr->flags = datain->flags;
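	/*
	 * When status is collapsed into the final Data-In PDU, residual
	 * over/underflow is reported here rather than in a separate
	 * SCSI Response PDU.
	 */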
2761 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2762 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2763 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2764 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2765 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2766 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2767 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2768 }
2769 }
2770 hton24(hdr->dlength, datain->length);
2771 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2772 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2773 (struct scsi_lun *)&hdr->lun);
2774 else
2775 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2776
2777 hdr->itt = cmd->init_task_tag;
2778
2779 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2780 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2781 else
2782 hdr->ttt = cpu_to_be32(0xFFFFFFFF);
2783 if (set_statsn)
2784 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2785 else
2786 hdr->statsn = cpu_to_be32(0xFFFFFFFF);
2787
2788 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2789 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2790 hdr->datasn = cpu_to_be32(datain->data_sn);
2791 hdr->offset = cpu_to_be32(datain->offset);
2792
2793 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2794 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2795 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2796 ntohl(hdr->offset), datain->length, conn->cid);
2797 }
2798 EXPORT_SYMBOL(iscsit_build_datain_pdu);
2799
2800 static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2801 {
2802 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
2803 struct iscsi_datain datain;
2804 struct iscsi_datain_req *dr;
2805 int eodr = 0, ret;
2806 bool set_statsn = false;
2807
2808 memset(&datain, 0, sizeof(struct iscsi_datain));
2809 dr = iscsit_get_datain_values(cmd, &datain);
2810 if (!dr) {
2811 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
2812 cmd->init_task_tag);
2813 return -1;
2814 }
2815 /*
2816 * Be paranoid and double check the logic for now.
2817 */
2818 if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
2819 pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2820 " datain.length: %u exceeds cmd->data_length: %u\n",
2821 cmd->init_task_tag, datain.offset, datain.length,
2822 cmd->se_cmd.data_length);
2823 return -1;
2824 }
2825
2826 atomic_long_add(datain.length, &conn->sess->tx_data_octets);
2827 /*
2828 	 * Special case for successful execution with both DATAIN
2829 * and Sense Data.
2830 */
2831 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
2832 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2833 datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
2834 else {
2835 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
2836 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2837 iscsit_increment_maxcmdsn(cmd, conn->sess);
2838 cmd->stat_sn = conn->stat_sn++;
2839 set_statsn = true;
2840 } else if (dr->dr_complete ==
2841 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2842 set_statsn = true;
2843 }
2844
2845 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
2846
2847 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
2848 if (ret < 0)
2849 return ret;
2850
2851 if (dr->dr_complete) {
2852 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2853 2 : 1;
2854 iscsit_free_datain_req(cmd, dr);
2855 }
2856
2857 return eodr;
2858 }
2859
2860 int
2861 iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2862 struct iscsi_logout_rsp *hdr)
2863 {
2864 struct iscsi_conn *logout_conn = NULL;
2865 struct iscsi_conn_recovery *cr = NULL;
2866 struct iscsi_session *sess = conn->sess;
2867 /*
2868 * The actual shutting down of Sessions and/or Connections
2869 * for CLOSESESSION and CLOSECONNECTION Logout Requests
2870 	 * is done in iscsit_logout_post_handler().
2871 */
2872 switch (cmd->logout_reason) {
2873 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
2874 pr_debug("iSCSI session logout successful, setting"
2875 " logout response to ISCSI_LOGOUT_SUCCESS.\n");
2876 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2877 break;
2878 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
2879 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
2880 break;
2881 /*
2882 * For CLOSECONNECTION logout requests carrying
2883 * a matching logout CID -> local CID, the reference
2884 * for the local CID will have been incremented in
2885 		 * iscsit_logout_closeconnection().
2886 *
2887 * For CLOSECONNECTION logout requests carrying
2888 * a different CID than the connection it arrived
2889 * on, the connection responding to cmd->logout_cid
2890 * is stopped in iscsit_logout_post_handler_diffcid().
2891 */
2892
2893 pr_debug("iSCSI CID: %hu logout on CID: %hu"
2894 " successful.\n", cmd->logout_cid, conn->cid);
2895 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2896 break;
2897 case ISCSI_LOGOUT_REASON_RECOVERY:
2898 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
2899 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
2900 break;
2901 /*
2902 * If the connection is still active from our point of view
2903 * force connection recovery to occur.
2904 */
2905 logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
2906 cmd->logout_cid);
2907 if (logout_conn) {
2908 iscsit_connection_reinstatement_rcfr(logout_conn);
2909 iscsit_dec_conn_usage_count(logout_conn);
2910 }
2911
2912 cr = iscsit_get_inactive_connection_recovery_entry(
2913 conn->sess, cmd->logout_cid);
2914 if (!cr) {
2915 pr_err("Unable to locate CID: %hu for"
2916 " REMOVECONNFORRECOVERY Logout Request.\n",
2917 cmd->logout_cid);
2918 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2919 break;
2920 }
2921
2922 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
2923
2924 pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
2925 " for recovery for CID: %hu on CID: %hu successful.\n",
2926 cmd->logout_cid, conn->cid);
2927 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2928 break;
2929 default:
2930 pr_err("Unknown cmd->logout_reason: 0x%02x\n",
2931 cmd->logout_reason);
2932 return -1;
2933 }
2934
2935 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2936 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2937 hdr->response = cmd->logout_response;
2938 hdr->itt = cmd->init_task_tag;
2939 cmd->stat_sn = conn->stat_sn++;
2940 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2941
2942 iscsit_increment_maxcmdsn(cmd, conn->sess);
2943 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2944 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2945
2946 pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
2947 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
2948 cmd->init_task_tag, cmd->stat_sn, hdr->response,
2949 cmd->logout_cid, conn->cid);
2950
2951 return 0;
2952 }
2953 EXPORT_SYMBOL(iscsit_build_logout_rsp);
2954
2955 static int
2956 iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2957 {
2958 int rc;
2959
2960 rc = iscsit_build_logout_rsp(cmd, conn,
2961 (struct iscsi_logout_rsp *)&cmd->pdu[0]);
2962 if (rc < 0)
2963 return rc;
2964
2965 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2966 }
2967
2968 void
2969 iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2970 struct iscsi_nopin *hdr, bool nopout_response)
2971 {
2972 hdr->opcode = ISCSI_OP_NOOP_IN;
2973 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2974 hton24(hdr->dlength, cmd->buf_ptr_size);
2975 if (nopout_response)
2976 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2977 hdr->itt = cmd->init_task_tag;
2978 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2979 cmd->stat_sn = (nopout_response) ? conn->stat_sn++ :
2980 conn->stat_sn;
2981 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2982
2983 if (nopout_response)
2984 iscsit_increment_maxcmdsn(cmd, conn->sess);
2985
2986 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2987 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2988
2989 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
2990 " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
2991 "Solicited" : "Unsolicited", cmd->init_task_tag,
2992 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
2993 }
2994 EXPORT_SYMBOL(iscsit_build_nopin_rsp);
2995
2996 /*
2997 * Unsolicited NOPIN, either requesting a response or not.
2998 */
2999 static int iscsit_send_unsolicited_nopin(
3000 struct iscsi_cmd *cmd,
3001 struct iscsi_conn *conn,
3002 int want_response)
3003 {
3004 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3005 int ret;
3006
3007 iscsit_build_nopin_rsp(cmd, conn, hdr, false);
3008
3009 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
3010 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
3011
3012 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3013 if (ret < 0)
3014 return ret;
3015
3016 spin_lock_bh(&cmd->istate_lock);
3017 cmd->i_state = want_response ?
3018 ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
3019 spin_unlock_bh(&cmd->istate_lock);
3020
3021 return 0;
3022 }
3023
3024 static int
3025 iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3026 {
3027 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3028
3029 iscsit_build_nopin_rsp(cmd, conn, hdr, true);
3030
3031 /*
3032 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
3033 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
3034 */
3035 pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
3036
3037 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3038 cmd->buf_ptr,
3039 cmd->buf_ptr_size);
3040 }
3041
3042 static int iscsit_send_r2t(
3043 struct iscsi_cmd *cmd,
3044 struct iscsi_conn *conn)
3045 {
3046 struct iscsi_r2t *r2t;
3047 struct iscsi_r2t_rsp *hdr;
3048 int ret;
3049
3050 r2t = iscsit_get_r2t_from_list(cmd);
3051 if (!r2t)
3052 return -1;
3053
3054 hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
3055 memset(hdr, 0, ISCSI_HDR_LEN);
3056 hdr->opcode = ISCSI_OP_R2T;
3057 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3058 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3059 (struct scsi_lun *)&hdr->lun);
3060 hdr->itt = cmd->init_task_tag;
3061 if (conn->conn_transport->iscsit_get_r2t_ttt)
3062 conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
3063 else
3064 r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
3065 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
3066 hdr->statsn = cpu_to_be32(conn->stat_sn);
3067 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3068 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3069 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
3070 hdr->data_offset = cpu_to_be32(r2t->offset);
3071 hdr->data_length = cpu_to_be32(r2t->xfer_len);
3072
3073 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
3074 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
3075 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
3076 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
3077 r2t->offset, r2t->xfer_len, conn->cid);
3078
3079 spin_lock_bh(&cmd->r2t_lock);
3080 r2t->sent_r2t = 1;
3081 spin_unlock_bh(&cmd->r2t_lock);
3082
3083 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3084 	if (ret < 0)
3085 		return ret;
3087
3088 spin_lock_bh(&cmd->dataout_timeout_lock);
3089 iscsit_start_dataout_timer(cmd, conn);
3090 spin_unlock_bh(&cmd->dataout_timeout_lock);
3091
3092 return 0;
3093 }
3094
3095 /*
3096 * @recovery: If called from iscsi_task_reassign_complete_write() for
3097 * connection recovery.
3098 */
3099 int iscsit_build_r2ts_for_cmd(
3100 struct iscsi_conn *conn,
3101 struct iscsi_cmd *cmd,
3102 bool recovery)
3103 {
3104 int first_r2t = 1;
3105 u32 offset = 0, xfer_len = 0;
3106
3107 spin_lock_bh(&cmd->r2t_lock);
3108 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
3109 spin_unlock_bh(&cmd->r2t_lock);
3110 return 0;
3111 }
3112
3113 if (conn->sess->sess_ops->DataSequenceInOrder &&
3114 !recovery)
3115 cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
3116
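	/*
	 * Queue R2Ts until either MaxOutstandingR2T is reached or the R2T
	 * covering the end of the transfer has been built; each R2T solicits
	 * at most MaxBurstLength bytes of Data-Out.
	 */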
3117 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
3118 if (conn->sess->sess_ops->DataSequenceInOrder) {
3119 offset = cmd->r2t_offset;
3120
3121 if (first_r2t && recovery) {
3122 int new_data_end = offset +
3123 conn->sess->sess_ops->MaxBurstLength -
3124 cmd->next_burst_len;
3125
3126 if (new_data_end > cmd->se_cmd.data_length)
3127 xfer_len = cmd->se_cmd.data_length - offset;
3128 else
3129 xfer_len =
3130 conn->sess->sess_ops->MaxBurstLength -
3131 cmd->next_burst_len;
3132 } else {
3133 int new_data_end = offset +
3134 conn->sess->sess_ops->MaxBurstLength;
3135
3136 if (new_data_end > cmd->se_cmd.data_length)
3137 xfer_len = cmd->se_cmd.data_length - offset;
3138 else
3139 xfer_len = conn->sess->sess_ops->MaxBurstLength;
3140 }
3141 cmd->r2t_offset += xfer_len;
3142
3143 if (cmd->r2t_offset == cmd->se_cmd.data_length)
3144 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3145 } else {
3146 struct iscsi_seq *seq;
3147
3148 seq = iscsit_get_seq_holder_for_r2t(cmd);
3149 if (!seq) {
3150 spin_unlock_bh(&cmd->r2t_lock);
3151 return -1;
3152 }
3153
3154 offset = seq->offset;
3155 xfer_len = seq->xfer_len;
3156
3157 if (cmd->seq_send_order == cmd->seq_count)
3158 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3159 }
3160 cmd->outstanding_r2ts++;
3161 first_r2t = 0;
3162
3163 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
3164 spin_unlock_bh(&cmd->r2t_lock);
3165 return -1;
3166 }
3167
3168 if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
3169 break;
3170 }
3171 spin_unlock_bh(&cmd->r2t_lock);
3172
3173 return 0;
3174 }
3175 EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
3176
3177 void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3178 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
3179 {
3180 if (inc_stat_sn)
3181 cmd->stat_sn = conn->stat_sn++;
3182
3183 atomic_long_inc(&conn->sess->rsp_pdus);
3184
3185 memset(hdr, 0, ISCSI_HDR_LEN);
3186 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
3187 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3188 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3189 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3190 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3191 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3192 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3193 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3194 }
3195 hdr->response = cmd->iscsi_response;
3196 hdr->cmd_status = cmd->se_cmd.scsi_status;
3197 hdr->itt = cmd->init_task_tag;
3198 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3199
3200 iscsit_increment_maxcmdsn(cmd, conn->sess);
3201 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3202 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3203
3204 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3205 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3206 		cmd->init_task_tag, cmd->stat_sn, cmd->iscsi_response,
3207 cmd->se_cmd.scsi_status, conn->cid);
3208 }
3209 EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3210
3211 static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3212 {
3213 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
3214 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
3215 void *data_buf = NULL;
3216 u32 padding = 0, data_buf_len = 0;
3217
3218 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
3219
3220 /*
3221 * Attach SENSE DATA payload to iSCSI Response PDU
3222 */
3223 if (cmd->se_cmd.sense_buffer &&
3224 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3225 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
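		/*
		 * The sense data segment of a SCSI Response PDU begins with
		 * a 2-byte SenseLength field (RFC 3720), which is why two
		 * bytes were reserved at the front of cmd->sense_buffer.
		 */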
3226 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3227 cmd->se_cmd.scsi_sense_length += sizeof (__be16);
3228
3229 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3230 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3231 data_buf = cmd->sense_buffer;
3232 data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
3233
3234 if (padding) {
3235 memset(cmd->sense_buffer +
3236 cmd->se_cmd.scsi_sense_length, 0, padding);
3237 pr_debug("Adding %u bytes of padding to"
3238 " SENSE.\n", padding);
3239 }
3240
3241 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3242 " Response PDU\n",
3243 cmd->se_cmd.scsi_sense_length);
3244 }
3245
3246 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
3247 data_buf_len);
3248 }
3249
3250 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3251 {
3252 switch (se_tmr->response) {
3253 case TMR_FUNCTION_COMPLETE:
3254 return ISCSI_TMF_RSP_COMPLETE;
3255 case TMR_TASK_DOES_NOT_EXIST:
3256 return ISCSI_TMF_RSP_NO_TASK;
3257 case TMR_LUN_DOES_NOT_EXIST:
3258 return ISCSI_TMF_RSP_NO_LUN;
3259 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3260 return ISCSI_TMF_RSP_NOT_SUPPORTED;
3261 case TMR_FUNCTION_REJECTED:
3262 default:
3263 return ISCSI_TMF_RSP_REJECTED;
3264 }
3265 }
3266
3267 void
3268 iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3269 struct iscsi_tm_rsp *hdr)
3270 {
3271 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3272
3273 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3274 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3275 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3276 hdr->itt = cmd->init_task_tag;
3277 cmd->stat_sn = conn->stat_sn++;
3278 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3279
3280 iscsit_increment_maxcmdsn(cmd, conn->sess);
3281 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3282 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3283
3284 pr_debug("Built Task Management Response ITT: 0x%08x,"
3285 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3286 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3287 }
3288 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
3289
3290 static int
3291 iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3292 {
3293 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3294
3295 iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3296
3297 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3298 }
3299
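/*
 * A portal bound to the IPv4/IPv6 wildcard address has no single address
 * to advertise; SendTargets responses substitute the connection's local
 * address instead (see iscsit_build_sendtargets_response() below).
 */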
3300 static bool iscsit_check_inaddr_any(struct iscsi_np *np)
3301 {
3302 bool ret = false;
3303
3304 if (np->np_sockaddr.ss_family == AF_INET6) {
3305 const struct sockaddr_in6 sin6 = {
3306 .sin6_addr = IN6ADDR_ANY_INIT };
3307 struct sockaddr_in6 *sock_in6 =
3308 (struct sockaddr_in6 *)&np->np_sockaddr;
3309
3310 if (!memcmp(sock_in6->sin6_addr.s6_addr,
3311 sin6.sin6_addr.s6_addr, 16))
3312 ret = true;
3313 } else {
3314 struct sockaddr_in * sock_in =
3315 (struct sockaddr_in *)&np->np_sockaddr;
3316
3317 if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY))
3318 ret = true;
3319 }
3320
3321 return ret;
3322 }
3323
3324 #define SENDTARGETS_BUF_LIMIT 32768U
3325
3326 static int
3327 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3328 enum iscsit_transport_type network_transport,
3329 int skip_bytes, bool *completed)
3330 {
3331 char *payload = NULL;
3332 struct iscsi_conn *conn = cmd->conn;
3333 struct iscsi_portal_group *tpg;
3334 struct iscsi_tiqn *tiqn;
3335 struct iscsi_tpg_np *tpg_np;
3336 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3337 int target_name_printed;
3338 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3339 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3340 bool active;
3341
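	/*
	 * Cap the SendTargets payload at the smaller of the initiator's
	 * MaxRecvDataSegmentLength and SENDTARGETS_BUF_LIMIT; longer listings
	 * are returned across multiple Text Responses, using skip_bytes to
	 * resume where the previous response left off.
	 */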
3342 buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
3343 SENDTARGETS_BUF_LIMIT);
3344
3345 payload = kzalloc(buffer_len, GFP_KERNEL);
3346 if (!payload)
3347 return -ENOMEM;
3348
3349 /*
3350 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
3351 * explicit case..
3352 */
3353 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
3354 text_ptr = strchr(text_in, '=');
3355 if (!text_ptr) {
3356 pr_err("Unable to locate '=' string in text_in:"
3357 " %s\n", text_in);
3358 kfree(payload);
3359 return -EINVAL;
3360 }
3361 /*
3362 * Skip over '=' character..
3363 */
3364 text_ptr += 1;
3365 }
3366
3367 spin_lock(&tiqn_lock);
3368 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3369 if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
3370 strcmp(tiqn->tiqn, text_ptr)) {
3371 continue;
3372 }
3373
3374 target_name_printed = 0;
3375
3376 spin_lock(&tiqn->tiqn_tpg_lock);
3377 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3378
3379 /* If demo_mode_discovery=0 and generate_node_acls=0
3380 			 * (demo mode disabled) do not return
3381 * TargetName+TargetAddress unless a NodeACL exists.
3382 */
3383
3384 if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3385 (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3386 (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
3387 cmd->conn->sess->sess_ops->InitiatorName))) {
3388 continue;
3389 }
3390
3391 spin_lock(&tpg->tpg_state_lock);
3392 active = (tpg->tpg_state == TPG_STATE_ACTIVE);
3393 spin_unlock(&tpg->tpg_state_lock);
3394
3395 if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
3396 continue;
3397
3398 spin_lock(&tpg->tpg_np_lock);
3399 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3400 tpg_np_list) {
3401 struct iscsi_np *np = tpg_np->tpg_np;
3402 bool inaddr_any = iscsit_check_inaddr_any(np);
3403 struct sockaddr_storage *sockaddr;
3404
3405 if (np->np_network_transport != network_transport)
3406 continue;
3407
3408 if (!target_name_printed) {
3409 len = sprintf(buf, "TargetName=%s",
3410 tiqn->tiqn);
3411 len += 1;
3412
3413 if ((len + payload_len) > buffer_len) {
3414 spin_unlock(&tpg->tpg_np_lock);
3415 spin_unlock(&tiqn->tiqn_tpg_lock);
3416 end_of_buf = 1;
3417 goto eob;
3418 }
3419
3420 if (skip_bytes && len <= skip_bytes) {
3421 skip_bytes -= len;
3422 } else {
3423 memcpy(payload + payload_len, buf, len);
3424 payload_len += len;
3425 target_name_printed = 1;
3426 if (len > skip_bytes)
3427 skip_bytes = 0;
3428 }
3429 }
3430
3431 if (inaddr_any)
3432 sockaddr = &conn->local_sockaddr;
3433 else
3434 sockaddr = &np->np_sockaddr;
3435
3436 len = sprintf(buf, "TargetAddress="
3437 "%pISpc,%hu",
3438 sockaddr,
3439 tpg->tpgt);
3440 len += 1;
3441
3442 if ((len + payload_len) > buffer_len) {
3443 spin_unlock(&tpg->tpg_np_lock);
3444 spin_unlock(&tiqn->tiqn_tpg_lock);
3445 end_of_buf = 1;
3446 goto eob;
3447 }
3448
3449 if (skip_bytes && len <= skip_bytes) {
3450 skip_bytes -= len;
3451 } else {
3452 memcpy(payload + payload_len, buf, len);
3453 payload_len += len;
3454 if (len > skip_bytes)
3455 skip_bytes = 0;
3456 }
3457 }
3458 spin_unlock(&tpg->tpg_np_lock);
3459 }
3460 spin_unlock(&tiqn->tiqn_tpg_lock);
3461 eob:
3462 if (end_of_buf) {
3463 *completed = false;
3464 break;
3465 }
3466
3467 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
3468 break;
3469 }
3470 spin_unlock(&tiqn_lock);
3471
3472 cmd->buf_ptr = payload;
3473
3474 return payload_len;
3475 }
3476
3477 int
3478 iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3479 struct iscsi_text_rsp *hdr,
3480 enum iscsit_transport_type network_transport)
3481 {
3482 int text_length, padding;
3483 bool completed = true;
3484
3485 text_length = iscsit_build_sendtargets_response(cmd, network_transport,
3486 cmd->read_data_done,
3487 &completed);
3488 if (text_length < 0)
3489 return text_length;
3490
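	/*
	 * A partial listing sets the Continue bit and records the bytes
	 * returned so far in read_data_done, so that a follow-up Text
	 * Request with the same TTT resumes where this response ended.
	 */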
3491 if (completed) {
3492 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3493 } else {
3494 hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
3495 cmd->read_data_done += text_length;
3496 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
3497 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
3498 }
3499 hdr->opcode = ISCSI_OP_TEXT_RSP;
3500 padding = ((-text_length) & 3);
3501 hton24(hdr->dlength, text_length);
3502 hdr->itt = cmd->init_task_tag;
3503 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3504 cmd->stat_sn = conn->stat_sn++;
3505 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3506
3507 iscsit_increment_maxcmdsn(cmd, conn->sess);
3508 /*
3509 * Reset maxcmdsn_inc in multi-part text payload exchanges to
3510 * correctly increment MaxCmdSN for each response answering a
3511 * non immediate text request with a valid CmdSN.
3512 */
3513 cmd->maxcmdsn_inc = 0;
3514 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3515 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3516
3517 pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
3518 " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
3519 cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
3520 !!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
3521 !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
3522
3523 return text_length + padding;
3524 }
3525 EXPORT_SYMBOL(iscsit_build_text_rsp);
3526
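/*
 * Build the Text Response header in cmd->pdu and hand it, together with the
 * SendTargets payload stashed in cmd->buf_ptr by iscsit_build_text_rsp(),
 * to the transport's iscsit_xmit_pdu() callback.
 */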
3527 static int iscsit_send_text_rsp(
3528 struct iscsi_cmd *cmd,
3529 struct iscsi_conn *conn)
3530 {
3531 struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
3532 int text_length;
3533
3534 text_length = iscsit_build_text_rsp(cmd, conn, hdr,
3535 conn->conn_transport->transport_type);
3536 if (text_length < 0)
3537 return text_length;
3538
3539 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3540 cmd->buf_ptr,
3541 text_length);
3542 }
3543
3544 void
3545 iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3546 struct iscsi_reject *hdr)
3547 {
3548 hdr->opcode = ISCSI_OP_REJECT;
3549 hdr->reason = cmd->reject_reason;
3550 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3551 hton24(hdr->dlength, ISCSI_HDR_LEN);
3552 hdr->ffffffff = cpu_to_be32(0xffffffff);
3553 cmd->stat_sn = conn->stat_sn++;
3554 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3555 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3556 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3557
3558 }
3559 EXPORT_SYMBOL(iscsit_build_reject);
3560
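/*
 * Build a Reject header and transmit it; the offending PDU header
 * (ISCSI_HDR_LEN bytes, carried in cmd->buf_ptr) forms the data segment.
 */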
3561 static int iscsit_send_reject(
3562 struct iscsi_cmd *cmd,
3563 struct iscsi_conn *conn)
3564 {
3565 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3566
3567 iscsit_build_reject(cmd, conn, hdr);
3568
3569 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3570 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3571
3572 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3573 cmd->buf_ptr,
3574 ISCSI_HDR_LEN);
3575 }
3576
3577 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
3578 {
3579 int ord, cpu;
3580 /*
3581 * bitmap_id is assigned from iscsit_global->ts_bitmap from
3582 * within iscsit_start_kthreads()
3583 *
3584 * Here we use bitmap_id to determine which CPU that this
3585 * iSCSI connection's RX/TX threads will be scheduled to
3586 * execute upon.
3587 */
3588 ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
3589 for_each_online_cpu(cpu) {
3590 if (ord-- == 0) {
3591 cpumask_set_cpu(cpu, conn->conn_cpumask);
3592 return;
3593 }
3594 }
3595 /*
3596 * This should never be reached.
3597 */
3598 dump_stack();
3599 cpumask_setall(conn->conn_cpumask);
3600 }
3601
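/*
 * Dispatch a single immediate queue state for @cmd: send an R2T, free a
 * command marked for removal, or emit an unsolicited NopIn (with or without
 * a response requested).  Returns 0 on success, -1 on transmit failure or
 * an unexpected state.
 */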
3602 int
3603 iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3604 {
3605 int ret;
3606
3607 switch (state) {
3608 case ISTATE_SEND_R2T:
3609 ret = iscsit_send_r2t(cmd, conn);
3610 if (ret < 0)
3611 goto err;
3612 break;
3613 case ISTATE_REMOVE:
3614 spin_lock_bh(&conn->cmd_lock);
3615 list_del_init(&cmd->i_conn_node);
3616 spin_unlock_bh(&conn->cmd_lock);
3617
3618 iscsit_free_cmd(cmd, false);
3619 break;
3620 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3621 iscsit_mod_nopin_response_timer(conn);
3622 ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
3623 if (ret < 0)
3624 goto err;
3625 break;
3626 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3627 ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
3628 if (ret < 0)
3629 goto err;
3630 break;
3631 default:
3632 pr_err("Unknown Opcode: 0x%02x ITT:"
3633 " 0x%08x, i_state: %d on CID: %hu\n",
3634 cmd->iscsi_opcode, cmd->init_task_tag, state,
3635 conn->cid);
3636 goto err;
3637 }
3638
3639 return 0;
3640
3641 err:
3642 return -1;
3643 }
3644 EXPORT_SYMBOL(iscsit_immediate_queue);
3645
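/*
 * Drain the per-connection immediate queue, freeing each queue request and
 * passing its command/state pair to the transport's iscsit_immediate_queue()
 * callback.
 */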
3646 static int
3647 iscsit_handle_immediate_queue(struct iscsi_conn *conn)
3648 {
3649 struct iscsit_transport *t = conn->conn_transport;
3650 struct iscsi_queue_req *qr;
3651 struct iscsi_cmd *cmd;
3652 u8 state;
3653 int ret;
3654
3655 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3656 atomic_set(&conn->check_immediate_queue, 0);
3657 cmd = qr->cmd;
3658 state = qr->state;
3659 kmem_cache_free(lio_qr_cache, qr);
3660
3661 ret = t->iscsit_immediate_queue(conn, cmd, state);
3662 if (ret < 0)
3663 return ret;
3664 }
3665
3666 return 0;
3667 }
3668
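/*
 * Dispatch a single response queue state.  For ISTATE_SEND_DATAIN the return
 * value of iscsit_send_datain() drives the loop: 0 means more DataIN remains,
 * 1 means the exchange is complete, and 2 means status must still follow
 * because sense data was set.  Returns 1 when the immediate queue should be
 * serviced next, 0 otherwise, and -1/-ECONNRESET on failure.
 */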
3669 int
3670 iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3671 {
3672 int ret;
3673
3674 check_rsp_state:
3675 switch (state) {
3676 case ISTATE_SEND_DATAIN:
3677 ret = iscsit_send_datain(cmd, conn);
3678 if (ret < 0)
3679 goto err;
3680 else if (!ret)
3681 /* more datain requests (drs) to send */
3682 goto check_rsp_state;
3683 else if (ret == 1) {
3684 /* all done */
3685 spin_lock_bh(&cmd->istate_lock);
3686 cmd->i_state = ISTATE_SENT_STATUS;
3687 spin_unlock_bh(&cmd->istate_lock);
3688
3689 if (atomic_read(&conn->check_immediate_queue))
3690 return 1;
3691
3692 return 0;
3693 } else if (ret == 2) {
3694 /* Still must send status,
3695 SCF_TRANSPORT_TASK_SENSE was set */
3696 spin_lock_bh(&cmd->istate_lock);
3697 cmd->i_state = ISTATE_SEND_STATUS;
3698 spin_unlock_bh(&cmd->istate_lock);
3699 state = ISTATE_SEND_STATUS;
3700 goto check_rsp_state;
3701 }
3702
3703 break;
3704 case ISTATE_SEND_STATUS:
3705 case ISTATE_SEND_STATUS_RECOVERY:
3706 ret = iscsit_send_response(cmd, conn);
3707 break;
3708 case ISTATE_SEND_LOGOUTRSP:
3709 ret = iscsit_send_logout(cmd, conn);
3710 break;
3711 case ISTATE_SEND_ASYNCMSG:
3712 ret = iscsit_send_conn_drop_async_message(
3713 cmd, conn);
3714 break;
3715 case ISTATE_SEND_NOPIN:
3716 ret = iscsit_send_nopin(cmd, conn);
3717 break;
3718 case ISTATE_SEND_REJECT:
3719 ret = iscsit_send_reject(cmd, conn);
3720 break;
3721 case ISTATE_SEND_TASKMGTRSP:
3722 ret = iscsit_send_task_mgt_rsp(cmd, conn);
3723 if (ret != 0)
3724 break;
3725 ret = iscsit_tmr_post_handler(cmd, conn);
3726 if (ret != 0)
3727 iscsit_fall_back_to_erl0(conn->sess);
3728 break;
3729 case ISTATE_SEND_TEXTRSP:
3730 ret = iscsit_send_text_rsp(cmd, conn);
3731 break;
3732 default:
3733 pr_err("Unknown Opcode: 0x%02x ITT:"
3734 " 0x%08x, i_state: %d on CID: %hu\n",
3735 cmd->iscsi_opcode, cmd->init_task_tag,
3736 state, conn->cid);
3737 goto err;
3738 }
3739 if (ret < 0)
3740 goto err;
3741
3742 switch (state) {
3743 case ISTATE_SEND_LOGOUTRSP:
3744 if (!iscsit_logout_post_handler(cmd, conn))
3745 return -ECONNRESET;
3746 /* fall through */
3747 case ISTATE_SEND_STATUS:
3748 case ISTATE_SEND_ASYNCMSG:
3749 case ISTATE_SEND_NOPIN:
3750 case ISTATE_SEND_STATUS_RECOVERY:
3751 case ISTATE_SEND_TEXTRSP:
3752 case ISTATE_SEND_TASKMGTRSP:
3753 case ISTATE_SEND_REJECT:
3754 spin_lock_bh(&cmd->istate_lock);
3755 cmd->i_state = ISTATE_SENT_STATUS;
3756 spin_unlock_bh(&cmd->istate_lock);
3757 break;
3758 default:
3759 pr_err("Unknown Opcode: 0x%02x ITT:"
3760 " 0x%08x, i_state: %d on CID: %hu\n",
3761 cmd->iscsi_opcode, cmd->init_task_tag,
3762 cmd->i_state, conn->cid);
3763 goto err;
3764 }
3765
3766 if (atomic_read(&conn->check_immediate_queue))
3767 return 1;
3768
3769 return 0;
3770
3771 err:
3772 return -1;
3773 }
3774 EXPORT_SYMBOL(iscsit_response_queue);
3775
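/*
 * Drain the per-connection response queue through the transport's
 * iscsit_response_queue() callback, stopping early when the immediate queue
 * needs attention (ret == 1) or on error.
 */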
3776 static int iscsit_handle_response_queue(struct iscsi_conn *conn)
3777 {
3778 struct iscsit_transport *t = conn->conn_transport;
3779 struct iscsi_queue_req *qr;
3780 struct iscsi_cmd *cmd;
3781 u8 state;
3782 int ret;
3783
3784 while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3785 cmd = qr->cmd;
3786 state = qr->state;
3787 kmem_cache_free(lio_qr_cache, qr);
3788
3789 ret = t->iscsit_response_queue(conn, cmd, state);
3790 if (ret == 1 || ret < 0)
3791 return ret;
3792 }
3793
3794 return 0;
3795 }
3796
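/*
 * Per-connection TX kthread: wait until either queue has work, service the
 * immediate queue, then the response queue, looping back whenever the
 * response path signals pending immediate work.  A SIGINT delivered by
 * connection recovery breaks the loop.
 */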
3797 int iscsi_target_tx_thread(void *arg)
3798 {
3799 int ret = 0;
3800 struct iscsi_conn *conn = arg;
3801 bool conn_freed = false;
3802
3803 /*
3804 * Allow ourselves to be interrupted by SIGINT so that a
3805 * connection recovery / failure event can be triggered externally.
3806 */
3807 allow_signal(SIGINT);
3808
3809 while (!kthread_should_stop()) {
3810 /*
3811 * Ensure that both TX and RX per connection kthreads
3812 * are scheduled to run on the same CPU.
3813 */
3814 iscsit_thread_check_cpumask(conn, current, 1);
3815
3816 wait_event_interruptible(conn->queues_wq,
3817 !iscsit_conn_all_queues_empty(conn));
3818
3819 if (signal_pending(current))
3820 goto transport_err;
3821
3822 get_immediate:
3823 ret = iscsit_handle_immediate_queue(conn);
3824 if (ret < 0)
3825 goto transport_err;
3826
3827 ret = iscsit_handle_response_queue(conn);
3828 if (ret == 1) {
3829 goto get_immediate;
3830 } else if (ret == -ECONNRESET) {
3831 conn_freed = true;
3832 goto out;
3833 } else if (ret < 0) {
3834 goto transport_err;
3835 }
3836 }
3837
3838 transport_err:
3839 /*
3840 * Avoid the normal connection failure code-path if this connection
3841 * is still within LOGIN mode, and iscsi_np process context is
3842 * responsible for cleaning up the early connection failure.
3843 */
3844 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
3845 iscsit_take_action_for_connection_exit(conn, &conn_freed);
3846 out:
3847 if (!conn_freed) {
3848 while (!kthread_should_stop()) {
3849 msleep(100);
3850 }
3851 }
3852 return 0;
3853 }
3854
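/*
 * Demultiplex a received PDU by opcode, allocating a struct iscsi_cmd where
 * required, and invoke the matching handler.  Allocation failures are
 * answered with a BOOKMARK_NO_RESOURCES Reject.
 */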
3855 static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
3856 {
3857 struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
3858 struct iscsi_cmd *cmd;
3859 int ret = 0;
3860
3861 switch (hdr->opcode & ISCSI_OPCODE_MASK) {
3862 case ISCSI_OP_SCSI_CMD:
3863 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3864 if (!cmd)
3865 goto reject;
3866
3867 ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3868 break;
3869 case ISCSI_OP_SCSI_DATA_OUT:
3870 ret = iscsit_handle_data_out(conn, buf);
3871 break;
3872 case ISCSI_OP_NOOP_OUT:
3873 cmd = NULL;
3874 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3875 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3876 if (!cmd)
3877 goto reject;
3878 }
3879 ret = iscsit_handle_nop_out(conn, cmd, buf);
3880 break;
3881 case ISCSI_OP_SCSI_TMFUNC:
3882 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3883 if (!cmd)
3884 goto reject;
3885
3886 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
3887 break;
3888 case ISCSI_OP_TEXT:
3889 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
3890 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
3891 if (!cmd)
3892 goto reject;
3893 } else {
3894 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3895 if (!cmd)
3896 goto reject;
3897 }
3898
3899 ret = iscsit_handle_text_cmd(conn, cmd, buf);
3900 break;
3901 case ISCSI_OP_LOGOUT:
3902 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3903 if (!cmd)
3904 goto reject;
3905
3906 ret = iscsit_handle_logout_cmd(conn, cmd, buf);
3907 if (ret > 0)
3908 wait_for_completion_timeout(&conn->conn_logout_comp,
3909 SECONDS_FOR_LOGOUT_COMP * HZ);
3910 break;
3911 case ISCSI_OP_SNACK:
3912 ret = iscsit_handle_snack(conn, buf);
3913 break;
3914 default:
3915 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
3916 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3917 pr_err("Cannot recover from unknown"
3918 " opcode while ERL=0, closing iSCSI connection.\n");
3919 return -1;
3920 }
3921 pr_err("Unable to recover from unknown opcode while OFMarker=No,"
3922 " closing iSCSI connection.\n");
3923 ret = -1;
3924 break;
3925 }
3926
3927 return ret;
3928 reject:
3929 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
3930 }
3931
3932 static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
3933 {
3934 bool ret;
3935
3936 spin_lock_bh(&conn->state_lock);
3937 ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
3938 spin_unlock_bh(&conn->state_lock);
3939
3940 return ret;
3941 }
3942
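/*
 * Receive loop for the traditional iscsi/tcp transport: read one Basic
 * Header Segment, verify the optional HeaderDigest CRC32C, and hand the
 * header to iscsi_target_rx_opcode().  A digest failure poisons the header
 * with 0xff so the opcode switch falls through to its default path.
 */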
3943 static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3944 {
3945 int ret;
3946 u8 *buffer, opcode;
3947 u32 checksum = 0, digest = 0;
3948 struct kvec iov;
3949
3950 buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
3951 if (!buffer)
3952 return;
3953
3954 while (!kthread_should_stop()) {
3955 /*
3956 * Ensure that both TX and RX per connection kthreads
3957 * are scheduled to run on the same CPU.
3958 */
3959 iscsit_thread_check_cpumask(conn, current, 0);
3960
3961 memset(&iov, 0, sizeof(struct kvec));
3962
3963 iov.iov_base = buffer;
3964 iov.iov_len = ISCSI_HDR_LEN;
3965
3966 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
3967 if (ret != ISCSI_HDR_LEN) {
3968 iscsit_rx_thread_wait_for_tcp(conn);
3969 break;
3970 }
3971
3972 if (conn->conn_ops->HeaderDigest) {
3973 iov.iov_base = &digest;
3974 iov.iov_len = ISCSI_CRC_LEN;
3975
3976 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
3977 if (ret != ISCSI_CRC_LEN) {
3978 iscsit_rx_thread_wait_for_tcp(conn);
3979 break;
3980 }
3981
3982 iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
3983 buffer, ISCSI_HDR_LEN,
3984 0, NULL, (u8 *)&checksum);
3985
3986 if (digest != checksum) {
3987 pr_err("HeaderDigest CRC32C failed,"
3988 " received 0x%08x, computed 0x%08x\n",
3989 digest, checksum);
3990 /*
3991 * Set the PDU to 0xff so it will intentionally
3992 * hit default in the switch below.
3993 */
3994 memset(buffer, 0xff, ISCSI_HDR_LEN);
3995 atomic_long_inc(&conn->sess->conn_digest_errors);
3996 } else {
3997 pr_debug("Got HeaderDigest CRC32C"
3998 " 0x%08x\n", checksum);
3999 }
4000 }
4001
4002 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
4003 break;
4004
4005 opcode = buffer[0] & ISCSI_OPCODE_MASK;
4006
4007 if (conn->sess->sess_ops->SessionType &&
4008 ((!(opcode & ISCSI_OP_TEXT)) ||
4009 (!(opcode & ISCSI_OP_LOGOUT)))) {
4010 pr_err("Received illegal iSCSI Opcode: 0x%02x"
4011 " while in Discovery Session, rejecting.\n", opcode);
4012 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
4013 buffer);
4014 break;
4015 }
4016
4017 ret = iscsi_target_rx_opcode(conn, buffer);
4018 if (ret < 0)
4019 break;
4020 }
4021
4022 kfree(buffer);
4023 }
4024
4025 int iscsi_target_rx_thread(void *arg)
4026 {
4027 int rc;
4028 struct iscsi_conn *conn = arg;
4029 bool conn_freed = false;
4030
4031 /*
4032 * Allow ourselves to be interrupted by SIGINT so that a
4033 * connection recovery / failure event can be triggered externally.
4034 */
4035 allow_signal(SIGINT);
4036 /*
4037 * Wait for iscsi_post_login_handler() to complete before allowing
4038 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4039 */
4040 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4041 if (rc < 0 || iscsi_target_check_conn_state(conn))
4042 goto out;
4043
4044 if (!conn->conn_transport->iscsit_get_rx_pdu)
4045 return 0;
4046
4047 conn->conn_transport->iscsit_get_rx_pdu(conn);
4048
4049 if (!signal_pending(current))
4050 atomic_set(&conn->transport_failed, 1);
4051 iscsit_take_action_for_connection_exit(conn, &conn_freed);
4052
4053 out:
4054 if (!conn_freed) {
4055 while (!kthread_should_stop()) {
4056 msleep(100);
4057 }
4058 }
4059
4060 return 0;
4061 }
4062
4063 static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
4064 {
4065 LIST_HEAD(tmp_list);
4066 struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
4067 struct iscsi_session *sess = conn->sess;
4068 /*
4069 * We expect this function to only ever be called from either RX or TX
4070 * thread context via iscsit_close_connection() once the other context
4071 * has been reset -> returned sleeping pre-handler state.
4072 */
4073 spin_lock_bh(&conn->cmd_lock);
4074 list_splice_init(&conn->conn_cmd_list, &tmp_list);
4075
4076 list_for_each_entry(cmd, &tmp_list, i_conn_node) {
4077 struct se_cmd *se_cmd = &cmd->se_cmd;
4078
4079 if (se_cmd->se_tfo != NULL) {
4080 spin_lock_irq(&se_cmd->t_state_lock);
4081 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
4082 spin_unlock_irq(&se_cmd->t_state_lock);
4083 }
4084 }
4085 spin_unlock_bh(&conn->cmd_lock);
4086
4087 list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4088 list_del_init(&cmd->i_conn_node);
4089
4090 iscsit_increment_maxcmdsn(cmd, sess);
4091 iscsit_free_cmd(cmd, true);
4092
4093 }
4094 }
4095
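/*
 * Stop any outstanding DataOUT timers for WRITE commands still queued on
 * this connection.
 */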
4096 static void iscsit_stop_timers_for_cmds(
4097 struct iscsi_conn *conn)
4098 {
4099 struct iscsi_cmd *cmd;
4100
4101 spin_lock_bh(&conn->cmd_lock);
4102 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
4103 if (cmd->data_direction == DMA_TO_DEVICE)
4104 iscsit_stop_dataout_timer(cmd);
4105 }
4106 spin_unlock_bh(&conn->cmd_lock);
4107 }
4108
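/*
 * Tear down a single iSCSI connection: stop the peer RX/TX kthread, release
 * the thread-set bitmap slot, stop per-command and NopIn timers, release or
 * prepare commands depending on whether connection recovery is active,
 * complete any pending reinstatement waiters, free digest contexts and the
 * socket, and finally decide whether the session itself must be failed,
 * stopped, or closed.
 */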
4109 int iscsit_close_connection(
4110 struct iscsi_conn *conn)
4111 {
4112 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
4113 struct iscsi_session *sess = conn->sess;
4114
4115 pr_debug("Closing iSCSI connection CID %hu on SID:"
4116 " %u\n", conn->cid, sess->sid);
4117 /*
4118 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
4119 * case just in case the RX Thread in iscsi_target_rx_opcode() is
4120 * sleeping and the logout response never got sent because the
4121 * connection failed.
4122 *
4123 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4124 * to signal logout response TX interrupt completion. Go ahead and skip
4125 * this for iser since isert_rx_opcode() does not wait on logout failure,
4126 * and to avoid iscsi_conn pointer dereference in iser-target code.
4127 */
4128 if (!conn->conn_transport->rdma_shutdown)
4129 complete(&conn->conn_logout_comp);
4130
4131 if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
4132 if (conn->tx_thread &&
4133 cmpxchg(&conn->tx_thread_active, true, false)) {
4134 send_sig(SIGINT, conn->tx_thread, 1);
4135 kthread_stop(conn->tx_thread);
4136 }
4137 } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
4138 if (conn->rx_thread &&
4139 cmpxchg(&conn->rx_thread_active, true, false)) {
4140 send_sig(SIGINT, conn->rx_thread, 1);
4141 kthread_stop(conn->rx_thread);
4142 }
4143 }
4144
4145 spin_lock(&iscsit_global->ts_bitmap_lock);
4146 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
4147 get_order(1));
4148 spin_unlock(&iscsit_global->ts_bitmap_lock);
4149
4150 iscsit_stop_timers_for_cmds(conn);
4151 iscsit_stop_nopin_response_timer(conn);
4152 iscsit_stop_nopin_timer(conn);
4153
4154 if (conn->conn_transport->iscsit_wait_conn)
4155 conn->conn_transport->iscsit_wait_conn(conn);
4156
4157 /*
4158 * During Connection recovery drop unacknowledged out of order
4159 * commands for this connection, and prepare the other commands
4160 * for reallegiance.
4161 *
4162 * During normal operation clear the out of order commands (but
4163 * do not free the struct iscsi_ooo_cmdsn's) and release all
4164 * struct iscsi_cmds.
4165 */
4166 if (atomic_read(&conn->connection_recovery)) {
4167 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
4168 iscsit_prepare_cmds_for_reallegiance(conn);
4169 } else {
4170 iscsit_clear_ooo_cmdsns_for_conn(conn);
4171 iscsit_release_commands_from_conn(conn);
4172 }
4173 iscsit_free_queue_reqs_for_conn(conn);
4174
4175 /*
4176 * Handle decrementing session or connection usage count if
4177 * a logout response was not able to be sent because the
4178 * connection failed. Fall back to Session Recovery here.
4179 */
4180 if (atomic_read(&conn->conn_logout_remove)) {
4181 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
4182 iscsit_dec_conn_usage_count(conn);
4183 iscsit_dec_session_usage_count(sess);
4184 }
4185 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
4186 iscsit_dec_conn_usage_count(conn);
4187
4188 atomic_set(&conn->conn_logout_remove, 0);
4189 atomic_set(&sess->session_reinstatement, 0);
4190 atomic_set(&sess->session_fall_back_to_erl0, 1);
4191 }
4192
4193 spin_lock_bh(&sess->conn_lock);
4194 list_del(&conn->conn_list);
4195
4196 /*
4197 * Attempt to let the Initiator know this connection failed by
4198 * sending a Connection Dropped Async Message on another
4199 * active connection.
4200 */
4201 if (atomic_read(&conn->connection_recovery))
4202 iscsit_build_conn_drop_async_message(conn);
4203
4204 spin_unlock_bh(&sess->conn_lock);
4205
4206 /*
4207 * If connection reinstatement is being performed on this connection,
4208 * up the connection reinstatement semaphore that is being blocked on
4209 * in iscsit_cause_connection_reinstatement().
4210 */
4211 spin_lock_bh(&conn->state_lock);
4212 if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4213 spin_unlock_bh(&conn->state_lock);
4214 complete(&conn->conn_wait_comp);
4215 wait_for_completion(&conn->conn_post_wait_comp);
4216 spin_lock_bh(&conn->state_lock);
4217 }
4218
4219 /*
4220 * If connection reinstatement is being performed on this connection
4221 * by receiving a REMOVECONNFORRECOVERY logout request, up the
4222 * connection wait rcfr semaphore that is being blocked on
4223 * an iscsit_connection_reinstatement_rcfr().
4224 */
4225 if (atomic_read(&conn->connection_wait_rcfr)) {
4226 spin_unlock_bh(&conn->state_lock);
4227 complete(&conn->conn_wait_rcfr_comp);
4228 wait_for_completion(&conn->conn_post_wait_comp);
4229 spin_lock_bh(&conn->state_lock);
4230 }
4231 atomic_set(&conn->connection_reinstatement, 1);
4232 spin_unlock_bh(&conn->state_lock);
4233
4234 /*
4235 * If any other processes are accessing this connection pointer we
4236 * must wait until they have completed.
4237 */
4238 iscsit_check_conn_usage_count(conn);
4239
4240 ahash_request_free(conn->conn_tx_hash);
4241 if (conn->conn_rx_hash) {
4242 struct crypto_ahash *tfm;
4243
4244 tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
4245 ahash_request_free(conn->conn_rx_hash);
4246 crypto_free_ahash(tfm);
4247 }
4248
4249 free_cpumask_var(conn->conn_cpumask);
4250
4251 kfree(conn->conn_ops);
4252 conn->conn_ops = NULL;
4253
4254 if (conn->sock)
4255 sock_release(conn->sock);
4256
4257 if (conn->conn_transport->iscsit_free_conn)
4258 conn->conn_transport->iscsit_free_conn(conn);
4259
4260 iscsit_put_transport(conn->conn_transport);
4261
4262 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4263 conn->conn_state = TARG_CONN_STATE_FREE;
4264 kfree(conn);
4265
4266 spin_lock_bh(&sess->conn_lock);
4267 atomic_dec(&sess->nconn);
4268 pr_debug("Decremented iSCSI connection count to %hu from node:"
4269 " %s\n", atomic_read(&sess->nconn),
4270 sess->sess_ops->InitiatorName);
4271 /*
4272 * Make sure that if one connection fails in a non ERL=2 iSCSI
4273 * Session, they all fail.
4274 */
4275 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
4276 !atomic_read(&sess->session_logout))
4277 atomic_set(&sess->session_fall_back_to_erl0, 1);
4278
4279 /*
4280 * If this was not the last connection in the session, and we are
4281 * performing session reinstatement or falling back to ERL=0, call
4282 * iscsit_stop_session() without sleeping to shutdown the other
4283 * active connections.
4284 */
4285 if (atomic_read(&sess->nconn)) {
4286 if (!atomic_read(&sess->session_reinstatement) &&
4287 !atomic_read(&sess->session_fall_back_to_erl0)) {
4288 spin_unlock_bh(&sess->conn_lock);
4289 return 0;
4290 }
4291 if (!atomic_read(&sess->session_stop_active)) {
4292 atomic_set(&sess->session_stop_active, 1);
4293 spin_unlock_bh(&sess->conn_lock);
4294 iscsit_stop_session(sess, 0, 0);
4295 return 0;
4296 }
4297 spin_unlock_bh(&sess->conn_lock);
4298 return 0;
4299 }
4300
4301 /*
4302 * If this was the last connection in the session and one of the
4303 * following is occurring:
4304 *
4305 * Session Reinstatement is not being performed and we are falling
4306 * back to ERL=0: call iscsit_close_session().
4307 *
4308 * Session Logout was requested. iscsit_close_session() will be called
4309 * elsewhere.
4310 *
4311 * Session Continuation is not being performed, start the Time2Retain
4312 * handler and check if sleep_on_sess_wait_sem is active.
4313 */
4314 if (!atomic_read(&sess->session_reinstatement) &&
4315 atomic_read(&sess->session_fall_back_to_erl0)) {
4316 spin_unlock_bh(&sess->conn_lock);
4317 iscsit_close_session(sess);
4318
4319 return 0;
4320 } else if (atomic_read(&sess->session_logout)) {
4321 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4322 sess->session_state = TARG_SESS_STATE_FREE;
4323 spin_unlock_bh(&sess->conn_lock);
4324
4325 if (atomic_read(&sess->sleep_on_sess_wait_comp))
4326 complete(&sess->session_wait_comp);
4327
4328 return 0;
4329 } else {
4330 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4331 sess->session_state = TARG_SESS_STATE_FAILED;
4332
4333 if (!atomic_read(&sess->session_continuation)) {
4334 spin_unlock_bh(&sess->conn_lock);
4335 iscsit_start_time2retain_handler(sess);
4336 } else
4337 spin_unlock_bh(&sess->conn_lock);
4338
4339 if (atomic_read(&sess->sleep_on_sess_wait_comp))
4340 complete(&sess->session_wait_comp);
4341
4342 return 0;
4343 }
4344 }
4345
4346 /*
4347 * If the iSCSI Session for the iSCSI Initiator Node exists,
4348 * forcefully shutdown the iSCSI NEXUS.
4349 */
4350 int iscsit_close_session(struct iscsi_session *sess)
4351 {
4352 struct iscsi_portal_group *tpg = sess->tpg;
4353 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4354
4355 if (atomic_read(&sess->nconn)) {
4356 pr_err("%d connection(s) still exist for iSCSI session"
4357 " to %s\n", atomic_read(&sess->nconn),
4358 sess->sess_ops->InitiatorName);
4359 BUG();
4360 }
4361
4362 spin_lock_bh(&se_tpg->session_lock);
4363 atomic_set(&sess->session_logout, 1);
4364 atomic_set(&sess->session_reinstatement, 1);
4365 iscsit_stop_time2retain_timer(sess);
4366 spin_unlock_bh(&se_tpg->session_lock);
4367
4368 /*
4369 * transport_deregister_session_configfs() will clear the
4370 * struct se_node_acl->nacl_sess pointer now as an iscsi_np process context
4371 * can be setting it again with __transport_register_session() in
4372 * iscsi_post_login_handler() again after the iscsit_stop_session()
4373 * completes in iscsi_np context.
4374 */
4375 transport_deregister_session_configfs(sess->se_sess);
4376
4377 /*
4378 * If any other processes are accessing this session pointer we must
4379 * wait until they have completed. If we are in an interrupt (the
4380 * time2retain handler) and hold an active session usage count, we
4381 * restart the timer and exit.
4382 */
4383 if (!in_interrupt()) {
4384 if (iscsit_check_session_usage_count(sess) == 1)
4385 iscsit_stop_session(sess, 1, 1);
4386 } else {
4387 if (iscsit_check_session_usage_count(sess) == 2) {
4388 atomic_set(&sess->session_logout, 0);
4389 iscsit_start_time2retain_handler(sess);
4390 return 0;
4391 }
4392 }
4393
4394 transport_deregister_session(sess->se_sess);
4395
4396 if (sess->sess_ops->ErrorRecoveryLevel == 2)
4397 iscsit_free_connection_recovery_entires(sess);
4398
4399 iscsit_free_all_ooo_cmdsns(sess);
4400
4401 spin_lock_bh(&se_tpg->session_lock);
4402 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4403 sess->session_state = TARG_SESS_STATE_FREE;
4404 pr_debug("Released iSCSI session from node: %s\n",
4405 sess->sess_ops->InitiatorName);
4406 tpg->nsessions--;
4407 if (tpg->tpg_tiqn)
4408 tpg->tpg_tiqn->tiqn_nsessions--;
4409
4410 pr_debug("Decremented number of active iSCSI Sessions on"
4411 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4412
4413 spin_lock(&sess_idr_lock);
4414 idr_remove(&sess_idr, sess->session_index);
4415 spin_unlock(&sess_idr_lock);
4416
4417 kfree(sess->sess_ops);
4418 sess->sess_ops = NULL;
4419 spin_unlock_bh(&se_tpg->session_lock);
4420
4421 kfree(sess);
4422 return 0;
4423 }
4424
4425 static void iscsit_logout_post_handler_closesession(
4426 struct iscsi_conn *conn)
4427 {
4428 struct iscsi_session *sess = conn->sess;
4429 int sleep = 1;
4430 /*
4431 * Traditional iscsi/tcp will invoke this logic from TX thread
4432 * context during session logout, so clear tx_thread_active and
4433 * sleep if iscsit_close_connection() has not already occurred.
4434 *
4435 * Since iser-target invokes this logic from its own workqueue,
4436 * always sleep waiting for RX/TX thread shutdown to complete
4437 * within iscsit_close_connection().
4438 */
4439 if (!conn->conn_transport->rdma_shutdown) {
4440 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4441 if (!sleep)
4442 return;
4443 }
4444
4445 atomic_set(&conn->conn_logout_remove, 0);
4446 complete(&conn->conn_logout_comp);
4447
4448 iscsit_dec_conn_usage_count(conn);
4449 iscsit_stop_session(sess, sleep, sleep);
4450 iscsit_dec_session_usage_count(sess);
4451 iscsit_close_session(sess);
4452 }
4453
4454 static void iscsit_logout_post_handler_samecid(
4455 struct iscsi_conn *conn)
4456 {
4457 int sleep = 1;
4458
4459 if (!conn->conn_transport->rdma_shutdown) {
4460 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4461 if (!sleep)
4462 return;
4463 }
4464
4465 atomic_set(&conn->conn_logout_remove, 0);
4466 complete(&conn->conn_logout_comp);
4467
4468 iscsit_cause_connection_reinstatement(conn, sleep);
4469 iscsit_dec_conn_usage_count(conn);
4470 }
4471
4472 static void iscsit_logout_post_handler_diffcid(
4473 struct iscsi_conn *conn,
4474 u16 cid)
4475 {
4476 struct iscsi_conn *l_conn;
4477 struct iscsi_session *sess = conn->sess;
4478 bool conn_found = false;
4479
4480 if (!sess)
4481 return;
4482
4483 spin_lock_bh(&sess->conn_lock);
4484 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4485 if (l_conn->cid == cid) {
4486 iscsit_inc_conn_usage_count(l_conn);
4487 conn_found = true;
4488 break;
4489 }
4490 }
4491 spin_unlock_bh(&sess->conn_lock);
4492
4493 if (!conn_found)
4494 return;
4495
4496 if (l_conn->sock)
4497 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4498
4499 spin_lock_bh(&l_conn->state_lock);
4500 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4501 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4502 spin_unlock_bh(&l_conn->state_lock);
4503
4504 iscsit_cause_connection_reinstatement(l_conn, 1);
4505 iscsit_dec_conn_usage_count(l_conn);
4506 }
4507
4508 /*
4509 * Return of 0 causes the TX thread to restart.
4510 */
4511 int iscsit_logout_post_handler(
4512 struct iscsi_cmd *cmd,
4513 struct iscsi_conn *conn)
4514 {
4515 int ret = 0;
4516
4517 switch (cmd->logout_reason) {
4518 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4519 switch (cmd->logout_response) {
4520 case ISCSI_LOGOUT_SUCCESS:
4521 case ISCSI_LOGOUT_CLEANUP_FAILED:
4522 default:
4523 iscsit_logout_post_handler_closesession(conn);
4524 break;
4525 }
4526 ret = 0;
4527 break;
4528 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4529 if (conn->cid == cmd->logout_cid) {
4530 switch (cmd->logout_response) {
4531 case ISCSI_LOGOUT_SUCCESS:
4532 case ISCSI_LOGOUT_CLEANUP_FAILED:
4533 default:
4534 iscsit_logout_post_handler_samecid(conn);
4535 break;
4536 }
4537 ret = 0;
4538 } else {
4539 switch (cmd->logout_response) {
4540 case ISCSI_LOGOUT_SUCCESS:
4541 iscsit_logout_post_handler_diffcid(conn,
4542 cmd->logout_cid);
4543 break;
4544 case ISCSI_LOGOUT_CID_NOT_FOUND:
4545 case ISCSI_LOGOUT_CLEANUP_FAILED:
4546 default:
4547 break;
4548 }
4549 ret = 1;
4550 }
4551 break;
4552 case ISCSI_LOGOUT_REASON_RECOVERY:
4553 switch (cmd->logout_response) {
4554 case ISCSI_LOGOUT_SUCCESS:
4555 case ISCSI_LOGOUT_CID_NOT_FOUND:
4556 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4557 case ISCSI_LOGOUT_CLEANUP_FAILED:
4558 default:
4559 break;
4560 }
4561 ret = 1;
4562 break;
4563 default:
4564 break;
4565
4566 }
4567 return ret;
4568 }
4569 EXPORT_SYMBOL(iscsit_logout_post_handler);
4570
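/*
 * Move every connection in the session to CLEANUP_WAIT and mark the session
 * itself as FAILED.
 */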
4571 void iscsit_fail_session(struct iscsi_session *sess)
4572 {
4573 struct iscsi_conn *conn;
4574
4575 spin_lock_bh(&sess->conn_lock);
4576 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4577 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4578 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4579 }
4580 spin_unlock_bh(&sess->conn_lock);
4581
4582 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4583 sess->session_state = TARG_SESS_STATE_FAILED;
4584 }
4585
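/*
 * Force reinstatement of every connection in the session, wait for the last
 * one to finish if any remain, then release the session via
 * iscsit_close_session().
 */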
4586 int iscsit_free_session(struct iscsi_session *sess)
4587 {
4588 u16 conn_count = atomic_read(&sess->nconn);
4589 struct iscsi_conn *conn, *conn_tmp = NULL;
4590 int is_last;
4591
4592 spin_lock_bh(&sess->conn_lock);
4593 atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4594
4595 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4596 conn_list) {
4597 if (conn_count == 0)
4598 break;
4599
4600 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4601 is_last = 1;
4602 } else {
4603 iscsit_inc_conn_usage_count(conn_tmp);
4604 is_last = 0;
4605 }
4606 iscsit_inc_conn_usage_count(conn);
4607
4608 spin_unlock_bh(&sess->conn_lock);
4609 iscsit_cause_connection_reinstatement(conn, 1);
4610 spin_lock_bh(&sess->conn_lock);
4611
4612 iscsit_dec_conn_usage_count(conn);
4613 if (is_last == 0)
4614 iscsit_dec_conn_usage_count(conn_tmp);
4615
4616 conn_count--;
4617 }
4618
4619 if (atomic_read(&sess->nconn)) {
4620 spin_unlock_bh(&sess->conn_lock);
4621 wait_for_completion(&sess->session_wait_comp);
4622 } else
4623 spin_unlock_bh(&sess->conn_lock);
4624
4625 iscsit_close_session(sess);
4626 return 0;
4627 }
4628
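/*
 * Like iscsit_free_session(), but the caller chooses whether to sleep waiting
 * for connection and session teardown, and the session itself is not released
 * here.
 */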
4629 void iscsit_stop_session(
4630 struct iscsi_session *sess,
4631 int session_sleep,
4632 int connection_sleep)
4633 {
4634 u16 conn_count = atomic_read(&sess->nconn);
4635 struct iscsi_conn *conn, *conn_tmp = NULL;
4636 int is_last;
4637
4638 spin_lock_bh(&sess->conn_lock);
4639 if (session_sleep)
4640 atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4641
4642 if (connection_sleep) {
4643 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4644 conn_list) {
4645 if (conn_count == 0)
4646 break;
4647
4648 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4649 is_last = 1;
4650 } else {
4651 iscsit_inc_conn_usage_count(conn_tmp);
4652 is_last = 0;
4653 }
4654 iscsit_inc_conn_usage_count(conn);
4655
4656 spin_unlock_bh(&sess->conn_lock);
4657 iscsit_cause_connection_reinstatement(conn, 1);
4658 spin_lock_bh(&sess->conn_lock);
4659
4660 iscsit_dec_conn_usage_count(conn);
4661 if (is_last == 0)
4662 iscsit_dec_conn_usage_count(conn_tmp);
4663 conn_count--;
4664 }
4665 } else {
4666 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
4667 iscsit_cause_connection_reinstatement(conn, 0);
4668 }
4669
4670 if (session_sleep && atomic_read(&sess->nconn)) {
4671 spin_unlock_bh(&sess->conn_lock);
4672 wait_for_completion(&sess->session_wait_comp);
4673 } else
4674 spin_unlock_bh(&sess->conn_lock);
4675 }
4676
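/*
 * Shut down every session on a TPG (refusing to do so when sessions exist
 * and @force is not set), skipping sessions that are already logging out,
 * falling back to ERL=0, or past Time2Retain expiry.
 */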
4677 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4678 {
4679 struct iscsi_session *sess;
4680 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4681 struct se_session *se_sess, *se_sess_tmp;
4682 LIST_HEAD(free_list);
4683 int session_count = 0;
4684
4685 spin_lock_bh(&se_tpg->session_lock);
4686 if (tpg->nsessions && !force) {
4687 spin_unlock_bh(&se_tpg->session_lock);
4688 return -1;
4689 }
4690
4691 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
4692 sess_list) {
4693 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4694
4695 spin_lock(&sess->conn_lock);
4696 if (atomic_read(&sess->session_fall_back_to_erl0) ||
4697 atomic_read(&sess->session_logout) ||
4698 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
4699 spin_unlock(&sess->conn_lock);
4700 continue;
4701 }
4702 atomic_set(&sess->session_reinstatement, 1);
4703 atomic_set(&sess->session_fall_back_to_erl0, 1);
4704 spin_unlock(&sess->conn_lock);
4705
4706 list_move_tail(&se_sess->sess_list, &free_list);
4707 }
4708 spin_unlock_bh(&se_tpg->session_lock);
4709
4710 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4711 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4712
4713 iscsit_free_session(sess);
4714 session_count++;
4715 }
4716
4717 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4718 " Group: %hu\n", session_count, tpg->tpgt);
4719 return 0;
4720 }
4721
4722 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4723 MODULE_VERSION("4.1.x");
4724 MODULE_AUTHOR("nab@Linux-iSCSI.org");
4725 MODULE_LICENSE("GPL");
4726
4727 module_init(iscsi_target_init_module);
4728 module_exit(iscsi_target_cleanup_module);
4729