1 /*
2 * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver.
3 *
4 * Copyright (c) 2006 - 2013 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie
7 * Copyright (c) 2014, QLogic Corporation
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 *
13 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
14 * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
15 * Maintained by: QLogic-Storage-Upstream@qlogic.com
16 */
17
18 #include <linux/slab.h>
19 #include <scsi/scsi_tcq.h>
20 #include <scsi/libiscsi.h>
21 #include "bnx2i.h"
22
23 struct scsi_transport_template *bnx2i_scsi_xport_template;
24 struct iscsi_transport bnx2i_iscsi_transport;
25 static struct scsi_host_template bnx2i_host_template;
26
27 /*
28 * Global endpoint resource info
29 */
30 static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
31
32 DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
33
34 static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
35 {
36 int retval = 0;
37
38 if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
39 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
40 test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
41 retval = -EPERM;
42 return retval;
43 }
44
45 /**
46 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
47 * @cmd: iscsi cmd struct pointer
48 * @buf_off: absolute buffer offset
49 * @start_bd_off: u32 pointer to return the offset within the BD
50 * indicated by 'start_bd_idx' on which 'buf_off' falls
51 * @start_bd_idx: index of the BD on which 'buf_off' falls
52 *
53 * identifies & marks various bd info for scsi command's imm data,
54 * unsolicited data and the first solicited data seq.
55 */
56 static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
57 u32 *start_bd_off, u32 *start_bd_idx)
58 {
59 struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
60 u32 cur_offset = 0;
61 u32 cur_bd_idx = 0;
62
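/*
 * e.g. with two 4KB BDs and buf_off = 5KB, the loop below walks past the
 * first BD and returns start_bd_idx = 1, start_bd_off = 1KB.
 */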
63 if (buf_off) {
64 while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
65 cur_offset += bd_tbl->buffer_length;
66 cur_bd_idx++;
67 bd_tbl++;
68 }
69 }
70
71 *start_bd_off = buf_off - cur_offset;
72 *start_bd_idx = cur_bd_idx;
73 }
74
75 /**
76 * bnx2i_setup_write_cmd_bd_info - sets up BD various information
77 * @task: transport layer's cmd struct pointer
78 *
79 * identifies & marks various bd info for scsi command's immediate data,
80 * unsolicited data and first solicited data seq which includes BD start
81 * index & BD buf off. This function takes into account iSCSI parameters such
82 * as whether immediate data and unsolicited data are supported on this connection.
83 */
84 static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
85 {
86 struct bnx2i_cmd *cmd = task->dd_data;
87 u32 start_bd_offset;
88 u32 start_bd_idx;
89 u32 buffer_offset = 0;
90 u32 cmd_len = cmd->req.total_data_transfer_length;
91
92 /* if ImmediateData is turned off & InitialR2T is turned on,
93 * there will be no immediate or unsolicited data, just return.
94 */
95 if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
96 return;
97
98 /* Immediate data */
99 buffer_offset += task->imm_count;
100 if (task->imm_count == cmd_len)
101 return;
102
103 if (iscsi_task_has_unsol_data(task)) {
104 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
105 &start_bd_offset, &start_bd_idx);
106 cmd->req.ud_buffer_offset = start_bd_offset;
107 cmd->req.ud_start_bd_index = start_bd_idx;
108 buffer_offset += task->unsol_r2t.data_length;
109 }
110
111 if (buffer_offset != cmd_len) {
112 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
113 &start_bd_offset, &start_bd_idx);
114 if ((start_bd_offset > task->conn->session->first_burst) ||
115 (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
116 int i = 0;
117
118 iscsi_conn_printk(KERN_ALERT, task->conn,
119 "bnx2i- error, buf offset 0x%x "
120 "bd_valid %d use_sg %d\n",
121 buffer_offset, cmd->io_tbl.bd_valid,
122 scsi_sg_count(cmd->scsi_cmd));
123 for (i = 0; i < cmd->io_tbl.bd_valid; i++)
124 iscsi_conn_printk(KERN_ALERT, task->conn,
125 "bnx2i err, bd[%d]: len %x\n",
126 i, cmd->io_tbl.bd_tbl[i].\
127 buffer_length);
128 }
129 cmd->req.sd_buffer_offset = start_bd_offset;
130 cmd->req.sd_start_bd_index = start_bd_idx;
131 }
132 }
133
134
135
136 /**
137 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
138 * @hba: adapter instance
139 * @cmd: iscsi cmd struct pointer
140 *
141 * map SG list
142 */
143 static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
144 {
145 struct scsi_cmnd *sc = cmd->scsi_cmd;
146 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
147 struct scatterlist *sg;
148 int byte_count = 0;
149 int bd_count = 0;
150 int sg_count;
151 int sg_len;
152 u64 addr;
153 int i;
154
155 BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
156
157 sg_count = scsi_dma_map(sc);
158
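/* fill one BD per SG element, splitting each 64-bit DMA address into lo/hi */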
159 scsi_for_each_sg(sc, sg, sg_count, i) {
160 sg_len = sg_dma_len(sg);
161 addr = (u64) sg_dma_address(sg);
162 bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
163 bd[bd_count].buffer_addr_hi = addr >> 32;
164 bd[bd_count].buffer_length = sg_len;
165 bd[bd_count].flags = 0;
166 if (bd_count == 0)
167 bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
168
169 byte_count += sg_len;
170 bd_count++;
171 }
172
173 if (bd_count)
174 bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
175
176 BUG_ON(byte_count != scsi_bufflen(sc));
177 return bd_count;
178 }
179
180 /**
181 * bnx2i_iscsi_map_sg_list - maps SG list
182 * @cmd: iscsi cmd struct pointer
183 *
184 * creates BD list table for the command
185 */
186 static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
187 {
188 int bd_count;
189
190 bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
191 if (!bd_count) {
192 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
193
194 bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
195 bd[0].buffer_length = bd[0].flags = 0;
196 }
197 cmd->io_tbl.bd_valid = bd_count;
198 }
199
200
201 /**
202 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
203 * @cmd: iscsi cmd struct pointer
204 *
205 * unmap IO buffers and invalidate the BD table
206 */
207 void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
208 {
209 struct scsi_cmnd *sc = cmd->scsi_cmd;
210
211 if (cmd->io_tbl.bd_valid && sc) {
212 scsi_dma_unmap(sc);
213 cmd->io_tbl.bd_valid = 0;
214 }
215 }
216
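/**
 * bnx2i_setup_cmd_wqe_template - initializes per-command WQE template
 * @cmd: iscsi cmd struct pointer
 *
 * Clears the command request structure and pre-loads the BD table DMA
 * address so each transmit path only needs to fill in opcode specific fields.
 */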
217 static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
218 {
219 memset(&cmd->req, 0x00, sizeof(cmd->req));
220 cmd->req.op_code = 0xFF;
221 cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
222 cmd->req.bd_list_addr_hi =
223 (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
224
225 }
226
227
228 /**
229 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
230 * @hba: pointer to adapter instance
231 * @bnx2i_conn: pointer to iscsi connection
232 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
233 *
234 * update iscsi cid table entry with connection pointer. This enables
235 * driver to quickly get hold of connection structure pointer in
236 * completion/interrupt thread using iscsi context ID
237 */
238 static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
239 struct bnx2i_conn *bnx2i_conn,
240 u32 iscsi_cid)
241 {
242 if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
243 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
244 "conn bind - entry #%d not free\n", iscsi_cid);
245 return -EBUSY;
246 }
247
248 hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
249 return 0;
250 }
251
252
253 /**
254 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
255 * @hba: pointer to adapter instance
256 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
257 */
258 struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
259 u16 iscsi_cid)
260 {
261 if (!hba->cid_que.conn_cid_tbl) {
262 printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
263 return NULL;
264
265 } else if (iscsi_cid >= hba->max_active_conns) {
266 printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
267 return NULL;
268 }
269 return hba->cid_que.conn_cid_tbl[iscsi_cid];
270 }
271
272
273 /**
274 * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool
275 * @hba: pointer to adapter instance
276 */
277 static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
278 {
279 int idx;
280
281 if (!hba->cid_que.cid_free_cnt)
282 return -1;
283
284 idx = hba->cid_que.cid_q_cons_idx;
285 hba->cid_que.cid_q_cons_idx++;
286 if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
287 hba->cid_que.cid_q_cons_idx = 0;
288
289 hba->cid_que.cid_free_cnt--;
290 return hba->cid_que.cid_que[idx];
291 }
292
293
294 /**
295 * bnx2i_free_iscsi_cid - returns tcp port to free list
296 * @hba: pointer to adapter instance
297 * @iscsi_cid: iscsi context ID to free
298 */
299 static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
300 {
301 int idx;
302
303 if (iscsi_cid == (u16) -1)
304 return;
305
306 hba->cid_que.cid_free_cnt++;
307
308 idx = hba->cid_que.cid_q_prod_idx;
309 hba->cid_que.cid_que[idx] = iscsi_cid;
310 hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
311 hba->cid_que.cid_q_prod_idx++;
312 if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
313 hba->cid_que.cid_q_prod_idx = 0;
314 }
315
316
317 /**
318 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
319 * @hba: pointer to adapter instance
320 *
321 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
322 * and initializes table attributes
323 */
324 static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
325 {
326 int mem_size;
327 int i;
328
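/* both allocations below are rounded up to a whole page */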
329 mem_size = hba->max_active_conns * sizeof(u32);
330 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
331
332 hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
333 if (!hba->cid_que.cid_que_base)
334 return -ENOMEM;
335
336 mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
337 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
338 hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
339 if (!hba->cid_que.conn_cid_tbl) {
340 kfree(hba->cid_que.cid_que_base);
341 hba->cid_que.cid_que_base = NULL;
342 return -ENOMEM;
343 }
344
345 hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
346 hba->cid_que.cid_q_prod_idx = 0;
347 hba->cid_que.cid_q_cons_idx = 0;
348 hba->cid_que.cid_q_max_idx = hba->max_active_conns;
349 hba->cid_que.cid_free_cnt = hba->max_active_conns;
350
351 for (i = 0; i < hba->max_active_conns; i++) {
352 hba->cid_que.cid_que[i] = i;
353 hba->cid_que.conn_cid_tbl[i] = NULL;
354 }
355 return 0;
356 }
357
358
359 /**
360 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
361 * @hba: pointer to adapter instance
362 */
363 static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
364 {
365 kfree(hba->cid_que.cid_que_base);
366 hba->cid_que.cid_que_base = NULL;
367
368 kfree(hba->cid_que.conn_cid_tbl);
369 hba->cid_que.conn_cid_tbl = NULL;
370 }
371
372
373 /**
374 * bnx2i_alloc_ep - allocates ep structure from global pool
375 * @hba: pointer to adapter instance
376 *
377 * routine allocates a free endpoint structure from global pool and
378 * a tcp port to be used for this connection. Global resource lock,
379 * 'bnx2i_resc_lock' is held while accessing shared global data structures
380 */
381 static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
382 {
383 struct iscsi_endpoint *ep;
384 struct bnx2i_endpoint *bnx2i_ep;
385 u32 ec_div;
386
387 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
388 if (!ep) {
389 printk(KERN_ERR "bnx2i: Could not allocate ep\n");
390 return NULL;
391 }
392
393 bnx2i_ep = ep->dd_data;
394 bnx2i_ep->cls_ep = ep;
395 INIT_LIST_HEAD(&bnx2i_ep->link);
396 bnx2i_ep->state = EP_STATE_IDLE;
397 bnx2i_ep->ep_iscsi_cid = (u16) -1;
398 bnx2i_ep->hba = hba;
399 bnx2i_ep->hba_age = hba->age;
400
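/* derive ec_shift as log2(event_coal_div), e.g. a divisor of 4 gives a shift of 2 */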
401 ec_div = event_coal_div;
402 while (ec_div >>= 1)
403 bnx2i_ep->ec_shift += 1;
404
405 hba->ofld_conns_active++;
406 init_waitqueue_head(&bnx2i_ep->ofld_wait);
407 return ep;
408 }
409
410
411 /**
412 * bnx2i_free_ep - free endpoint
413 * @ep: pointer to iscsi endpoint structure
414 */
415 static void bnx2i_free_ep(struct iscsi_endpoint *ep)
416 {
417 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
418 unsigned long flags;
419
420 spin_lock_irqsave(&bnx2i_resc_lock, flags);
421 bnx2i_ep->state = EP_STATE_IDLE;
422 bnx2i_ep->hba->ofld_conns_active--;
423
424 if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
425 bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
426
427 if (bnx2i_ep->conn) {
428 bnx2i_ep->conn->ep = NULL;
429 bnx2i_ep->conn = NULL;
430 }
431
432 bnx2i_ep->hba = NULL;
433 spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
434 iscsi_destroy_endpoint(ep);
435 }
436
437
438 /**
439 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
440 * @hba: adapter instance pointer
441 * @session: iscsi session pointer
442 * @cmd: iscsi command structure
443 */
444 static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
445 struct bnx2i_cmd *cmd)
446 {
447 struct io_bdt *io = &cmd->io_tbl;
448 struct iscsi_bd *bd;
449
450 io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
451 ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
452 &io->bd_tbl_dma, GFP_KERNEL);
453 if (!io->bd_tbl) {
454 iscsi_session_printk(KERN_ERR, session, "Could not "
455 "allocate bdt.\n");
456 return -ENOMEM;
457 }
458 io->bd_valid = 0;
459 return 0;
460 }
461
462 /**
463 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
464 * @hba: adapter instance pointer
465 * @session: iscsi session pointer
466 */
467 static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
468 struct iscsi_session *session)
469 {
470 int i;
471
472 for (i = 0; i < session->cmds_max; i++) {
473 struct iscsi_task *task = session->cmds[i];
474 struct bnx2i_cmd *cmd = task->dd_data;
475
476 if (cmd->io_tbl.bd_tbl)
477 dma_free_coherent(&hba->pcidev->dev,
478 ISCSI_MAX_BDS_PER_CMD *
479 sizeof(struct iscsi_bd),
480 cmd->io_tbl.bd_tbl,
481 cmd->io_tbl.bd_tbl_dma);
482 }
483
484 }
485
486
487 /**
488 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
489 * @hba: adapter instance pointer
490 * @session: iscsi session pointer
491 */
492 static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
493 struct iscsi_session *session)
494 {
495 int i;
496
497 for (i = 0; i < session->cmds_max; i++) {
498 struct iscsi_task *task = session->cmds[i];
499 struct bnx2i_cmd *cmd = task->dd_data;
500
501 task->hdr = &cmd->hdr;
502 task->hdr_max = sizeof(struct iscsi_hdr);
503
504 if (bnx2i_alloc_bdt(hba, session, cmd))
505 goto free_bdts;
506 }
507
508 return 0;
509
510 free_bdts:
511 bnx2i_destroy_cmd_pool(hba, session);
512 return -ENOMEM;
513 }
514
515
516 /**
517 * bnx2i_setup_mp_bdt - allocate BD table resources
518 * @hba: pointer to adapter structure
519 *
520 * Allocate memory for dummy buffer and associated BD
521 * table to be used by middle path (MP) requests
522 */
523 static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
524 {
525 int rc = 0;
526 struct iscsi_bd *mp_bdt;
527 u64 addr;
528
529 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
530 &hba->mp_bd_dma, GFP_KERNEL);
531 if (!hba->mp_bd_tbl) {
532 printk(KERN_ERR "unable to allocate Middle Path BDT\n");
533 rc = -1;
534 goto out;
535 }
536
537 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
538 CNIC_PAGE_SIZE,
539 &hba->dummy_buf_dma, GFP_KERNEL);
540 if (!hba->dummy_buffer) {
541 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
542 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
543 hba->mp_bd_tbl, hba->mp_bd_dma);
544 hba->mp_bd_tbl = NULL;
545 rc = -1;
546 goto out;
547 }
548
549 mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
550 addr = (unsigned long) hba->dummy_buf_dma;
551 mp_bdt->buffer_addr_lo = addr & 0xffffffff;
552 mp_bdt->buffer_addr_hi = addr >> 32;
553 mp_bdt->buffer_length = CNIC_PAGE_SIZE;
554 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
555 ISCSI_BD_FIRST_IN_BD_CHAIN;
556 out:
557 return rc;
558 }
559
560
561 /**
562 * bnx2i_free_mp_bdt - releases middle path (MP) dummy buffer and BD table
563 * @hba: pointer to adapter instance
564 *
565 * free MP dummy buffer and associated BD table
566 */
567 static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
568 {
569 if (hba->mp_bd_tbl) {
570 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
571 hba->mp_bd_tbl, hba->mp_bd_dma);
572 hba->mp_bd_tbl = NULL;
573 }
574 if (hba->dummy_buffer) {
575 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
576 hba->dummy_buffer, hba->dummy_buf_dma);
577 hba->dummy_buffer = NULL;
578 }
579 return;
580 }
581
582 /**
583 * bnx2i_drop_session - notifies iscsid of connection error.
584 * @cls_session: iscsi cls session pointer
585 *
586 * This notifies iscsid that there is an error, so it can initiate
587 * recovery.
588 *
589 * This relies on caller using the iscsi class iterator so the object
590 * is refcounted and does not disappear from under us.
591 */
592 void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
593 {
594 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
595 }
596
597 /**
598 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
599 * @hba: pointer to adapter instance
600 * @ep: pointer to endpoint (transport identifier) structure
601 *
602 * EP destroy queue manager
603 */
604 static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
605 struct bnx2i_endpoint *ep)
606 {
607 write_lock_bh(&hba->ep_rdwr_lock);
608 list_add_tail(&ep->link, &hba->ep_destroy_list);
609 write_unlock_bh(&hba->ep_rdwr_lock);
610 return 0;
611 }
612
613 /**
614 * bnx2i_ep_destroy_list_del - deletes an entry from the EP destroy list
615 *
616 * @hba: pointer to adapter instance
617 * @ep: pointer to endpoint (transport identifier) structure
618 *
619 * EP destroy queue manager
620 */
621 static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
622 struct bnx2i_endpoint *ep)
623 {
624 write_lock_bh(&hba->ep_rdwr_lock);
625 list_del_init(&ep->link);
626 write_unlock_bh(&hba->ep_rdwr_lock);
627
628 return 0;
629 }
630
631 /**
632 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
633 * @hba: pointer to adapter instance
634 * @ep: pointer to endpoint (transport identifier) structure
635 *
636 * pending conn offload completion queue manager
637 */
638 static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
639 struct bnx2i_endpoint *ep)
640 {
641 write_lock_bh(&hba->ep_rdwr_lock);
642 list_add_tail(&ep->link, &hba->ep_ofld_list);
643 write_unlock_bh(&hba->ep_rdwr_lock);
644 return 0;
645 }
646
647 /**
648 * bnx2i_ep_ofld_list_del - deletes an entry from the ep offload pending list
649 * @hba: pointer to adapter instance
650 * @ep: pointer to endpoint (transport identifier) structure
651 *
652 * pending conn offload completion queue manager
653 */
654 static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
655 struct bnx2i_endpoint *ep)
656 {
657 write_lock_bh(&hba->ep_rdwr_lock);
658 list_del_init(&ep->link);
659 write_unlock_bh(&hba->ep_rdwr_lock);
660 return 0;
661 }
662
663
664 /**
665 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
666 *
667 * @hba: pointer to adapter instance
668 * @iscsi_cid: iscsi context ID to find
669 *
670 */
671 struct bnx2i_endpoint *
672 bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
673 {
674 struct list_head *list;
675 struct list_head *tmp;
676 struct bnx2i_endpoint *ep = NULL;
677
678 read_lock_bh(&hba->ep_rdwr_lock);
679 list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
680 ep = (struct bnx2i_endpoint *)list;
681
682 if (ep->ep_iscsi_cid == iscsi_cid)
683 break;
684 ep = NULL;
685 }
686 read_unlock_bh(&hba->ep_rdwr_lock);
687
688 if (!ep)
689 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
690 return ep;
691 }
692
693 /**
694 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
695 * @hba: pointer to adapter instance
696 * @iscsi_cid: iscsi context ID to find
697 *
698 */
699 struct bnx2i_endpoint *
700 bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
701 {
702 struct list_head *list;
703 struct list_head *tmp;
704 struct bnx2i_endpoint *ep = NULL;
705
706 read_lock_bh(&hba->ep_rdwr_lock);
707 list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
708 ep = (struct bnx2i_endpoint *)list;
709
710 if (ep->ep_iscsi_cid == iscsi_cid)
711 break;
712 ep = NULL;
713 }
714 read_unlock_bh(&hba->ep_rdwr_lock);
715
716 if (!ep)
717 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
718
719 return ep;
720 }
721
722 /**
723 * bnx2i_ep_active_list_add - add an entry to ep active list
724 * @hba: pointer to adapter instance
725 * @ep: pointer to endpoint (transport identifier) structure
726 *
727 * current active conn queue manager
728 */
729 static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
730 struct bnx2i_endpoint *ep)
731 {
732 write_lock_bh(&hba->ep_rdwr_lock);
733 list_add_tail(&ep->link, &hba->ep_active_list);
734 write_unlock_bh(&hba->ep_rdwr_lock);
735 }
736
737
738 /**
739 * bnx2i_ep_active_list_del - deletes an entry from the ep active list
740 * @hba: pointer to adapter instance
741 * @ep: pointer to endpoint (transport identifier) structure
742 *
743 * current active conn queue manager
744 */
745 static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
746 struct bnx2i_endpoint *ep)
747 {
748 write_lock_bh(&hba->ep_rdwr_lock);
749 list_del_init(&ep->link);
750 write_unlock_bh(&hba->ep_rdwr_lock);
751 }
752
753
754 /**
755 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
756 * @hba: pointer to adapter instance
757 * @shost: scsi host pointer
758 *
759 * Initializes 'can_queue' parameter based on how many outstanding commands
760 * the device can handle. Each device 5708/5709/57710 has different
761 * capabilities
762 */
763 static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
764 struct Scsi_Host *shost)
765 {
766 if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
767 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
768 else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
769 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
770 else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
771 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
772 else
773 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
774 }
775
776
777 /**
778 * bnx2i_alloc_hba - allocate and init adapter instance
779 * @cnic: cnic device pointer
780 *
781 * allocate & initialize adapter structure and call other
782 * support routines to do per adapter initialization
783 */
784 struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
785 {
786 struct Scsi_Host *shost;
787 struct bnx2i_hba *hba;
788
789 shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
790 if (!shost)
791 return NULL;
792 shost->dma_boundary = cnic->pcidev->dma_mask;
793 shost->transportt = bnx2i_scsi_xport_template;
794 shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1;
795 shost->max_channel = 0;
796 shost->max_lun = 512;
797 shost->max_cmd_len = 16;
798
799 hba = iscsi_host_priv(shost);
800 hba->shost = shost;
801 hba->netdev = cnic->netdev;
802 /* Get PCI related information and update hba struct members */
803 hba->pcidev = cnic->pcidev;
804 pci_dev_get(hba->pcidev);
805 hba->pci_did = hba->pcidev->device;
806 hba->pci_vid = hba->pcidev->vendor;
807 hba->pci_sdid = hba->pcidev->subsystem_device;
808 hba->pci_svid = hba->pcidev->subsystem_vendor;
809 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
810 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
811
812 bnx2i_identify_device(hba, cnic);
813 bnx2i_setup_host_queue_size(hba, shost);
814
815 hba->reg_base = pci_resource_start(hba->pcidev, 0);
816 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
817 hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
818 if (!hba->regview)
819 goto ioreg_map_err;
820 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
821 hba->regview = pci_iomap(hba->pcidev, 0, 4096);
822 if (!hba->regview)
823 goto ioreg_map_err;
824 }
825
826 if (bnx2i_setup_mp_bdt(hba))
827 goto mp_bdt_mem_err;
828
829 INIT_LIST_HEAD(&hba->ep_ofld_list);
830 INIT_LIST_HEAD(&hba->ep_active_list);
831 INIT_LIST_HEAD(&hba->ep_destroy_list);
832 rwlock_init(&hba->ep_rdwr_lock);
833
834 hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
835
836 /* different values for 5708/5709/57710 */
837 hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
838
839 if (bnx2i_setup_free_cid_que(hba))
840 goto cid_que_err;
841
842 /* SQ/RQ/CQ size can be changed via sysfs interface */
843 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
844 if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
845 hba->max_sqes = sq_size;
846 else
847 hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
848 } else { /* 5706/5708/5709 */
849 if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
850 hba->max_sqes = sq_size;
851 else
852 hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
853 }
854
855 hba->max_rqes = rq_size;
856 hba->max_cqes = hba->max_sqes + rq_size;
857 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
858 if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
859 hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
860 } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
861 hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
862
863 hba->num_ccell = hba->max_sqes / 2;
864
865 spin_lock_init(&hba->lock);
866 mutex_init(&hba->net_dev_lock);
867 init_waitqueue_head(&hba->eh_wait);
868 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
869 hba->hba_shutdown_tmo = 30 * HZ;
870 hba->conn_teardown_tmo = 20 * HZ;
871 hba->conn_ctx_destroy_tmo = 6 * HZ;
872 } else { /* 5706/5708/5709 */
873 hba->hba_shutdown_tmo = 20 * HZ;
874 hba->conn_teardown_tmo = 10 * HZ;
875 hba->conn_ctx_destroy_tmo = 2 * HZ;
876 }
877
878 #ifdef CONFIG_32BIT
879 spin_lock_init(&hba->stat_lock);
880 #endif
881 memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
882
883 if (iscsi_host_add(shost, &hba->pcidev->dev))
884 goto free_dump_mem;
885 return hba;
886
887 free_dump_mem:
888 bnx2i_release_free_cid_que(hba);
889 cid_que_err:
890 bnx2i_free_mp_bdt(hba);
891 mp_bdt_mem_err:
892 if (hba->regview) {
893 pci_iounmap(hba->pcidev, hba->regview);
894 hba->regview = NULL;
895 }
896 ioreg_map_err:
897 pci_dev_put(hba->pcidev);
898 scsi_host_put(shost);
899 return NULL;
900 }
901
902 /**
903 * bnx2i_free_hba - releases hba structure and resources held by the adapter
904 * @hba: pointer to adapter instance
905 *
906 * free adapter structure and call various cleanup routines.
907 */
908 void bnx2i_free_hba(struct bnx2i_hba *hba)
909 {
910 struct Scsi_Host *shost = hba->shost;
911
912 iscsi_host_remove(shost);
913 INIT_LIST_HEAD(&hba->ep_ofld_list);
914 INIT_LIST_HEAD(&hba->ep_active_list);
915 INIT_LIST_HEAD(&hba->ep_destroy_list);
916
917 if (hba->regview) {
918 pci_iounmap(hba->pcidev, hba->regview);
919 hba->regview = NULL;
920 }
921 pci_dev_put(hba->pcidev);
922 bnx2i_free_mp_bdt(hba);
923 bnx2i_release_free_cid_que(hba);
924 iscsi_host_free(shost);
925 }
926
927 /**
928 * bnx2i_conn_free_login_resources - free DMA resources used for login process
929 * @hba: pointer to adapter instance
930 * @bnx2i_conn: iscsi connection pointer
931 *
932 * Login related resources, mostly BDT & payload DMA memory, are freed
933 */
934 static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
935 struct bnx2i_conn *bnx2i_conn)
936 {
937 if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
938 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
939 bnx2i_conn->gen_pdu.resp_bd_tbl,
940 bnx2i_conn->gen_pdu.resp_bd_dma);
941 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
942 }
943
944 if (bnx2i_conn->gen_pdu.req_bd_tbl) {
945 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
946 bnx2i_conn->gen_pdu.req_bd_tbl,
947 bnx2i_conn->gen_pdu.req_bd_dma);
948 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
949 }
950
951 if (bnx2i_conn->gen_pdu.resp_buf) {
952 dma_free_coherent(&hba->pcidev->dev,
953 ISCSI_DEF_MAX_RECV_SEG_LEN,
954 bnx2i_conn->gen_pdu.resp_buf,
955 bnx2i_conn->gen_pdu.resp_dma_addr);
956 bnx2i_conn->gen_pdu.resp_buf = NULL;
957 }
958
959 if (bnx2i_conn->gen_pdu.req_buf) {
960 dma_free_coherent(&hba->pcidev->dev,
961 ISCSI_DEF_MAX_RECV_SEG_LEN,
962 bnx2i_conn->gen_pdu.req_buf,
963 bnx2i_conn->gen_pdu.req_dma_addr);
964 bnx2i_conn->gen_pdu.req_buf = NULL;
965 }
966 }
967
968 /**
969 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
970 * @hba: pointer to adapter instance
971 * @bnx2i_conn: iscsi connection pointer
972 *
973 * Mgmt task DMA resources are allocated in this routine.
974 */
975 static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
976 struct bnx2i_conn *bnx2i_conn)
977 {
978 /* Allocate memory for login request/response buffers */
979 bnx2i_conn->gen_pdu.req_buf =
980 dma_alloc_coherent(&hba->pcidev->dev,
981 ISCSI_DEF_MAX_RECV_SEG_LEN,
982 &bnx2i_conn->gen_pdu.req_dma_addr,
983 GFP_KERNEL);
984 if (bnx2i_conn->gen_pdu.req_buf == NULL)
985 goto login_req_buf_failure;
986
987 bnx2i_conn->gen_pdu.req_buf_size = 0;
988 bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
989
990 bnx2i_conn->gen_pdu.resp_buf =
991 dma_alloc_coherent(&hba->pcidev->dev,
992 ISCSI_DEF_MAX_RECV_SEG_LEN,
993 &bnx2i_conn->gen_pdu.resp_dma_addr,
994 GFP_KERNEL);
995 if (bnx2i_conn->gen_pdu.resp_buf == NULL)
996 goto login_resp_buf_failure;
997
998 bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
999 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
1000
1001 bnx2i_conn->gen_pdu.req_bd_tbl =
1002 dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1003 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
1004 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
1005 goto login_req_bd_tbl_failure;
1006
1007 bnx2i_conn->gen_pdu.resp_bd_tbl =
1008 dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1009 &bnx2i_conn->gen_pdu.resp_bd_dma,
1010 GFP_KERNEL);
1011 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
1012 goto login_resp_bd_tbl_failure;
1013
1014 return 0;
1015
1016 login_resp_bd_tbl_failure:
1017 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1018 bnx2i_conn->gen_pdu.req_bd_tbl,
1019 bnx2i_conn->gen_pdu.req_bd_dma);
1020 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
1021
1022 login_req_bd_tbl_failure:
1023 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
1024 bnx2i_conn->gen_pdu.resp_buf,
1025 bnx2i_conn->gen_pdu.resp_dma_addr);
1026 bnx2i_conn->gen_pdu.resp_buf = NULL;
1027 login_resp_buf_failure:
1028 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
1029 bnx2i_conn->gen_pdu.req_buf,
1030 bnx2i_conn->gen_pdu.req_dma_addr);
1031 bnx2i_conn->gen_pdu.req_buf = NULL;
1032 login_req_buf_failure:
1033 iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
1034 "login resource alloc failed!!\n");
1035 return -ENOMEM;
1036
1037 }
1038
1039
1040 /**
1041 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
1042 * @bnx2i_conn: iscsi connection pointer
1043 *
1044 * Fills in the request/response BD tables before shipping requests to cnic
1045 * for PDUs prepared by 'iscsid' daemon
1046 */
1047 static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
1048 {
1049 struct iscsi_bd *bd_tbl;
1050
1051 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
1052
1053 bd_tbl->buffer_addr_hi =
1054 (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
1055 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
1056 bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
1057 bnx2i_conn->gen_pdu.req_buf;
1058 bd_tbl->reserved0 = 0;
1059 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1060 ISCSI_BD_FIRST_IN_BD_CHAIN;
1061
1062 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1063 bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1064 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1065 bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1066 bd_tbl->reserved0 = 0;
1067 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1068 ISCSI_BD_FIRST_IN_BD_CHAIN;
1069 }
1070
1071
1072 /**
1073 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
1074 * @task: transport layer task pointer
1075 *
1076 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
1077 * Nop-out and Logout requests flow through this path.
1078 */
1079 static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1080 {
1081 struct bnx2i_cmd *cmd = task->dd_data;
1082 struct bnx2i_conn *bnx2i_conn = cmd->conn;
1083 int rc = 0;
1084 char *buf;
1085 int data_len;
1086
1087 bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1088 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1089 case ISCSI_OP_LOGIN:
1090 bnx2i_send_iscsi_login(bnx2i_conn, task);
1091 break;
1092 case ISCSI_OP_NOOP_OUT:
1093 data_len = bnx2i_conn->gen_pdu.req_buf_size;
1094 buf = bnx2i_conn->gen_pdu.req_buf;
1095 if (data_len)
1096 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1097 buf, data_len, 1);
1098 else
1099 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1100 NULL, 0, 1);
1101 break;
1102 case ISCSI_OP_LOGOUT:
1103 rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1104 break;
1105 case ISCSI_OP_SCSI_TMFUNC:
1106 rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1107 break;
1108 case ISCSI_OP_TEXT:
1109 rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
1110 break;
1111 default:
1112 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1113 "send_gen: unsupported op 0x%x\n",
1114 task->hdr->opcode);
1115 }
1116 return rc;
1117 }
1118
1119
1120 /**********************************************************************
1121 * SCSI-ML Interface
1122 **********************************************************************/
1123
1124 /**
1125 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
1126 * @sc: SCSI-ML command pointer
1127 * @cmd: iscsi cmd pointer
1128 */
1129 static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1130 {
1131 u32 dword;
1132 int lpcnt;
1133 u8 *srcp;
1134 u32 *dstp;
1135 u32 scsi_lun[2];
1136
1137 int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1138 cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1139 cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1140
1141 lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1142 srcp = (u8 *) sc->cmnd;
1143 dstp = (u32 *) cmd->req.cdb;
1144 while (lpcnt--) {
1145 memcpy(&dword, (const void *) srcp, 4);
1146 *dstp = cpu_to_be32(dword);
1147 srcp += 4;
1148 dstp++;
1149 }
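/* copy any trailing CDB bytes that do not fill a complete dword */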
1150 if (sc->cmd_len & 0x3) {
1151 dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1152 *dstp = cpu_to_be32(dword);
1153 }
1154 }
1155
1156 static void bnx2i_cleanup_task(struct iscsi_task *task)
1157 {
1158 struct iscsi_conn *conn = task->conn;
1159 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1160 struct bnx2i_hba *hba = bnx2i_conn->hba;
1161
1162 /*
1163 * mgmt task or cmd was never sent to us to transmit.
1164 */
1165 if (!task->sc || task->state == ISCSI_TASK_PENDING)
1166 return;
1167 /*
1168 * need to clean up the task context to reclaim dma buffers
1169 */
1170 if (task->state == ISCSI_TASK_ABRT_TMF) {
1171 bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
1172
1173 spin_unlock_bh(&conn->session->back_lock);
1174 wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
1175 msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
1176 spin_lock_bh(&conn->session->back_lock);
1177 }
1178 bnx2i_iscsi_unmap_sg_list(task->dd_data);
1179 }
1180
1181 /**
1182 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
1183 * @conn: transport layer conn structure pointer
1184 * @task: transport layer command structure pointer
1185 */
1186 static int
1187 bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1188 {
1189 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1190 struct bnx2i_hba *hba = bnx2i_conn->hba;
1191 struct bnx2i_cmd *cmd = task->dd_data;
1192
1193 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1194
1195 bnx2i_setup_cmd_wqe_template(cmd);
1196 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1197
1198 /* Tx PDU/data length count */
1199 ADD_STATS_64(hba, tx_pdus, 1);
1200 ADD_STATS_64(hba, tx_bytes, task->data_count);
1201
1202 if (task->data_count) {
1203 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1204 task->data_count);
1205 bnx2i_conn->gen_pdu.req_wr_ptr =
1206 bnx2i_conn->gen_pdu.req_buf + task->data_count;
1207 }
1208 cmd->conn = conn->dd_data;
1209 cmd->scsi_cmd = NULL;
1210 return bnx2i_iscsi_send_generic_request(task);
1211 }
1212
1213 /**
1214 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
1215 * @task: transport layer command structure pointer
1216 *
1217 * maps SG buffers and send request to chip/firmware in the form of SQ WQE
1218 */
1219 static int bnx2i_task_xmit(struct iscsi_task *task)
1220 {
1221 struct iscsi_conn *conn = task->conn;
1222 struct iscsi_session *session = conn->session;
1223 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1224 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1225 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1226 struct scsi_cmnd *sc = task->sc;
1227 struct bnx2i_cmd *cmd = task->dd_data;
1228 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
1229
1230 if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
1231 hba->max_sqes)
1232 return -ENOMEM;
1233
1234 /*
1235 * If there is no scsi_cmnd this must be a mgmt task
1236 */
1237 if (!sc)
1238 return bnx2i_mtask_xmit(conn, task);
1239
1240 bnx2i_setup_cmd_wqe_template(cmd);
1241 cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1242 cmd->conn = bnx2i_conn;
1243 cmd->scsi_cmd = sc;
1244 cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1245 cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1246
1247 bnx2i_iscsi_map_sg_list(cmd);
1248 bnx2i_cpy_scsi_cdb(sc, cmd);
1249
1250 cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1251 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1252 cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1253 cmd->req.itt = task->itt |
1254 (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1255 bnx2i_setup_write_cmd_bd_info(task);
1256 } else {
1257 if (scsi_bufflen(sc))
1258 cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1259 cmd->req.itt = task->itt |
1260 (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1261 }
1262
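/* zero-length commands have no SG elements mapped; fall back to the
 * single-entry middle path dummy BD allocated in bnx2i_setup_mp_bdt().
 */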
1263 cmd->req.num_bds = cmd->io_tbl.bd_valid;
1264 if (!cmd->io_tbl.bd_valid) {
1265 cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1266 cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1267 cmd->req.num_bds = 1;
1268 }
1269
1270 bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1271 return 0;
1272 }
1273
1274 /**
1275 * bnx2i_session_create - create a new iscsi session
1276 * @ep: pointer to iscsi endpoint
1277 * @cmds_max: user specified maximum commands
1278 * @qdepth: scsi queue depth to support
1279 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
1280 *
1281 * Creates a new iSCSI session instance on given device.
1282 */
1283 static struct iscsi_cls_session *
1284 bnx2i_session_create(struct iscsi_endpoint *ep,
1285 uint16_t cmds_max, uint16_t qdepth,
1286 uint32_t initial_cmdsn)
1287 {
1288 struct Scsi_Host *shost;
1289 struct iscsi_cls_session *cls_session;
1290 struct bnx2i_hba *hba;
1291 struct bnx2i_endpoint *bnx2i_ep;
1292
1293 if (!ep) {
1294 printk(KERN_ERR "bnx2i: missing ep.\n");
1295 return NULL;
1296 }
1297
1298 bnx2i_ep = ep->dd_data;
1299 shost = bnx2i_ep->hba->shost;
1300 hba = iscsi_host_priv(shost);
1301 if (bnx2i_adapter_ready(hba))
1302 return NULL;
1303
1304 /*
1305 * user can override hw limit as long as it is within
1306 * the min/max.
1307 */
1308 if (cmds_max > hba->max_sqes)
1309 cmds_max = hba->max_sqes;
1310 else if (cmds_max < BNX2I_SQ_WQES_MIN)
1311 cmds_max = BNX2I_SQ_WQES_MIN;
1312
1313 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1314 cmds_max, 0, sizeof(struct bnx2i_cmd),
1315 initial_cmdsn, ISCSI_MAX_TARGET);
1316 if (!cls_session)
1317 return NULL;
1318
1319 if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1320 goto session_teardown;
1321 return cls_session;
1322
1323 session_teardown:
1324 iscsi_session_teardown(cls_session);
1325 return NULL;
1326 }
1327
1328
1329 /**
1330 * bnx2i_session_destroy - destroys iscsi session
1331 * @cls_session: pointer to iscsi cls session
1332 *
1333 * Destroys previously created iSCSI session instance and releases
1334 * all resources held by it
1335 */
1336 static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1337 {
1338 struct iscsi_session *session = cls_session->dd_data;
1339 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1340 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1341
1342 bnx2i_destroy_cmd_pool(hba, session);
1343 iscsi_session_teardown(cls_session);
1344 }
1345
1346
1347 /**
1348 * bnx2i_conn_create - create iscsi connection instance
1349 * @cls_session: pointer to iscsi cls session
1350 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
1351 *
1352 * Creates a new iSCSI connection instance for a given session
1353 */
1354 static struct iscsi_cls_conn *
1355 bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1356 {
1357 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1358 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1359 struct bnx2i_conn *bnx2i_conn;
1360 struct iscsi_cls_conn *cls_conn;
1361 struct iscsi_conn *conn;
1362
1363 cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1364 cid);
1365 if (!cls_conn)
1366 return NULL;
1367 conn = cls_conn->dd_data;
1368
1369 bnx2i_conn = conn->dd_data;
1370 bnx2i_conn->cls_conn = cls_conn;
1371 bnx2i_conn->hba = hba;
1372
1373 atomic_set(&bnx2i_conn->work_cnt, 0);
1374
1375 /* 'ep' ptr will be assigned in bind() call */
1376 bnx2i_conn->ep = NULL;
1377 init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1378
1379 if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1380 iscsi_conn_printk(KERN_ALERT, conn,
1381 "conn_new: login resc alloc failed!!\n");
1382 goto free_conn;
1383 }
1384
1385 return cls_conn;
1386
1387 free_conn:
1388 iscsi_conn_teardown(cls_conn);
1389 return NULL;
1390 }
1391
1392 /**
1393 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
1394 * @cls_session: pointer to iscsi cls session
1395 * @cls_conn: pointer to iscsi cls conn
1396 * @transport_fd: 64-bit EP handle
1397 * @is_leading: leading connection on this session?
1398 *
1399 * Binds together iSCSI session instance, iSCSI connection instance
1400 * and the TCP connection. This routine returns an error code if the
1401 * TCP connection does not belong to the device on which the iSCSI
1402 * sess/conn is bound
1403 */
1404 static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1405 struct iscsi_cls_conn *cls_conn,
1406 uint64_t transport_fd, int is_leading)
1407 {
1408 struct iscsi_conn *conn = cls_conn->dd_data;
1409 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1410 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1411 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1412 struct bnx2i_endpoint *bnx2i_ep;
1413 struct iscsi_endpoint *ep;
1414 int ret_code;
1415
1416 ep = iscsi_lookup_endpoint(transport_fd);
1417 if (!ep)
1418 return -EINVAL;
1419 /*
1420 * Forcefully terminate all in progress connection recovery at the
1421 * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
1422 */
1423 if (bnx2i_adapter_ready(hba)) {
1424 ret_code = -EIO;
1425 goto put_ep;
1426 }
1427
1428 bnx2i_ep = ep->dd_data;
1429 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1430 (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
1431 /* Peer disconnect via FIN or RST */
1432 ret_code = -EINVAL;
1433 goto put_ep;
1434 }
1435
1436 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
1437 ret_code = -EINVAL;
1438 goto put_ep;
1439 }
1440
1441 if (bnx2i_ep->hba != hba) {
1442 /* Error - TCP connection does not belong to this device
1443 */
1444 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1445 "conn bind, ep=0x%p (%s) does not",
1446 bnx2i_ep, bnx2i_ep->hba->netdev->name);
1447 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1448 "belong to hba (%s)\n",
1449 hba->netdev->name);
1450 ret_code = -EEXIST;
1451 goto put_ep;
1452 }
1453 bnx2i_ep->conn = bnx2i_conn;
1454 bnx2i_conn->ep = bnx2i_ep;
1455 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1456 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1457
1458 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1459 bnx2i_ep->ep_iscsi_cid);
1460
1461 /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
1462 * driver needs to explicitly replenish RQ index during setup.
1463 */
1464 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1465 bnx2i_put_rq_buf(bnx2i_conn, 0);
1466
1467 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1468 put_ep:
1469 iscsi_put_endpoint(ep);
1470 return ret_code;
1471 }
1472
1473
1474 /**
1475 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
1476 * @cls_conn: pointer to iscsi cls conn
1477 *
1478 * Destroy an iSCSI connection instance and release memory resources held by
1479 * this connection
1480 */
1481 static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1482 {
1483 struct iscsi_conn *conn = cls_conn->dd_data;
1484 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1485 struct Scsi_Host *shost;
1486 struct bnx2i_hba *hba;
1487 struct bnx2i_work *work, *tmp;
1488 unsigned cpu = 0;
1489 struct bnx2i_percpu_s *p;
1490
1491 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1492 hba = iscsi_host_priv(shost);
1493
1494 bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1495
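/* drop any deferred per-cpu work items still queued for this connection */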
1496 if (atomic_read(&bnx2i_conn->work_cnt)) {
1497 for_each_online_cpu(cpu) {
1498 p = &per_cpu(bnx2i_percpu, cpu);
1499 spin_lock_bh(&p->p_work_lock);
1500 list_for_each_entry_safe(work, tmp,
1501 &p->work_list, list) {
1502 if (work->session == conn->session &&
1503 work->bnx2i_conn == bnx2i_conn) {
1504 list_del_init(&work->list);
1505 kfree(work);
1506 if (!atomic_dec_and_test(
1507 &bnx2i_conn->work_cnt))
1508 break;
1509 }
1510 }
1511 spin_unlock_bh(&p->p_work_lock);
1512 }
1513 }
1514
1515 iscsi_conn_teardown(cls_conn);
1516 }
1517
1518
1519 /**
1520 * bnx2i_ep_get_param - return iscsi ep parameter to caller
1521 * @ep: pointer to iscsi endpoint
1522 * @param: parameter type identifier
1523 * @buf: buffer pointer
1524 *
1525 * returns iSCSI ep parameters
1526 */
1527 static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
1528 enum iscsi_param param, char *buf)
1529 {
1530 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
1531 struct bnx2i_hba *hba = bnx2i_ep->hba;
1532 int len = -ENOTCONN;
1533
1534 if (!hba)
1535 return -ENOTCONN;
1536
1537 switch (param) {
1538 case ISCSI_PARAM_CONN_PORT:
1539 mutex_lock(&hba->net_dev_lock);
1540 if (bnx2i_ep->cm_sk)
1541 len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
1542 mutex_unlock(&hba->net_dev_lock);
1543 break;
1544 case ISCSI_PARAM_CONN_ADDRESS:
1545 mutex_lock(&hba->net_dev_lock);
1546 if (bnx2i_ep->cm_sk)
1547 len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
1548 mutex_unlock(&hba->net_dev_lock);
1549 break;
1550 default:
1551 return -ENOSYS;
1552 }
1553
1554 return len;
1555 }
1556
1557 /**
1558 * bnx2i_host_get_param - returns host (adapter) related parameters
1559 * @shost: scsi host pointer
1560 * @param: parameter type identifier
1561 * @buf: buffer pointer
1562 */
1563 static int bnx2i_host_get_param(struct Scsi_Host *shost,
1564 enum iscsi_host_param param, char *buf)
1565 {
1566 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1567 int len = 0;
1568
1569 switch (param) {
1570 case ISCSI_HOST_PARAM_HWADDRESS:
1571 len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1572 break;
1573 case ISCSI_HOST_PARAM_NETDEV_NAME:
1574 len = sprintf(buf, "%s\n", hba->netdev->name);
1575 break;
1576 case ISCSI_HOST_PARAM_IPADDRESS: {
1577 struct list_head *active_list = &hba->ep_active_list;
1578
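/* report the source IP of the first active endpoint, if any */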
1579 read_lock_bh(&hba->ep_rdwr_lock);
1580 if (!list_empty(&hba->ep_active_list)) {
1581 struct bnx2i_endpoint *bnx2i_ep;
1582 struct cnic_sock *csk;
1583
1584 bnx2i_ep = list_first_entry(active_list,
1585 struct bnx2i_endpoint,
1586 link);
1587 csk = bnx2i_ep->cm_sk;
1588 if (test_bit(SK_F_IPV6, &csk->flags))
1589 len = sprintf(buf, "%pI6\n", csk->src_ip);
1590 else
1591 len = sprintf(buf, "%pI4\n", csk->src_ip);
1592 }
1593 read_unlock_bh(&hba->ep_rdwr_lock);
1594 break;
1595 }
1596 default:
1597 return iscsi_host_get_param(shost, param, buf);
1598 }
1599 return len;
1600 }
1601
1602 /**
1603 * bnx2i_conn_start - completes iscsi connection migration to FFP
1604 * @cls_conn: pointer to iscsi cls conn
1605 *
1606 * last call in FFP migration to handover iscsi conn to the driver
1607 */
1608 static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1609 {
1610 struct iscsi_conn *conn = cls_conn->dd_data;
1611 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1612
1613 bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1614 bnx2i_update_iscsi_conn(conn);
1615
1616 /*
1617 * this should normally not sleep for a long time so it should
1618 * not disrupt the caller.
1619 */
1620 timer_setup(&bnx2i_conn->ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1621 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1622 add_timer(&bnx2i_conn->ep->ofld_timer);
1623 /* update iSCSI context for this conn, wait for CNIC to complete */
1624 wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1625 bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1626
1627 if (signal_pending(current))
1628 flush_signals(current);
1629 del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1630
1631 iscsi_conn_start(cls_conn);
1632 return 0;
1633 }
1634
1635
1636 /**
1637 * bnx2i_conn_get_stats - returns iSCSI stats
1638 * @cls_conn: pointer to iscsi cls conn
1639 * @stats: pointer to iscsi statistic struct
1640 */
1641 static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1642 struct iscsi_stats *stats)
1643 {
1644 struct iscsi_conn *conn = cls_conn->dd_data;
1645
1646 stats->txdata_octets = conn->txdata_octets;
1647 stats->rxdata_octets = conn->rxdata_octets;
1648 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1649 stats->dataout_pdus = conn->dataout_pdus_cnt;
1650 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1651 stats->datain_pdus = conn->datain_pdus_cnt;
1652 stats->r2t_pdus = conn->r2t_pdus_cnt;
1653 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1654 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1655 stats->digest_err = 0;
1656 stats->timeout_err = 0;
1657 strcpy(stats->custom[0].desc, "eh_abort_cnt");
1658 stats->custom[0].value = conn->eh_abort_cnt;
1659 stats->custom_length = 1;
1660 }
1661
1662
1663 /**
1664 * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
1665 * @dst_addr: target IP address
1666 *
1667 * check if route resolves to BNX2 device
1668 */
1669 static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1670 {
1671 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1672 struct bnx2i_hba *hba;
1673 struct cnic_dev *cnic = NULL;
1674
1675 hba = get_adapter_list_head();
1676 if (hba && hba->cnic)
1677 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1678 if (!cnic) {
1679 printk(KERN_ALERT "bnx2i: no route, "
1680 "can't connect using cnic\n");
1681 goto no_nx2_route;
1682 }
1683 hba = bnx2i_find_hba_for_cnic(cnic);
1684 if (!hba)
1685 goto no_nx2_route;
1686
1687 if (bnx2i_adapter_ready(hba)) {
1688 printk(KERN_ALERT "bnx2i: check route, hba not found\n");
1689 goto no_nx2_route;
1690 }
1691 if (hba->netdev->mtu > hba->mtu_supported) {
1692 printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1693 hba->netdev->name, hba->netdev->mtu);
1694 printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1695 hba->mtu_supported);
1696 goto no_nx2_route;
1697 }
1698 return hba;
1699 no_nx2_route:
1700 return NULL;
1701 }
1702
1703
1704 /**
1705 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
1706 * @hba: pointer to adapter instance
1707 * @ep: endpoint (transport identifier) structure
1708 *
1709 * destroys cm_sock structure and on chip iscsi context
1710 */
1711 static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1712 struct bnx2i_endpoint *ep)
1713 {
1714 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
1715 hba->cnic->cm_destroy(ep->cm_sk);
1716
1717 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1718 ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1719 if (ep->conn && ep->conn->cls_conn &&
1720 ep->conn->cls_conn->dd_data) {
1721 struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
1722
1723 /* Must suspend all rx queue activity for this ep */
1724 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1725 }
1726 /* CONN_DISCONNECT timeout may or may not be an issue depending
1727 * on what transpired in the TCP layer; different targets behave
1728 * differently
1729 */
1730 printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
1731 "please submit GRC Dump, NW/PCIe trace, "
1732 "driver msgs to developers for analysis\n",
1733 hba->netdev->name);
1734 }
1735
1736 ep->state = EP_STATE_CLEANUP_START;
1737 timer_setup(&ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1738 ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
1739 add_timer(&ep->ofld_timer);
1740
1741 bnx2i_ep_destroy_list_add(hba, ep);
1742
1743 /* destroy iSCSI context, wait for it to complete */
1744 if (bnx2i_send_conn_destroy(hba, ep))
1745 ep->state = EP_STATE_CLEANUP_CMPL;
1746
1747 wait_event_interruptible(ep->ofld_wait,
1748 (ep->state != EP_STATE_CLEANUP_START));
1749
1750 if (signal_pending(current))
1751 flush_signals(current);
1752 del_timer_sync(&ep->ofld_timer);
1753
1754 bnx2i_ep_destroy_list_del(hba, ep);
1755
1756 if (ep->state != EP_STATE_CLEANUP_CMPL)
1757 /* should never happen */
1758 printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1759
1760 return 0;
1761 }
1762
1763
1764 /**
1765 * bnx2i_ep_connect - establish TCP connection to target portal
1766 * @shost: scsi host
1767 * @dst_addr: target IP address
1768 * @non_blocking: blocking or non-blocking call
1769 *
1770 * this routine initiates the TCP/IP connection by invoking the Option-2
1771 * interface with l5_core and the CNIC. This is a multi-step process:
1772 * resolve the route to the target, create an iscsi connection context,
1773 * handshake with the CNIC module to create/initialize the socket struct,
1774 * and finally send the option-2 request to complete the TCP 3-way handshake
1775 */
1776 static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1777 struct sockaddr *dst_addr,
1778 int non_blocking)
1779 {
1780 u32 iscsi_cid = BNX2I_CID_RESERVED;
1781 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1782 struct sockaddr_in6 *desti6;
1783 struct bnx2i_endpoint *bnx2i_ep;
1784 struct bnx2i_hba *hba;
1785 struct cnic_dev *cnic;
1786 struct cnic_sockaddr saddr;
1787 struct iscsi_endpoint *ep;
1788 int rc = 0;
1789
1790 if (shost) {
1791 /* driver is given scsi host to work with */
1792 hba = iscsi_host_priv(shost);
1793 } else
1794 /*
1795 * check if the given destination can be reached through
1796 * an iscsi-capable NetXtreme2 device
1797 */
1798 hba = bnx2i_check_route(dst_addr);
1799
1800 if (!hba) {
1801 rc = -EINVAL;
1802 goto nohba;
1803 }
1804 mutex_lock(&hba->net_dev_lock);
1805
1806 if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
1807 rc = -EPERM;
1808 goto check_busy;
1809 }
1810 cnic = hba->cnic;
1811 ep = bnx2i_alloc_ep(hba);
1812 if (!ep) {
1813 rc = -ENOMEM;
1814 goto check_busy;
1815 }
1816 bnx2i_ep = ep->dd_data;
1817
1818 atomic_set(&bnx2i_ep->num_active_cmds, 0);
1819 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1820 if (iscsi_cid == -1) {
1821 printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
1822 "iscsi cid\n", hba->netdev->name);
1823 rc = -ENOMEM;
1824 bnx2i_free_ep(ep);
1825 goto check_busy;
1826 }
1827 bnx2i_ep->hba_age = hba->age;
1828
1829 rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1830 if (rc != 0) {
1831 printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
1832 "\n", hba->netdev->name);
1833 rc = -ENOMEM;
1834 goto qp_resc_err;
1835 }
1836
1837 bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
1838 bnx2i_ep->state = EP_STATE_OFLD_START;
1839 bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
1840
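	/* Bound the offload handshake: if no completion arrives within
	 * 2 seconds, the ofld timer is expected to fail the state and wake
	 * the waiter below.
	 */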
1841 timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1842 bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
1843 add_timer(&bnx2i_ep->ofld_timer);
1844
1845 if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
1846 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1847 printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1848 hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1849 rc = -EBUSY;
1850 } else
1851 rc = -ENOSPC;
1852 printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
1853 "\n", hba->netdev->name);
1854 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1855 goto conn_failed;
1856 }
1857
1858 /* Wait for CNIC hardware to setup conn context and return 'cid' */
1859 wait_event_interruptible(bnx2i_ep->ofld_wait,
1860 bnx2i_ep->state != EP_STATE_OFLD_START);
1861
1862 if (signal_pending(current))
1863 flush_signals(current);
1864 del_timer_sync(&bnx2i_ep->ofld_timer);
1865
1866 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1867
1868 if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1869 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1870 printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1871 hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1872 rc = -EBUSY;
1873 } else
1874 rc = -ENOSPC;
1875 goto conn_failed;
1876 }
1877
1878 rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
1879 iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1880 if (rc) {
1881 rc = -EINVAL;
1882 /* Need to terminate and cleanup the connection */
1883 goto release_ep;
1884 }
1885
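	/* Size the offloaded socket's send/receive buffers at 256KB and
	 * disable TCP timestamps on this connection.
	 */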
1886 bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
1887 bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
1888 clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
1889
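	/* Build the cnic socket address: copy the destination and set only
	 * the local address family; the local IP/port are left zeroed,
	 * presumably for the lower layers to fill in at connect time.
	 */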
1890 memset(&saddr, 0, sizeof(saddr));
1891 if (dst_addr->sa_family == AF_INET) {
1892 desti = (struct sockaddr_in *) dst_addr;
1893 saddr.remote.v4 = *desti;
1894 saddr.local.v4.sin_family = desti->sin_family;
1895 } else if (dst_addr->sa_family == AF_INET6) {
1896 desti6 = (struct sockaddr_in6 *) dst_addr;
1897 saddr.remote.v6 = *desti6;
1898 saddr.local.v6.sin6_family = desti6->sin6_family;
1899 }
1900
1901 bnx2i_ep->timestamp = jiffies;
1902 bnx2i_ep->state = EP_STATE_CONNECT_START;
1903 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1904 rc = -EINVAL;
1905 goto conn_failed;
1906 } else
1907 rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1908 if (rc)
1909 goto release_ep;
1910
1911 bnx2i_ep_active_list_add(hba, bnx2i_ep);
1912
1913 rc = bnx2i_map_ep_dbell_regs(bnx2i_ep);
1914 if (rc)
1915 goto del_active_ep;
1916
1917 mutex_unlock(&hba->net_dev_lock);
1918 return ep;
1919
1920 del_active_ep:
1921 bnx2i_ep_active_list_del(hba, bnx2i_ep);
1922 release_ep:
1923 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1924 mutex_unlock(&hba->net_dev_lock);
1925 return ERR_PTR(rc);
1926 }
1927 conn_failed:
1928 bnx2i_free_qp_resc(hba, bnx2i_ep);
1929 qp_resc_err:
1930 bnx2i_free_ep(ep);
1931 check_busy:
1932 mutex_unlock(&hba->net_dev_lock);
1933 nohba:
1934 return ERR_PTR(rc);
1935 }
1936
1937
1938 /**
1939 * bnx2i_ep_poll - polls for TCP connection establishment
1940 * @ep: TCP connection (endpoint) handle
1941 * @timeout_ms: timeout value in milliseconds
1942 *
1943 * polls for TCP connect request to complete
1944 */
1945 static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1946 {
1947 struct bnx2i_endpoint *bnx2i_ep;
1948 int rc = 0;
1949
1950 bnx2i_ep = ep->dd_data;
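	/* Report terminal states immediately; otherwise sleep for up to
	 * timeout_ms waiting for the connect/offload completion handlers
	 * to move the endpoint out of its transient state.
	 */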
1951 if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1952 (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1953 (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1954 return -1;
1955 if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1956 return 1;
1957
1958 rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1959 ((bnx2i_ep->state ==
1960 EP_STATE_OFLD_FAILED) ||
1961 (bnx2i_ep->state ==
1962 EP_STATE_CONNECT_FAILED) ||
1963 (bnx2i_ep->state ==
1964 EP_STATE_CONNECT_COMPL)),
1965 msecs_to_jiffies(timeout_ms));
1966 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
1967 rc = -1;
1968
1969 if (rc > 0)
1970 return 1;
1971 else if (!rc)
1972 return 0; /* timeout */
1973 else
1974 return rc;
1975 }
1976
1977
1978 /**
1979 * bnx2i_ep_tcp_conn_active - check EP state transition
1980 * @bnx2i_ep: endpoint pointer
1981 *
1982 * check if underlying TCP connection is active
1983 */
1984 static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1985 {
1986 int ret;
1987 int cnic_dev_10g = 0;
1988
1989 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1990 cnic_dev_10g = 1;
1991
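	/* On 57710 (10G) devices a received TCP RST apparently means the
	 * connection is already torn down, so no further option-2
	 * close/abort is needed; on 1G devices the teardown must still be
	 * driven by the host.
	 */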
1992 switch (bnx2i_ep->state) {
1993 case EP_STATE_CLEANUP_FAILED:
1994 case EP_STATE_OFLD_FAILED:
1995 case EP_STATE_DISCONN_TIMEDOUT:
1996 ret = 0;
1997 break;
1998 case EP_STATE_CONNECT_START:
1999 case EP_STATE_CONNECT_FAILED:
2000 case EP_STATE_CONNECT_COMPL:
2001 case EP_STATE_ULP_UPDATE_START:
2002 case EP_STATE_ULP_UPDATE_COMPL:
2003 case EP_STATE_TCP_FIN_RCVD:
2004 case EP_STATE_LOGOUT_SENT:
2005 case EP_STATE_LOGOUT_RESP_RCVD:
2006 case EP_STATE_ULP_UPDATE_FAILED:
2007 ret = 1;
2008 break;
2009 case EP_STATE_TCP_RST_RCVD:
2010 if (cnic_dev_10g)
2011 ret = 0;
2012 else
2013 ret = 1;
2014 break;
2015 default:
2016 ret = 0;
2017 }
2018
2019 return ret;
2020 }
2021
2022
2023 /**
2024 * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw
2025 * @bnx2i_ep: TCP connection (bnx2i endpoint) handle
2026 *
2027 * executes TCP connection teardown process
2028 */
2029 int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
2030 {
2031 struct bnx2i_hba *hba = bnx2i_ep->hba;
2032 struct cnic_dev *cnic;
2033 struct iscsi_session *session = NULL;
2034 struct iscsi_conn *conn = NULL;
2035 int ret = 0;
2036 int close = 0;
2037 int close_ret = 0;
2038
2039 if (!hba)
2040 return 0;
2041
2042 cnic = hba->cnic;
2043 if (!cnic)
2044 return 0;
2045
2046 if (bnx2i_ep->state == EP_STATE_IDLE ||
2047 bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2048 return 0;
2049
2050 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
2051 goto destroy_conn;
2052
2053 if (bnx2i_ep->conn) {
2054 conn = bnx2i_ep->conn->cls_conn->dd_data;
2055 session = conn->session;
2056 }
2057
2058 timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
2059 bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
2060 add_timer(&bnx2i_ep->ofld_timer);
2061
2062 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
2063 goto out;
2064
2065 if (session) {
2066 spin_lock_bh(&session->frwd_lock);
2067 if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
2068 if (session->state == ISCSI_STATE_LOGGING_OUT) {
2069 if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
2070 /* Logout sent, but no resp */
2071 printk(KERN_ALERT "bnx2i (%s): WARNING"
2072 " logout response was not "
2073 "received!\n",
2074 bnx2i_ep->hba->netdev->name);
2075 } else if (bnx2i_ep->state ==
2076 EP_STATE_LOGOUT_RESP_RCVD)
2077 close = 1;
2078 }
2079 } else
2080 close = 1;
2081
2082 spin_unlock_bh(&session->frwd_lock);
2083 }
2084
2085 bnx2i_ep->state = EP_STATE_DISCONN_START;
2086
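	/* Graceful close (FIN) only if the logout exchange completed or the
	 * target already sent a FIN; otherwise abort the connection.
	 */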
2087 if (close)
2088 close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
2089 else
2090 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
2091
2092 if (close_ret)
2093 printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
2094 bnx2i_ep->hba->netdev->name, close, close_ret);
2095 else
2096 /* wait for option-2 conn teardown */
2097 wait_event_interruptible(bnx2i_ep->ofld_wait,
2098 ((bnx2i_ep->state != EP_STATE_DISCONN_START)
2099 && (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD)));
2100
2101 if (signal_pending(current))
2102 flush_signals(current);
2103 del_timer_sync(&bnx2i_ep->ofld_timer);
2104
2105 destroy_conn:
2106 bnx2i_ep_active_list_del(hba, bnx2i_ep);
2107 if (bnx2i_tear_down_conn(hba, bnx2i_ep))
2108 return -EINVAL;
2109 out:
2110 bnx2i_ep->state = EP_STATE_IDLE;
2111 return ret;
2112 }
2113
2114
2115 /**
2116 * bnx2i_ep_disconnect - executes TCP connection teardown process
2117 * @ep: TCP connection (iscsi endpoint) handle
2118 *
2119 * executes TCP connection teardown process
2120 */
2121 static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
2122 {
2123 struct bnx2i_endpoint *bnx2i_ep;
2124 struct bnx2i_conn *bnx2i_conn = NULL;
2125 struct iscsi_conn *conn = NULL;
2126 struct bnx2i_hba *hba;
2127
2128 bnx2i_ep = ep->dd_data;
2129
2130 /* driver should not attempt connection cleanup until TCP_CONNECT
2131 * completes either successfully or fails. The connect timeout is
2132 * about 9 seconds, so wait (up to 12 seconds here) for it to complete
2133 */
2134 while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
2135 !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
2136 msleep(250);
2137
2138 if (bnx2i_ep->conn) {
2139 bnx2i_conn = bnx2i_ep->conn;
2140 conn = bnx2i_conn->cls_conn->dd_data;
2141 iscsi_suspend_queue(conn);
2142 }
2143 hba = bnx2i_ep->hba;
2144
2145 mutex_lock(&hba->net_dev_lock);
2146
2147 if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2148 goto out;
2149
2150 if (bnx2i_ep->state == EP_STATE_IDLE)
2151 goto free_resc;
2152
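	/* If the adapter went down or was reset since this endpoint was
	 * created (hba_age mismatch), the on-chip context is assumed to be
	 * gone already; just unlink the endpoint and free its resources.
	 */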
2153 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
2154 (bnx2i_ep->hba_age != hba->age)) {
2155 bnx2i_ep_active_list_del(hba, bnx2i_ep);
2156 goto free_resc;
2157 }
2158
2159 /* Do all chip cleanup here */
2160 if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
2161 mutex_unlock(&hba->net_dev_lock);
2162 return;
2163 }
2164 free_resc:
2165 bnx2i_free_qp_resc(hba, bnx2i_ep);
2166
2167 if (bnx2i_conn)
2168 bnx2i_conn->ep = NULL;
2169
2170 bnx2i_free_ep(ep);
2171 out:
2172 mutex_unlock(&hba->net_dev_lock);
2173
2174 wake_up_interruptible(&hba->eh_wait);
2175 }
2176
2177
2178 /**
2179 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
2180 * @shost: scsi host pointer
2181 * @params: pointer to buffer containing iscsi path message
2182 */
2183 static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
2184 {
2185 struct bnx2i_hba *hba = iscsi_host_priv(shost);
2186 char *buf = (char *) params;
2187 u16 len = sizeof(*params);
2188
2189 /* handled by cnic driver */
2190 hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
2191 len);
2192
2193 return 0;
2194 }
2195
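/*
 * Sysfs visibility callback: the host and iSCSI parameters listed below are
 * exposed read-only (S_IRUGO); everything else is hidden from userspace.
 */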
2196 static umode_t bnx2i_attr_is_visible(int param_type, int param)
2197 {
2198 switch (param_type) {
2199 case ISCSI_HOST_PARAM:
2200 switch (param) {
2201 case ISCSI_HOST_PARAM_NETDEV_NAME:
2202 case ISCSI_HOST_PARAM_HWADDRESS:
2203 case ISCSI_HOST_PARAM_IPADDRESS:
2204 return S_IRUGO;
2205 default:
2206 return 0;
2207 }
2208 case ISCSI_PARAM:
2209 switch (param) {
2210 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2211 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2212 case ISCSI_PARAM_HDRDGST_EN:
2213 case ISCSI_PARAM_DATADGST_EN:
2214 case ISCSI_PARAM_CONN_ADDRESS:
2215 case ISCSI_PARAM_CONN_PORT:
2216 case ISCSI_PARAM_EXP_STATSN:
2217 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2218 case ISCSI_PARAM_PERSISTENT_PORT:
2219 case ISCSI_PARAM_PING_TMO:
2220 case ISCSI_PARAM_RECV_TMO:
2221 case ISCSI_PARAM_INITIAL_R2T_EN:
2222 case ISCSI_PARAM_MAX_R2T:
2223 case ISCSI_PARAM_IMM_DATA_EN:
2224 case ISCSI_PARAM_FIRST_BURST:
2225 case ISCSI_PARAM_MAX_BURST:
2226 case ISCSI_PARAM_PDU_INORDER_EN:
2227 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2228 case ISCSI_PARAM_ERL:
2229 case ISCSI_PARAM_TARGET_NAME:
2230 case ISCSI_PARAM_TPGT:
2231 case ISCSI_PARAM_USERNAME:
2232 case ISCSI_PARAM_PASSWORD:
2233 case ISCSI_PARAM_USERNAME_IN:
2234 case ISCSI_PARAM_PASSWORD_IN:
2235 case ISCSI_PARAM_FAST_ABORT:
2236 case ISCSI_PARAM_ABORT_TMO:
2237 case ISCSI_PARAM_LU_RESET_TMO:
2238 case ISCSI_PARAM_TGT_RESET_TMO:
2239 case ISCSI_PARAM_IFACE_NAME:
2240 case ISCSI_PARAM_INITIATOR_NAME:
2241 case ISCSI_PARAM_BOOT_ROOT:
2242 case ISCSI_PARAM_BOOT_NIC:
2243 case ISCSI_PARAM_BOOT_TARGET:
2244 return S_IRUGO;
2245 default:
2246 return 0;
2247 }
2248 }
2249
2250 return 0;
2251 }
2252
2253 /*
2254 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
2255 * used while registering with the scsi host and iSCSI transport module.
2256 */
2257 static struct scsi_host_template bnx2i_host_template = {
2258 .module = THIS_MODULE,
2259 .name = "QLogic Offload iSCSI Initiator",
2260 .proc_name = "bnx2i",
2261 .queuecommand = iscsi_queuecommand,
2262 .eh_timed_out = iscsi_eh_cmd_timed_out,
2263 .eh_abort_handler = iscsi_eh_abort,
2264 .eh_device_reset_handler = iscsi_eh_device_reset,
2265 .eh_target_reset_handler = iscsi_eh_recover_target,
2266 .change_queue_depth = scsi_change_queue_depth,
2267 .target_alloc = iscsi_target_alloc,
2268 .can_queue = 2048,
2269 .max_sectors = 127,
2270 .cmd_per_lun = 128,
2271 .this_id = -1,
2272 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
2273 .shost_attrs = bnx2i_dev_attributes,
2274 .track_queue_depth = 1,
2275 };
2276
2277 struct iscsi_transport bnx2i_iscsi_transport = {
2278 .owner = THIS_MODULE,
2279 .name = "bnx2i",
2280 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
2281 CAP_MULTI_R2T | CAP_DATADGST |
2282 CAP_DATA_PATH_OFFLOAD |
2283 CAP_TEXT_NEGO,
2284 .create_session = bnx2i_session_create,
2285 .destroy_session = bnx2i_session_destroy,
2286 .create_conn = bnx2i_conn_create,
2287 .bind_conn = bnx2i_conn_bind,
2288 .unbind_conn = iscsi_conn_unbind,
2289 .destroy_conn = bnx2i_conn_destroy,
2290 .attr_is_visible = bnx2i_attr_is_visible,
2291 .set_param = iscsi_set_param,
2292 .get_conn_param = iscsi_conn_get_param,
2293 .get_session_param = iscsi_session_get_param,
2294 .get_host_param = bnx2i_host_get_param,
2295 .start_conn = bnx2i_conn_start,
2296 .stop_conn = iscsi_conn_stop,
2297 .send_pdu = iscsi_conn_send_pdu,
2298 .xmit_task = bnx2i_task_xmit,
2299 .get_stats = bnx2i_conn_get_stats,
2300 /* TCP connect - disconnect - option-2 interface calls */
2301 .get_ep_param = bnx2i_ep_get_param,
2302 .ep_connect = bnx2i_ep_connect,
2303 .ep_poll = bnx2i_ep_poll,
2304 .ep_disconnect = bnx2i_ep_disconnect,
2305 .set_path = bnx2i_nl_set_path,
2306 /* Error recovery timeout call */
2307 .session_recovery_timedout = iscsi_session_recovery_timedout,
2308 .cleanup_task = bnx2i_cleanup_task,
2309 };
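
/*
 * For reference, a minimal sketch of how these templates are typically
 * consumed at module init (the actual registration for bnx2i lives in
 * bnx2i_init.c, not in this file):
 *
 *	bnx2i_scsi_xport_template =
 *		iscsi_register_transport(&bnx2i_iscsi_transport);
 *	if (!bnx2i_scsi_xport_template)
 *		return -ENOMEM;		// hypothetical error handling
 *
 * with a matching iscsi_unregister_transport(&bnx2i_iscsi_transport) on
 * module exit.
 */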
2310