/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
static void csio_rnode_exit(struct csio_rnode *);
/* State machine forward declarations */
static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);

/* RNF event mapping */
static enum csio_rn_ev fwevt_to_rnevt[] = {
        CSIO_RNFE_NONE,         /* None */
        CSIO_RNFE_LOGGED_IN,    /* PLOGI_ACC_RCVD */
        CSIO_RNFE_NONE,         /* PLOGI_RJT_RCVD */
        CSIO_RNFE_PLOGI_RECV,   /* PLOGI_RCVD */
        CSIO_RNFE_LOGO_RECV,    /* PLOGO_RCVD */
        CSIO_RNFE_PRLI_DONE,    /* PRLI_ACC_RCVD */
        CSIO_RNFE_NONE,         /* PRLI_RJT_RCVD */
        CSIO_RNFE_PRLI_RECV,    /* PRLI_RCVD */
        CSIO_RNFE_PRLO_RECV,    /* PRLO_RCVD */
        CSIO_RNFE_NONE,         /* NPORT_ID_CHGD */
        CSIO_RNFE_LOGO_RECV,    /* FLOGO_RCVD */
        CSIO_RNFE_NONE,         /* CLR_VIRT_LNK_RCVD */
        CSIO_RNFE_LOGGED_IN,    /* FLOGI_ACC_RCVD */
        CSIO_RNFE_NONE,         /* FLOGI_RJT_RCVD */
        CSIO_RNFE_LOGGED_IN,    /* FDISC_ACC_RCVD */
        CSIO_RNFE_NONE,         /* FDISC_RJT_RCVD */
        CSIO_RNFE_NONE,         /* FLOGI_TMO_MAX_RETRY */
        CSIO_RNFE_NONE,         /* IMPL_LOGO_ADISC_ACC */
        CSIO_RNFE_NONE,         /* IMPL_LOGO_ADISC_RJT */
        CSIO_RNFE_NONE,         /* IMPL_LOGO_ADISC_CNFLT */
        CSIO_RNFE_NONE,         /* PRLI_TMO */
        CSIO_RNFE_NONE,         /* ADISC_TMO */
        CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
        CSIO_RNFE_NONE,         /* SCR_ACC_RCVD */
        CSIO_RNFE_NONE,         /* ADISC_RJT_RCVD */
        CSIO_RNFE_NONE,         /* LOGO_SNT */
        CSIO_RNFE_LOGO_RECV,    /* PROTO_ERR_IMPL_LOGO */
};

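/*
 * Map a firmware rdev event to an rnode SM event; events beyond the end
 * of the table map to CSIO_RNFE_NONE.
 */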
#define CSIO_FWE_TO_RNFE(_evt)  (((_evt) > PROTO_ERR_IMPL_LOGO) ?       \
                                                CSIO_RNFE_NONE :        \
                                                fwevt_to_rnevt[(_evt)])

int
csio_is_rnode_ready(struct csio_rnode *rn)
{
        return csio_match_state(rn, csio_rns_ready);
}

static int
csio_is_rnode_uninit(struct csio_rnode *rn)
{
        return csio_match_state(rn, csio_rns_uninit);
}

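/* Returns 1 if the rport is a well-known port (fabric, name server or FDMI). */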
static int
csio_is_rnode_wka(uint8_t rport_type)
{
        if ((rport_type == FLOGI_VFPORT) ||
            (rport_type == FDISC_VFPORT) ||
            (rport_type == NS_VNPORT) ||
            (rport_type == FDMI_VNPORT))
                return 1;

        return 0;
}

/*
 * csio_rn_lookup - Finds the rnode with the given flowid
 * @ln: lnode
 * @flowid: flowid
 *
 * Does the rnode lookup on the given lnode and flowid. If no matching
 * entry is found, NULL is returned.
 */
static struct csio_rnode *
csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
{
        struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
        struct list_head *tmp;
        struct csio_rnode *rn;

        list_for_each(tmp, &rnhead->sm.sm_list) {
                rn = (struct csio_rnode *) tmp;
                if (rn->flowid == flowid)
                        return rn;
        }

        return NULL;
}

/*
 * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
 * @ln: lnode
 * @wwpn: wwpn
 *
 * Does the rnode lookup on the given lnode and wwpn. If no matching
 * entry is found, NULL is returned.
 */
static struct csio_rnode *
csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
{
        struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
        struct list_head *tmp;
        struct csio_rnode *rn;

        list_for_each(tmp, &rnhead->sm.sm_list) {
                rn = (struct csio_rnode *) tmp;
                if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
                        return rn;
        }

        return NULL;
}

/**
 * csio_rnode_lookup_portid - Finds the rnode with the given portid
 * @ln: lnode
 * @portid: port id
 *
 * Lookup the rnode list for a given portid. If no matching entry is
 * found, NULL is returned.
 */
struct csio_rnode *
csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
{
        struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
        struct list_head *tmp;
        struct csio_rnode *rn;

        list_for_each(tmp, &rnhead->sm.sm_list) {
                rn = (struct csio_rnode *) tmp;
                if (rn->nport_id == portid)
                        return rn;
        }

        return NULL;
}

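/*
 * csio_rn_dup_flowid - Detect a flowid already active on another lnode
 * @ln: lnode on which the rdev event arrived
 * @rdev_flowid: remote device flowid to check
 * @vnp_flowid: on a match, set to the flowid of the conflicting lnode
 *
 * Walks the rnode lists of all other lnodes on this HW and returns 1 if
 * a ready rnode with the same flowid exists, 0 otherwise.
 */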
static int
csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
                   uint32_t *vnp_flowid)
{
        struct csio_rnode *rnhead;
        struct list_head *tmp, *tmp1;
        struct csio_rnode *rn;
        struct csio_lnode *ln_tmp;
        struct csio_hw *hw = csio_lnode_to_hw(ln);

        list_for_each(tmp1, &hw->sln_head) {
                ln_tmp = (struct csio_lnode *) tmp1;
                if (ln_tmp == ln)
                        continue;

                rnhead = (struct csio_rnode *) &ln_tmp->rnhead;
                list_for_each(tmp, &rnhead->sm.sm_list) {
                        rn = (struct csio_rnode *) tmp;
                        if (csio_is_rnode_ready(rn)) {
                                if (rn->flowid == rdev_flowid) {
                                        *vnp_flowid = csio_ln_flowid(ln_tmp);
                                        return 1;
                                }
                        }
                }
        }

        return 0;
}

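/*
 * csio_alloc_rnode - Allocate and initialize an rnode
 * @ln: lnode
 *
 * Allocates an rnode from the HW mempool and links it to the given
 * lnode. Returns NULL on allocation or init failure.
 */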
static struct csio_rnode *
csio_alloc_rnode(struct csio_lnode *ln)
{
        struct csio_hw *hw = csio_lnode_to_hw(ln);

        struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
        if (!rn)
                goto err;

        memset(rn, 0, sizeof(struct csio_rnode));
        if (csio_rnode_init(rn, ln))
                goto err_free;

        CSIO_INC_STATS(ln, n_rnode_alloc);

        return rn;

err_free:
        mempool_free(rn, hw->rnode_mempool);
err:
        CSIO_INC_STATS(ln, n_rnode_nomem);
        return NULL;
}

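/*
 * csio_free_rnode - Uninitialize the rnode and return it to the mempool
 * @rn: rnode
 */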
static void
csio_free_rnode(struct csio_rnode *rn)
{
        struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));

        csio_rnode_exit(rn);
        CSIO_INC_STATS(rn->lnp, n_rnode_free);
        mempool_free(rn, hw->rnode_mempool);
}

/*
 * csio_get_rnode - Gets the rnode with the given flowid
 * @ln: lnode
 * @flowid: flow id
 *
 * Does the rnode lookup on the given lnode and flowid. If no matching
 * rnode is found, a new rnode is allocated, assigned the given flowid
 * and returned.
 */
static struct csio_rnode *
csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
{
        struct csio_rnode *rn;

        rn = csio_rn_lookup(ln, flowid);
        if (!rn) {
                rn = csio_alloc_rnode(ln);
                if (!rn)
                        return NULL;

                rn->flowid = flowid;
        }

        return rn;
}

/*
 * csio_put_rnode - Frees the given rnode
 * @ln: lnode
 * @rn: rnode
 *
 * Returns the rnode to the mempool. The rnode must be in the uninit
 * state.
 */
void
csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
{
        CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
        csio_free_rnode(rn);
}

/*
 * csio_confirm_rnode - confirms rnode based on wwpn.
 * @ln: lnode
 * @rdev_flowid: remote device flowid
 * @rdevp: remote device params
 *
 * This routine searches the rnode list for an entry with the same wwpn
 * as the new rnode. If a match is found, the matched rnode is returned;
 * otherwise a new rnode is returned.
 */
struct csio_rnode *
csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
                   struct fcoe_rdev_entry *rdevp)
{
        uint8_t rport_type;
        struct csio_rnode *rn, *match_rn;
        uint32_t vnp_flowid = 0;
        __be32 *port_id;

        port_id = (__be32 *) &rdevp->r_id[0];
        rport_type =
                FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);

        /* Drop rdev event for fabric controller port */
        if (rport_type == FAB_CTLR_VNPORT) {
                csio_ln_dbg(ln,
                            "Unhandled rport_type:%d recv in rdev evt "
                            "ssni:x%x\n", rport_type, rdev_flowid);
                return NULL;
        }

        /* Lookup on flowid */
        rn = csio_rn_lookup(ln, rdev_flowid);
        if (!rn) {
                /* Drop events with duplicate flowid */
                if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
                        csio_ln_warn(ln,
                                     "ssni:%x already active on vnpi:%x\n",
                                     rdev_flowid, vnp_flowid);
                        return NULL;
                }

                /* Lookup on wwpn for NPORTs */
                rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
                if (!rn)
                        goto alloc_rnode;

        } else {
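                /* An rnode with this flowid already exists */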
                /* Lookup well-known ports with nport id */
                if (csio_is_rnode_wka(rport_type)) {
                        match_rn = csio_rnode_lookup_portid(ln,
                                ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
                        if (match_rn == NULL) {
                                csio_rn_flowid(rn) = CSIO_INVALID_IDX;
                                goto alloc_rnode;
                        }

                        /*
                         * Now compare the wwpn to confirm that the same
                         * port re-logged in. If so, update the matched rn.
                         * Else, go ahead and alloc a new rnode.
                         */
                        if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
                                if (rn == match_rn)
                                        goto found_rnode;
                                csio_ln_dbg(ln,
                                            "nport_id:x%x and wwpn:%llx"
                                            " match for ssni:x%x\n",
                                            rn->nport_id,
                                            wwn_to_u64(rdevp->wwpn),
                                            rdev_flowid);
                                if (csio_is_rnode_ready(rn)) {
                                        csio_ln_warn(ln,
                                                     "rnode is already "
                                                     "active ssni:x%x\n",
                                                     rdev_flowid);
                                        CSIO_ASSERT(0);
                                }
                                csio_rn_flowid(rn) = CSIO_INVALID_IDX;
                                rn = match_rn;

                                /* Update rn */
                                goto found_rnode;
                        }
                        csio_rn_flowid(rn) = CSIO_INVALID_IDX;
                        goto alloc_rnode;
                }

                /* wwpn match */
                if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
                        goto found_rnode;

                /* Search for an rnode that has the same wwpn */
                match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
                if (match_rn != NULL) {
                        csio_ln_dbg(ln,
                                    "ssni:x%x changed for rport name(wwpn):%llx "
                                    "did:x%x\n", rdev_flowid,
                                    wwn_to_u64(rdevp->wwpn),
                                    match_rn->nport_id);
                        csio_rn_flowid(rn) = CSIO_INVALID_IDX;
                        rn = match_rn;
                } else {
                        csio_ln_dbg(ln,
                                    "rnode wwpn mismatch found ssni:x%x "
                                    "name(wwpn):%llx\n",
                                    rdev_flowid,
                                    wwn_to_u64(csio_rn_wwpn(rn)));
                        if (csio_is_rnode_ready(rn)) {
                                csio_ln_warn(ln,
                                             "rnode is already active "
                                             "wwpn:%llx ssni:x%x\n",
                                             wwn_to_u64(csio_rn_wwpn(rn)),
                                             rdev_flowid);
                                CSIO_ASSERT(0);
                        }
                        csio_rn_flowid(rn) = CSIO_INVALID_IDX;
                        goto alloc_rnode;
                }
        }

found_rnode:
        csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
                    rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

        /* Update flowid */
        csio_rn_flowid(rn) = rdev_flowid;

        /* Update rdev entry */
        rn->rdev_entry = rdevp;
        CSIO_INC_STATS(ln, n_rnode_match);
        return rn;

alloc_rnode:
        rn = csio_get_rnode(ln, rdev_flowid);
        if (!rn)
                return NULL;

        csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
                    rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

        /* Update rdev entry */
        rn->rdev_entry = rdevp;
        return rn;
}

/*
 * csio_rn_verify_rparams - Verify remote device parameters.
 * @ln: lnode
 * @rn: rnode
 * @rdevp: remote device params
 *
 * Returns 0 if the rparams are verified, -EINVAL otherwise.
 */
static int
csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
                       struct fcoe_rdev_entry *rdevp)
{
        uint8_t null[8];
        uint8_t rport_type;
        uint8_t fc_class;
        __be32 *did;

        did = (__be32 *) &rdevp->r_id[0];
        rport_type =
                FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
        switch (rport_type) {
        case FLOGI_VFPORT:
                rn->role = CSIO_RNFR_FABRIC;
                if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
                        csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
                                    csio_rn_flowid(rn));
                        return -EINVAL;
                }
                /* NPIV support */
                if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
                        ln->flags |= CSIO_LNF_NPIVSUPP;

                break;

        case NS_VNPORT:
                rn->role = CSIO_RNFR_NS;
                if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
                        csio_ln_err(ln, "ssni:x%x invalid name server portid\n",
                                    csio_rn_flowid(rn));
                        return -EINVAL;
                }
                break;

        case REG_FC4_VNPORT:
        case REG_VNPORT:
                rn->role = CSIO_RNFR_NPORT;
                if (rdevp->event_cause == PRLI_ACC_RCVD ||
                    rdevp->event_cause == PRLI_RCVD) {
                        if (FW_RDEV_WR_TASK_RETRY_ID_GET(
                                                rdevp->enh_disc_to_tgt))
                                rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;

                        if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
                                rn->fcp_flags |= FCP_SPPF_RETRY;

                        if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
                                rn->fcp_flags |= FCP_SPPF_CONF_COMPL;

                        if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
                                rn->role |= CSIO_RNFR_TARGET;

                        if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
                                rn->role |= CSIO_RNFR_INITIATOR;
                }

                break;

        case FDMI_VNPORT:
        case FAB_CTLR_VNPORT:
                rn->role = 0;
                break;

        default:
                csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
                            csio_rn_flowid(rn), rport_type);
                return -EINVAL;
        }

        /* Validate wwpn/wwnn for name server/remote port */
        if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
                memset(null, 0, 8);
                if (!memcmp(rdevp->wwnn, null, 8)) {
                        csio_ln_err(ln,
                                    "ssni:x%x invalid wwnn received from"
                                    " rport did:x%x\n",
                                    csio_rn_flowid(rn),
                                    (ntohl(*did) & CSIO_DID_MASK));
                        return -EINVAL;
                }

                if (!memcmp(rdevp->wwpn, null, 8)) {
                        csio_ln_err(ln,
                                    "ssni:x%x invalid wwpn received from"
                                    " rport did:x%x\n",
                                    csio_rn_flowid(rn),
                                    (ntohl(*did) & CSIO_DID_MASK));
                        return -EINVAL;
                }
        }

        /* Copy wwnn, wwpn and nport id */
        rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
        memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
        memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
        rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
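        /* Mark the FW-reported class of service valid; clsp[] is indexed by class - 1 */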
        fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
        rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);

        return 0;
}

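/*
 * __csio_reg_rnode - Register the rnode with the upper layers.
 * @rn: rnode
 *
 * Called with hw->lock held. The lock is dropped across csio_reg_rnode()
 * since registering the rport with the FC transport may block.
 */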
static void
__csio_reg_rnode(struct csio_rnode *rn)
{
        struct csio_lnode *ln = csio_rnode_to_lnode(rn);
        struct csio_hw *hw = csio_lnode_to_hw(ln);

        spin_unlock_irq(&hw->lock);
        csio_reg_rnode(rn);
        spin_lock_irq(&hw->lock);

        if (rn->role & CSIO_RNFR_TARGET)
                ln->n_scsi_tgts++;

        if (rn->nport_id == FC_FID_MGMT_SERV)
                csio_ln_fdmi_start(ln, (void *) rn);
}

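/*
 * __csio_unreg_rnode - Unregister the rnode from the upper layers.
 * @rn: rnode
 *
 * Pending I/Os on the rnode completion queue are moved to a local list
 * and cleaned up after the rport is unregistered. As with registration,
 * hw->lock is dropped across the potentially blocking csio_unreg_rnode().
 */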
static void
__csio_unreg_rnode(struct csio_rnode *rn)
{
        struct csio_lnode *ln = csio_rnode_to_lnode(rn);
        struct csio_hw *hw = csio_lnode_to_hw(ln);
        LIST_HEAD(tmp_q);
        int cmpl = 0;

        if (!list_empty(&rn->host_cmpl_q)) {
                csio_dbg(hw, "Returning completion queue I/Os\n");
                list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
                cmpl = 1;
        }

        if (rn->role & CSIO_RNFR_TARGET) {
                ln->n_scsi_tgts--;
                ln->last_scan_ntgts--;
        }

        spin_unlock_irq(&hw->lock);
        csio_unreg_rnode(rn);
        spin_lock_irq(&hw->lock);

        /* Cleanup I/Os that were waiting for rnode to unregister */
        if (cmpl)
                csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
}

/*****************************************************************************/
/* START: Rnode SM */
/*****************************************************************************/

/*
 * csio_rns_uninit - Uninit state event handler
 * @rn: rnode
 * @evt: SM event
 *
 * Handles events while the rnode is unregistered; a successful login
 * moves the rnode to the ready state.
 */
static void
csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
{
        struct csio_lnode *ln = csio_rnode_to_lnode(rn);
        int ret = 0;

        CSIO_INC_STATS(rn, n_evt_sm[evt]);

        switch (evt) {
        case CSIO_RNFE_LOGGED_IN:
        case CSIO_RNFE_PLOGI_RECV:
                ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
                if (!ret) {
                        csio_set_state(&rn->sm, csio_rns_ready);
                        __csio_reg_rnode(rn);
                } else {
                        CSIO_INC_STATS(rn, n_err_inval);
                }
                break;
        case CSIO_RNFE_LOGO_RECV:
                csio_ln_dbg(ln,
                            "ssni:x%x Ignoring event %d recv "
                            "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
                CSIO_INC_STATS(rn, n_evt_drop);
                break;
        default:
                csio_ln_dbg(ln,
                            "ssni:x%x unexp event %d recv "
                            "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
                CSIO_INC_STATS(rn, n_evt_unexp);
                break;
        }
}

/*
 * csio_rns_ready - Ready state event handler
 * @rn: rnode
 * @evt: SM event
 *
 * Handles events while the rnode is logged in and registered.
 */
static void
csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
{
        struct csio_lnode *ln = csio_rnode_to_lnode(rn);
        int ret = 0;

        CSIO_INC_STATS(rn, n_evt_sm[evt]);

        switch (evt) {
        case CSIO_RNFE_LOGGED_IN:
        case CSIO_RNFE_PLOGI_RECV:
                csio_ln_dbg(ln,
                            "ssni:x%x Ignoring event %d recv from did:x%x "
                            "in rn state[ready]\n", csio_rn_flowid(rn), evt,
                            rn->nport_id);
                CSIO_INC_STATS(rn, n_evt_drop);
                break;

        case CSIO_RNFE_PRLI_DONE:
        case CSIO_RNFE_PRLI_RECV:
                ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
                if (!ret)
                        __csio_reg_rnode(rn);
                else
                        CSIO_INC_STATS(rn, n_err_inval);

                break;
        case CSIO_RNFE_DOWN:
                csio_set_state(&rn->sm, csio_rns_offline);
                __csio_unreg_rnode(rn);

                /* FW is expected to internally abort outstanding SCSI WRs
                 * and return all SCSI WRs to the host with status "ABORTED".
                 */
                break;

        case CSIO_RNFE_LOGO_RECV:
                csio_set_state(&rn->sm, csio_rns_offline);

                __csio_unreg_rnode(rn);

                /* FW is expected to internally abort outstanding SCSI WRs
                 * and return all SCSI WRs to the host with status "ABORTED".
                 */
                break;

        case CSIO_RNFE_CLOSE:
                /*
                 * Each rnode receives a CLOSE event when the driver is
                 * removed or the device is reset.
                 * Note: All outstanding IOs on the remote port need to be
                 * returned to the upper layer with an appropriate error
                 * before sending the CLOSE event.
                 */
                csio_set_state(&rn->sm, csio_rns_uninit);
                __csio_unreg_rnode(rn);
                break;

        case CSIO_RNFE_NAME_MISSING:
                csio_set_state(&rn->sm, csio_rns_disappeared);
                __csio_unreg_rnode(rn);

                /*
                 * FW is expected to internally abort outstanding SCSI WRs
                 * and return all SCSI WRs to the host with status "ABORTED".
                 */
                break;

        default:
                csio_ln_dbg(ln,
                            "ssni:x%x unexp event %d recv from did:x%x "
                            "in rn state[ready]\n", csio_rn_flowid(rn), evt,
                            rn->nport_id);
                CSIO_INC_STATS(rn, n_evt_unexp);
                break;
        }
}

/*
 * csio_rns_offline - Offline state event handler
 * @rn: rnode
 * @evt: SM event
 *
 * Handles events after the remote port has gone down or logged out;
 * a new login brings the rnode back to the ready state.
 */
static void
csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
{
        struct csio_lnode *ln = csio_rnode_to_lnode(rn);
        int ret = 0;

        CSIO_INC_STATS(rn, n_evt_sm[evt]);

        switch (evt) {
        case CSIO_RNFE_LOGGED_IN:
        case CSIO_RNFE_PLOGI_RECV:
                ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
                if (!ret) {
                        csio_set_state(&rn->sm, csio_rns_ready);
                        __csio_reg_rnode(rn);
                } else {
                        CSIO_INC_STATS(rn, n_err_inval);
                        csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
                }
                break;

        case CSIO_RNFE_DOWN:
                csio_ln_dbg(ln,
                            "ssni:x%x Ignoring event %d recv from did:x%x "
                            "in rn state[offline]\n", csio_rn_flowid(rn), evt,
                            rn->nport_id);
                CSIO_INC_STATS(rn, n_evt_drop);
                break;

        case CSIO_RNFE_CLOSE:
                /* Each rnode receives a CLOSE event when the driver is
                 * removed or the device is reset.
                 * Note: All outstanding IOs on the remote port need to be
                 * returned to the upper layer with an appropriate error
                 * before sending the CLOSE event.
                 */
                csio_set_state(&rn->sm, csio_rns_uninit);
                break;

        case CSIO_RNFE_NAME_MISSING:
                csio_set_state(&rn->sm, csio_rns_disappeared);
                break;

        default:
                csio_ln_dbg(ln,
                            "ssni:x%x unexp event %d recv from did:x%x "
                            "in rn state[offline]\n", csio_rn_flowid(rn), evt,
                            rn->nport_id);
                CSIO_INC_STATS(rn, n_evt_unexp);
                break;
        }
}

/*
 * csio_rns_disappeared - Disappeared state event handler
 * @rn: rnode
 * @evt: SM event
 *
 * Handles events after the remote port name went missing (RSCN device
 * lost); a new login brings the rnode back to the ready state.
 */
static void
csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
{
        struct csio_lnode *ln = csio_rnode_to_lnode(rn);
        int ret = 0;

        CSIO_INC_STATS(rn, n_evt_sm[evt]);

        switch (evt) {
        case CSIO_RNFE_LOGGED_IN:
        case CSIO_RNFE_PLOGI_RECV:
                ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
                if (!ret) {
                        csio_set_state(&rn->sm, csio_rns_ready);
                        __csio_reg_rnode(rn);
                } else {
                        CSIO_INC_STATS(rn, n_err_inval);
                        csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
                }
                break;

        case CSIO_RNFE_CLOSE:
                /* Each rnode receives a CLOSE event when the driver is
                 * removed or the device is reset.
                 * Note: All outstanding IOs on the remote port need to be
                 * returned to the upper layer with an appropriate error
                 * before sending the CLOSE event.
                 */
                csio_set_state(&rn->sm, csio_rns_uninit);
                break;

        case CSIO_RNFE_DOWN:
        case CSIO_RNFE_NAME_MISSING:
                csio_ln_dbg(ln,
                            "ssni:x%x Ignoring event %d recv from did x%x "
                            "in rn state[disappeared]\n", csio_rn_flowid(rn),
                            evt, rn->nport_id);
                break;

        default:
                csio_ln_dbg(ln,
                            "ssni:x%x unexp event %d recv from did x%x "
                            "in rn state[disappeared]\n", csio_rn_flowid(rn),
                            evt, rn->nport_id);
                CSIO_INC_STATS(rn, n_evt_unexp);
                break;
        }
}

/*****************************************************************************/
/* END: Rnode SM */
/*****************************************************************************/

/*
 * csio_rnode_devloss_handler - Device loss event handler
 * @rn: rnode
 *
 * Post event to close rnode SM and free rnode.
 */
void
csio_rnode_devloss_handler(struct csio_rnode *rn)
{
        struct csio_lnode *ln = csio_rnode_to_lnode(rn);

        /* Ignore if the same rnode came back online */
        if (csio_is_rnode_ready(rn))
                return;

        csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);

        /* Free rn if in uninit state */
        if (csio_is_rnode_uninit(rn))
                csio_put_rnode(ln, rn);
}

/**
 * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
 * @rn: rnode
 * @fwevt: firmware event to handle
 */
void
csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
{
        struct csio_lnode *ln = csio_rnode_to_lnode(rn);
        enum csio_rn_ev evt;

        evt = CSIO_FWE_TO_RNFE(fwevt);
        if (!evt) {
                csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
                            csio_rn_flowid(rn), fwevt);
                CSIO_INC_STATS(rn, n_evt_unexp);
                return;
        }
        CSIO_INC_STATS(rn, n_evt_fw[fwevt]);

        /* Track previous & current events for debugging */
        rn->prev_evt = rn->cur_evt;
        rn->cur_evt = fwevt;

        /* Post event to rnode SM */
        csio_post_event(&rn->sm, evt);

        /* Free rn if in uninit state */
        if (csio_is_rnode_uninit(rn))
                csio_put_rnode(ln, rn);
}

/*
 * csio_rnode_init - Initialize rnode.
 * @rn: rnode
 * @ln: Associated lnode
 *
 * Caller is responsible for holding the lock; it is required for
 * inserting the rnode into the ln->rnhead list.
 */
static int
csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
{
        csio_rnode_to_lnode(rn) = ln;
        csio_init_state(&rn->sm, csio_rns_uninit);
        INIT_LIST_HEAD(&rn->host_cmpl_q);
        csio_rn_flowid(rn) = CSIO_INVALID_IDX;

        /* Add rnode to the lnode's rnhead list */
        list_add_tail(&rn->sm.sm_list, &ln->rnhead);

        return 0;
}

static void
csio_rnode_exit(struct csio_rnode *rn)
{
        list_del_init(&rn->sm.sm_list);
        CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
}