// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * NPORT
 *
 * Port object for physical port and NPIV ports.
 */

/*
 * NPORT REFERENCE COUNTING
 *
 * A nport reference should be taken when:
 * - an nport is allocated
 * - a vport populates associated nport
 * - a remote node is allocated
 * - an unsolicited frame is processed
 * The reference should be dropped when:
 * - the unsolicited frame processing is done
 * - the remote node is removed
 * - the vport is removed
 * - the nport is removed
 */
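
/*
 * Illustrative sketch of the take/drop pattern above, using only calls that
 * already appear in this file: a holder takes a reference with kref_get()
 * (or kref_get_unless_zero() when racing with teardown) and drops it with
 * kref_put() against nport->release, which points at _efc_nport_free():
 *
 *      kref_get(&nport->ref);                          // e.g. vport takes a hold
 *      ...
 *      kref_put(&nport->ref, nport->release);          // last put frees the nport
 */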

#include "efc.h"

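/*
 * Post an event to an nport state machine.
 *
 * Completion/event callback: @arg is the struct efc instance and @data the
 * nport the event applies to. The event (e.g. EFC_EVT_NPORT_ATTACH_OK or
 * EFC_EVT_NPORT_FREE_OK) is forwarded into nport->sm under efc->lock.
 */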
void
efc_nport_cb(void *arg, int event, void *data)
{
        struct efc *efc = arg;
        struct efc_nport *nport = data;
        unsigned long flags = 0;

        efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event));

        spin_lock_irqsave(&efc->lock, flags);
        efc_sm_post_event(&nport->sm, event, NULL);
        spin_unlock_irqrestore(&efc->lock, flags);
}

static struct efc_nport *
efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn)
{
        struct efc_nport *nport = NULL;

        /* Find a nport, given the WWNN and WWPN */
        list_for_each_entry(nport, &domain->nport_list, list_entry) {
                if (nport->wwnn == wwnn && nport->wwpn == wwpn)
                        return nport;
        }
        return NULL;
}

static void
_efc_nport_free(struct kref *arg)
{
        struct efc_nport *nport = container_of(arg, struct efc_nport, ref);

        kfree(nport);
}

struct efc_nport *
efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
                u32 fc_id, bool enable_ini, bool enable_tgt)
{
        struct efc_nport *nport;

        if (domain->efc->enable_ini)
                enable_ini = 0;

        /* Return a failure if this nport has already been allocated */
        if ((wwpn != 0) || (wwnn != 0)) {
                nport = efc_nport_find_wwn(domain, wwnn, wwpn);
                if (nport) {
                        efc_log_err(domain->efc,
                                    "NPORT %016llX %016llX already allocated\n",
                                    wwnn, wwpn);
                        return NULL;
                }
        }

        nport = kzalloc(sizeof(*nport), GFP_ATOMIC);
        if (!nport)
                return nport;

        /* initialize refcount */
        kref_init(&nport->ref);
        nport->release = _efc_nport_free;

        nport->efc = domain->efc;
        snprintf(nport->display_name, sizeof(nport->display_name), "------");
        nport->domain = domain;
        xa_init(&nport->lookup);
        nport->instance_index = domain->nport_count++;
        nport->sm.app = nport;
        nport->enable_ini = enable_ini;
        nport->enable_tgt = enable_tgt;
        nport->enable_rscn = (nport->enable_ini ||
                        (nport->enable_tgt && enable_target_rscn(nport->efc)));

        /* Copy service parameters from domain */
        memcpy(nport->service_params, domain->service_params,
               sizeof(struct fc_els_flogi));

        /* Update requested fc_id */
        nport->fc_id = fc_id;

        /* Update the nport's service parameters for the new WWNs */
        nport->wwpn = wwpn;
        nport->wwnn = wwnn;
        snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX",
                 (unsigned long long)wwnn);

        /*
         * if this is the "first" nport of the domain,
         * then make it the "phys" nport
         */
        if (list_empty(&domain->nport_list))
                domain->nport = nport;

        INIT_LIST_HEAD(&nport->list_entry);
        list_add_tail(&nport->list_entry, &domain->nport_list);

        kref_get(&domain->ref);

        efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name);

        return nport;
}

void
efc_nport_free(struct efc_nport *nport)
{
        struct efc_domain *domain;

        if (!nport)
                return;

        domain = nport->domain;
        efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name);
        list_del(&nport->list_entry);
        /*
         * if this is the physical nport,
         * then clear it out of the domain
         */
        if (nport == domain->nport)
                domain->nport = NULL;

        xa_destroy(&nport->lookup);
        xa_erase(&domain->lookup, nport->fc_id);

        if (list_empty(&domain->nport_list))
                efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE,
                                      NULL);

        kref_put(&domain->ref, domain->release);
        kref_put(&nport->ref, nport->release);
}

struct efc_nport *
efc_nport_find(struct efc_domain *domain, u32 d_id)
{
        struct efc_nport *nport;

        /* Find a nport object, given an FC_ID */
        nport = xa_load(&domain->lookup, d_id);
        if (!nport || !kref_get_unless_zero(&nport->ref))
                return NULL;

        return nport;
}
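
/*
 * Note: efc_nport_find() takes a reference on the nport it returns
 * (kref_get_unless_zero() above), so a successful lookup must be balanced
 * by the caller; an illustrative sketch:
 *
 *      nport = efc_nport_find(domain, d_id);
 *      if (nport) {
 *              ... use the nport ...
 *              kref_put(&nport->ref, nport->release);
 *      }
 */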

int
efc_nport_attach(struct efc_nport *nport, u32 fc_id)
{
        int rc;
        struct efc_node *node;
        struct efc *efc = nport->efc;
        unsigned long index;

        /* Set our lookup */
        rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC));
        if (rc) {
                efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
                return rc;
        }

        /* Update our display_name */
        efc_node_fcid_display(fc_id, nport->display_name,
                              sizeof(nport->display_name));

        xa_for_each(&nport->lookup, index, node) {
                efc_node_update_display_name(node);
        }

        efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n",
                      nport->display_name, fc_id);

        /* Register a nport, given an FC_ID */
        rc = efc_cmd_nport_attach(efc, nport, fc_id);
        if (rc < 0) {
                efc_log_err(nport->efc,
                            "efc_hw_port_attach failed: %d\n", rc);
                return -EIO;
        }
        return 0;
}
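
/*
 * Note: the xa_store() in efc_nport_attach() is what makes the nport
 * resolvable by FC_ID through efc_nport_find()/xa_load(); the entry is
 * erased again in efc_nport_free() and on nport shutdown.
 */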

static void
efc_nport_shutdown(struct efc_nport *nport)
{
        struct efc *efc = nport->efc;
        struct efc_node *node;
        unsigned long index;

        xa_for_each(&nport->lookup, index, node) {
                if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) {
                        efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
                        continue;
                }

                /*
                 * If this is a vport, log out of the fabric
                 * controller so that it deletes the vport
                 * on the switch.
                 */
                /* if link is down, don't send logo */
                if (efc->link_status == EFC_LINK_STATUS_DOWN) {
                        efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
                        continue;
                }

                efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n",
                              node->display_name);

                if (!efc_send_logo(node)) {
                        /* sent LOGO, wait for response */
                        efc_node_transition(node, __efc_d_wait_logo_rsp, NULL);
                        continue;
                }

                /*
                 * failed to send LOGO,
                 * go ahead and clean up the node anyway
                 */
                node_printf(node, "Failed to send LOGO\n");
                efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
        }
}

static void
efc_vport_link_down(struct efc_nport *nport)
{
        struct efc *efc = nport->efc;
        struct efc_vport *vport;

        /* Clear the nport reference in the vport specification */
        list_for_each_entry(vport, &efc->vport_list, list_entry) {
                if (vport->nport == nport) {
                        kref_put(&nport->ref, nport->release);
                        vport->nport = NULL;
                        break;
                }
        }
}

static void
__efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx,
                   enum efc_sm_event evt, void *arg)
{
        struct efc_nport *nport = ctx->app;
        struct efc_domain *domain = nport->domain;
        struct efc *efc = nport->efc;

        switch (evt) {
        case EFC_EVT_ENTER:
        case EFC_EVT_REENTER:
        case EFC_EVT_EXIT:
        case EFC_EVT_ALL_CHILD_NODES_FREE:
                break;
        case EFC_EVT_NPORT_ATTACH_OK:
                efc_sm_transition(ctx, __efc_nport_attached, NULL);
                break;
        case EFC_EVT_SHUTDOWN:
                /* Flag this nport as shutting down */
                nport->shutting_down = true;

                if (nport->is_vport)
                        efc_vport_link_down(nport);

                if (xa_empty(&nport->lookup)) {
                        /* Remove the nport from the domain's lookup table */
                        xa_erase(&domain->lookup, nport->fc_id);
                        efc_sm_transition(ctx, __efc_nport_wait_port_free,
                                          NULL);
                        if (efc_cmd_nport_free(efc, nport)) {
                                efc_log_debug(nport->efc,
                                              "efc_hw_port_free failed\n");
                                /* Not much we can do, free the nport anyway */
                                efc_nport_free(nport);
                        }
                } else {
                        /* sm: node list is not empty / shutdown nodes */
                        efc_sm_transition(ctx,
                                          __efc_nport_wait_shutdown, NULL);
                        efc_nport_shutdown(nport);
                }
                break;
        default:
                efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n",
                              nport->display_name, funcname,
                              efc_sm_event_name(evt));
        }
}

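/*
 * nport state machine overview (states implemented below):
 *
 *   physical nport: __efc_nport_allocated
 *                     --EFC_EVT_NPORT_ATTACH_OK--> __efc_nport_attached
 *
 *   NPIV vport:     __efc_nport_vport_init --> __efc_nport_vport_wait_alloc
 *                     --> __efc_nport_vport_allocated --> __efc_nport_attached
 *
 *   shutdown:       EFC_EVT_SHUTDOWN (handled in __efc_nport_common())
 *                     --> __efc_nport_wait_shutdown (if nodes remain)
 *                     --> __efc_nport_wait_port_free --> efc_nport_free()
 *
 * Events not handled by a specific state are passed to __efc_nport_common().
 */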
void
__efc_nport_allocated(struct efc_sm_ctx *ctx,
                      enum efc_sm_event evt, void *arg)
{
        struct efc_nport *nport = ctx->app;
        struct efc_domain *domain = nport->domain;

        nport_sm_trace(nport);

        switch (evt) {
        /* the physical nport is attached */
        case EFC_EVT_NPORT_ATTACH_OK:
                WARN_ON(nport != domain->nport);
                efc_sm_transition(ctx, __efc_nport_attached, NULL);
                break;

        case EFC_EVT_NPORT_ALLOC_OK:
                /* ignore */
                break;
        default:
                __efc_nport_common(__func__, ctx, evt, arg);
        }
}

void
__efc_nport_vport_init(struct efc_sm_ctx *ctx,
                       enum efc_sm_event evt, void *arg)
{
        struct efc_nport *nport = ctx->app;
        struct efc *efc = nport->efc;

        nport_sm_trace(nport);

        switch (evt) {
        case EFC_EVT_ENTER: {
                __be64 be_wwpn = cpu_to_be64(nport->wwpn);

                if (nport->wwpn == 0)
                        efc_log_debug(efc, "vport: letting f/w select WWN\n");

                if (nport->fc_id != U32_MAX) {
                        efc_log_debug(efc, "vport: hard coding port id: %x\n",
                                      nport->fc_id);
                }

                efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL);
                /* If wwpn is zero, then we'll let the f/w assign wwpn */
                if (efc_cmd_nport_alloc(efc, nport, nport->domain,
                                        nport->wwpn == 0 ? NULL :
                                        (uint8_t *)&be_wwpn)) {
                        efc_log_err(efc, "Can't allocate port\n");
                        break;
                }

                break;
        }
        default:
                __efc_nport_common(__func__, ctx, evt, arg);
        }
}

void
__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
                             enum efc_sm_event evt, void *arg)
{
        struct efc_nport *nport = ctx->app;
        struct efc *efc = nport->efc;

        nport_sm_trace(nport);

        switch (evt) {
        case EFC_EVT_NPORT_ALLOC_OK: {
                struct fc_els_flogi *sp;

                sp = (struct fc_els_flogi *)nport->service_params;

                if (nport->wwnn == 0) {
                        nport->wwnn = be64_to_cpu(nport->sli_wwnn);
                        nport->wwpn = be64_to_cpu(nport->sli_wwpn);
                        snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
                                 "%016llX", nport->wwpn);
                }

                /* Update the nport's service parameters */
                sp->fl_wwpn = cpu_to_be64(nport->wwpn);
                sp->fl_wwnn = cpu_to_be64(nport->wwnn);

                /*
                 * if nport->fc_id is uninitialized,
                 * then request that the fabric node use FDISC
                 * to find an fc_id.
                 * Otherwise we're restoring vports, or we're in
                 * fabric emulation mode, so attach the fc_id
                 */
                if (nport->fc_id == U32_MAX) {
                        struct efc_node *fabric;

                        fabric = efc_node_alloc(nport, FC_FID_FLOGI, false,
                                                false);
                        if (!fabric) {
                                efc_log_err(efc, "efc_node_alloc() failed\n");
                                return;
                        }
                        efc_node_transition(fabric, __efc_vport_fabric_init,
                                            NULL);
                } else {
                        snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
                                 "%016llX", nport->wwpn);
                        efc_nport_attach(nport, nport->fc_id);
                }
                efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL);
                break;
        }
        default:
                __efc_nport_common(__func__, ctx, evt, arg);
        }
}

void
__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
                            enum efc_sm_event evt, void *arg)
{
        struct efc_nport *nport = ctx->app;
        struct efc *efc = nport->efc;

        nport_sm_trace(nport);

        /*
         * This state is entered after the nport is allocated;
         * it then waits for a fabric node
         * FDISC to complete, which requests a nport attach.
         * The nport attach complete is handled in this state.
         */
        switch (evt) {
        case EFC_EVT_NPORT_ATTACH_OK: {
                struct efc_node *node;

                /* Find our fabric node, and forward this event */
                node = efc_node_find(nport, FC_FID_FLOGI);
                if (!node) {
                        efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI);
                        break;
                }
                /* sm: / forward nport attach to fabric node */
                efc_node_post_event(node, evt, NULL);
                efc_sm_transition(ctx, __efc_nport_attached, NULL);
                break;
        }
        default:
                __efc_nport_common(__func__, ctx, evt, arg);
        }
}

static void
efc_vport_update_spec(struct efc_nport *nport)
{
        struct efc *efc = nport->efc;
        struct efc_vport *vport;
        unsigned long flags = 0;

        spin_lock_irqsave(&efc->vport_lock, flags);
        list_for_each_entry(vport, &efc->vport_list, list_entry) {
                if (vport->nport == nport) {
                        vport->wwnn = nport->wwnn;
                        vport->wwpn = nport->wwpn;
                        vport->tgt_data = nport->tgt_data;
                        vport->ini_data = nport->ini_data;
                        break;
                }
        }
        spin_unlock_irqrestore(&efc->vport_lock, flags);
}

void
__efc_nport_attached(struct efc_sm_ctx *ctx,
                     enum efc_sm_event evt, void *arg)
{
        struct efc_nport *nport = ctx->app;
        struct efc *efc = nport->efc;

        nport_sm_trace(nport);

        switch (evt) {
        case EFC_EVT_ENTER: {
                struct efc_node *node;
                unsigned long index;

                efc_log_debug(efc,
                              "[%s] NPORT attached WWPN %016llX WWNN %016llX\n",
                              nport->display_name,
                              nport->wwpn, nport->wwnn);

                xa_for_each(&nport->lookup, index, node)
                        efc_node_update_display_name(node);

                efc->tt.new_nport(efc, nport);

                /*
                 * Update the vport (if it's not the physical nport)
                 * parameters
                 */
                if (nport->is_vport)
                        efc_vport_update_spec(nport);
                break;
        }

        case EFC_EVT_EXIT:
                efc_log_debug(efc,
                              "[%s] NPORT detached WWPN %016llX WWNN %016llX\n",
                              nport->display_name,
                              nport->wwpn, nport->wwnn);

                efc->tt.del_nport(efc, nport);
                break;
        default:
                __efc_nport_common(__func__, ctx, evt, arg);
        }
}

void
__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
                          enum efc_sm_event evt, void *arg)
{
        struct efc_nport *nport = ctx->app;
        struct efc_domain *domain = nport->domain;
        struct efc *efc = nport->efc;

        nport_sm_trace(nport);

        switch (evt) {
        case EFC_EVT_NPORT_ALLOC_OK:
        case EFC_EVT_NPORT_ALLOC_FAIL:
        case EFC_EVT_NPORT_ATTACH_OK:
        case EFC_EVT_NPORT_ATTACH_FAIL:
                /* ignore these events - just wait for the all free event */
                break;

        case EFC_EVT_ALL_CHILD_NODES_FREE: {
                /*
                 * Remove the nport from the domain's
                 * sparse vector lookup table
                 */
                xa_erase(&domain->lookup, nport->fc_id);
                efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL);
                if (efc_cmd_nport_free(efc, nport)) {
                        efc_log_err(nport->efc, "efc_hw_port_free failed\n");
                        /* Not much we can do, free the nport anyway */
                        efc_nport_free(nport);
                }
                break;
        }
        default:
                __efc_nport_common(__func__, ctx, evt, arg);
        }
}

void
__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
                           enum efc_sm_event evt, void *arg)
{
        struct efc_nport *nport = ctx->app;

        nport_sm_trace(nport);

        switch (evt) {
        case EFC_EVT_NPORT_ATTACH_OK:
                /* Ignore as we are waiting for the free CB */
                break;
        case EFC_EVT_NPORT_FREE_OK: {
                /* All done, free myself */
                efc_nport_free(nport);
                break;
        }
        default:
                __efc_nport_common(__func__, ctx, evt, arg);
        }
}

static int
efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport)
{
        struct efc_nport *nport;

        lockdep_assert_held(&domain->efc->lock);

        nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
                                vport->enable_ini, vport->enable_tgt);
        vport->nport = nport;
        if (!nport)
                return -EIO;

        kref_get(&nport->ref);
        nport->is_vport = true;
        nport->tgt_data = vport->tgt_data;
        nport->ini_data = vport->ini_data;

        efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL);

        return 0;
}

int
efc_vport_start(struct efc_domain *domain)
{
        struct efc *efc = domain->efc;
        struct efc_vport *vport;
        struct efc_vport *next;
        int rc = 0;
        unsigned long flags = 0;

        /* Use the vport spec to find the associated vports and start them */
        spin_lock_irqsave(&efc->vport_lock, flags);
        list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
                if (!vport->nport) {
                        if (efc_vport_nport_alloc(domain, vport))
                                rc = -EIO;
                }
        }
        spin_unlock_irqrestore(&efc->vport_lock, flags);

        return rc;
}

int
efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
                    u32 fc_id, bool ini, bool tgt, void *tgt_data,
                    void *ini_data)
{
        struct efc *efc = domain->efc;
        struct efc_vport *vport;
        int rc = 0;
        unsigned long flags = 0;

        if (ini && domain->efc->enable_ini == 0) {
                efc_log_debug(efc, "driver initiator mode not enabled\n");
                return -EIO;
        }

        if (tgt && domain->efc->enable_tgt == 0) {
                efc_log_debug(efc, "driver target mode not enabled\n");
                return -EIO;
        }

        /*
         * Create a vport spec if we need to recreate
         * this vport after a link up event
         */
        vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt,
                                      tgt_data, ini_data);
        if (!vport) {
                efc_log_err(efc, "failed to create vport object entry\n");
                return -EIO;
        }

        spin_lock_irqsave(&efc->lock, flags);
        rc = efc_vport_nport_alloc(domain, vport);
        spin_unlock_irqrestore(&efc->lock, flags);

        return rc;
}
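
/*
 * Illustrative (hypothetical) caller of efc_nport_vport_new() above and
 * efc_nport_vport_del() below, creating and later deleting an NPIV vport.
 * The WWPN/WWNN values are placeholders and "my_tgt_data" is an assumed
 * private pointer, not part of this API:
 *
 *      rc = efc_nport_vport_new(domain, 0x2100000000000001ULL,
 *                               0x2000000000000001ULL, U32_MAX,
 *                               false, true, my_tgt_data, NULL);
 *      ...
 *      rc = efc_nport_vport_del(efc, domain, 0x2100000000000001ULL,
 *                               0x2000000000000001ULL);
 *
 * Passing U32_MAX as fc_id lets the vport request an address via FDISC (see
 * __efc_nport_vport_wait_alloc() above).
 */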

int
efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
                    u64 wwpn, uint64_t wwnn)
{
        struct efc_nport *nport;
        struct efc_vport *vport;
        struct efc_vport *next;
        unsigned long flags = 0;

        spin_lock_irqsave(&efc->vport_lock, flags);
        /* walk the efc_vport_list and remove from there */
        list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
                if (vport->wwpn == wwpn && vport->wwnn == wwnn) {
                        list_del(&vport->list_entry);
                        kfree(vport);
                        break;
                }
        }
        spin_unlock_irqrestore(&efc->vport_lock, flags);

        if (!domain) {
                /* No domain means no nport to look for */
                return 0;
        }

        spin_lock_irqsave(&efc->lock, flags);
        list_for_each_entry(nport, &domain->nport_list, list_entry) {
                if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
                        kref_put(&nport->ref, nport->release);
                        /* Shutdown this NPORT */
                        efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
                        break;
                }
        }

        spin_unlock_irqrestore(&efc->lock, flags);
        return 0;
}

void
efc_vport_del_all(struct efc *efc)
{
        struct efc_vport *vport;
        struct efc_vport *next;
        unsigned long flags = 0;

        spin_lock_irqsave(&efc->vport_lock, flags);
        list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
                list_del(&vport->list_entry);
                kfree(vport);
        }
        spin_unlock_irqrestore(&efc->vport_lock, flags);
}

struct efc_vport *
efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn,
                      u32 fc_id, bool enable_ini,
                      bool enable_tgt, void *tgt_data, void *ini_data)
{
        struct efc_vport *vport;
        unsigned long flags = 0;

        /*
         * walk the efc_vport_list and return failure
         * if a valid vport entry (one with a non-zero WWPN and WWNN)
         * has already been created
         */
        spin_lock_irqsave(&efc->vport_lock, flags);
        list_for_each_entry(vport, &efc->vport_list, list_entry) {
                if ((wwpn && vport->wwpn == wwpn) &&
                    (wwnn && vport->wwnn == wwnn)) {
                        efc_log_err(efc,
                                    "VPORT %016llX %016llX already allocated\n",
                                    wwnn, wwpn);
                        spin_unlock_irqrestore(&efc->vport_lock, flags);
                        return NULL;
                }
        }

        vport = kzalloc(sizeof(*vport), GFP_ATOMIC);
        if (!vport) {
                spin_unlock_irqrestore(&efc->vport_lock, flags);
                return NULL;
        }

        vport->wwnn = wwnn;
        vport->wwpn = wwpn;
        vport->fc_id = fc_id;
        vport->enable_tgt = enable_tgt;
        vport->enable_ini = enable_ini;
        vport->tgt_data = tgt_data;
        vport->ini_data = ini_data;

        INIT_LIST_HEAD(&vport->list_entry);
        list_add_tail(&vport->list_entry, &efc->vport_list);
        spin_unlock_irqrestore(&efc->vport_lock, flags);
        return vport;
}