1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Copyright(c) 2007 Intel Corporation. All rights reserved.
4   * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
5   * Copyright(c) 2008 Mike Christie
6   *
7   * Maintained at www.Open-FCoE.org
8   */
9  
10  /*
11   * Fibre Channel exchange and sequence handling.
12   */
13  
14  #include <linux/timer.h>
15  #include <linux/slab.h>
16  #include <linux/err.h>
17  #include <linux/export.h>
18  #include <linux/log2.h>
19  
20  #include <scsi/fc/fc_fc2.h>
21  
22  #include <scsi/libfc.h>
23  #include <scsi/fc_encode.h>
24  
25  #include "fc_libfc.h"
26  
27  u16	fc_cpu_mask;		/* cpu mask for possible cpus */
28  EXPORT_SYMBOL(fc_cpu_mask);
29  static u16	fc_cpu_order;	/* log2 of the (rounded-up) possible CPU count */
30  static struct kmem_cache *fc_em_cachep;	       /* cache for exchanges */
31  static struct workqueue_struct *fc_exch_workqueue;
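/*
 * Illustrative only: a worked example of the XID <-> (CPU, pool index)
 * mapping implemented by fc_exch_em_alloc() and fc_exch_find() below,
 * assuming 4 possible CPUs (so fc_cpu_order == 2, fc_cpu_mask == 3) and a
 * min_xid whose low fc_cpu_order bits are zero:
 *
 *	xid   = (index << fc_cpu_order | cpu) + mp->min_xid;
 *	cpu   = xid & fc_cpu_mask;			// 0x25 & 3  -> CPU 1
 *	index = (xid - mp->min_xid) >> fc_cpu_order;	// 0x25 >> 2 -> slot 9
 *
 * i.e. an exchange with xid == mp->min_xid + 0x25 lives in CPU 1's pool at
 * array index 9.
 */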
32  
33  /*
34   * Structure and function definitions for managing Fibre Channel Exchanges
35   * and Sequences.
36   *
37   * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
38   *
39   * fc_exch_mgr holds the exchange state for an N port.
40   *
41   * fc_exch holds state for one exchange and links to its active sequence.
42   *
43   * fc_seq holds the state for an individual sequence.
44   */
45  
46  /**
47   * struct fc_exch_pool - Per cpu exchange pool
48   * @next_index:	  Next possible free exchange index
49   * @total_exches: Total allocated exchanges
50   * @lock:	  Exch pool lock
51   * @ex_list:	  List of exchanges
52   * @left:	  Cache of free slot in exch array
53   * @right:	  Cache of free slot in exch array
54   *
55   * This structure manages per-CPU exchanges via an array of exchange pointers.
56   * The array is allocated immediately after the struct fc_exch_pool memory
57   * and holds the range of exchanges assigned to this CPU's pool.
58   */
59  struct fc_exch_pool {
60  	spinlock_t	 lock;
61  	struct list_head ex_list;
62  	u16		 next_index;
63  	u16		 total_exches;
64  
65  	u16		 left;
66  	u16		 right;
67  } ____cacheline_aligned_in_smp;
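/*
 * For illustration, the memory layout described above and relied upon by
 * fc_exch_ptr_get()/fc_exch_ptr_set() below: the exchange-pointer array sits
 * directly after the pool header in the same per-CPU allocation and is
 * reached via (struct fc_exch **)(pool + 1).
 *
 *	+---------------------+---------+---------+-----+----------------------+
 *	| struct fc_exch_pool | exch[0] | exch[1] | ... | exch[pool_max_index] |
 *	+---------------------+---------+---------+-----+----------------------+
 */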
68  
69  /**
70   * struct fc_exch_mgr - The Exchange Manager (EM).
71   * @class:	    Default class for new sequences
72   * @kref:	    Reference counter
73   * @min_xid:	    Minimum exchange ID
74   * @max_xid:	    Maximum exchange ID
75   * @ep_pool:	    Reserved exchange pointers
76   * @pool_max_index: Max exch array index in exch pool
77   * @pool:	    Per cpu exch pool
78   * @lport:	    Local exchange port
79   * @stats:	    Statistics structure
80   *
81   * This structure is the center for creating exchanges and sequences.
82   * It manages the allocation of exchange IDs.
83   */
84  struct fc_exch_mgr {
85  	struct fc_exch_pool __percpu *pool;
86  	mempool_t	*ep_pool;
87  	struct fc_lport	*lport;
88  	enum fc_class	class;
89  	struct kref	kref;
90  	u16		min_xid;
91  	u16		max_xid;
92  	u16		pool_max_index;
93  
94  	struct {
95  		atomic_t no_free_exch;
96  		atomic_t no_free_exch_xid;
97  		atomic_t xid_not_found;
98  		atomic_t xid_busy;
99  		atomic_t seq_not_found;
100  		atomic_t non_bls_resp;
101  	} stats;
102  };
103  
104  /**
105   * struct fc_exch_mgr_anchor - primary structure for list of EMs
106   * @ema_list: Exchange Manager Anchor list
107   * @mp:	      Exchange Manager associated with this anchor
108   * @match:    Routine to determine if this anchor's EM should be used
109   *
110   * When walking the list of anchors the match routine will be called
111   * for each anchor to determine if that EM should be used. The last
112   * anchor in the list will always match to handle any exchanges not
113   * handled by other EMs. Non-default EMs are added to the anchor list by
114   * LLDs whose hardware provides offloads.
115   */
116  struct fc_exch_mgr_anchor {
117  	struct list_head ema_list;
118  	struct fc_exch_mgr *mp;
119  	bool (*match)(struct fc_frame *);
120  };
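/*
 * A minimal sketch (not taken from any real LLD) of the kind of match
 * routine an offload-capable driver could pair with its own EM, here
 * claiming only FCP frames; fc_exch_alloc() below walks the anchor list and
 * uses the first anchor whose match routine accepts the frame or is NULL:
 *
 *	static bool example_fcp_match(struct fc_frame *fp)
 *	{
 *		return fc_frame_header_get(fp)->fh_type == FC_TYPE_FCP;
 *	}
 */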
121  
122  static void fc_exch_rrq(struct fc_exch *);
123  static void fc_seq_ls_acc(struct fc_frame *);
124  static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
125  			  enum fc_els_rjt_explan);
126  static void fc_exch_els_rec(struct fc_frame *);
127  static void fc_exch_els_rrq(struct fc_frame *);
128  
129  /*
130   * Internal implementation notes.
131   *
132   * By default libfc has a single exchange manager, but an LLD may choose
133   * to have one per CPU. There is one sequence manager per exchange manager
134   * and the two are currently never separated.
135   *
136   * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
137   * assigned by the Sequence Initiator that shall be unique for a specific
138   * D_ID and S_ID pair while the Sequence is open."   Note that it isn't
139   * qualified by exchange ID, which one might think it would be.
140   * In practice this limits the number of open sequences and exchanges to 256
141   * per session.	 For most targets we could treat this limit as per exchange.
142   *
143   * The exchange and its sequence are freed when the last sequence is received.
144   * It's possible for the remote port to leave an exchange open without
145   * sending any sequences.
146   *
147   * Notes on reference counts:
148   *
149   * Exchanges are reference counted and an exchange is freed when its
150   * reference count reaches zero.
151   *
152   * Timeouts:
153   * Sequences are timed out for E_D_TOV and R_A_TOV.
154   *
155   * Sequence event handling:
156   *
157   * The following events may occur on initiator sequences:
158   *
159   *	Send.
160   *	    For now, the whole thing is sent.
161   *	Receive ACK
162   *	    This applies only to class F.
163   *	    The sequence is marked complete.
164   *	ULP completion.
165   *	    The upper layer calls fc_exch_done() when done
166   *	    with exchange and sequence tuple.
167   *	RX-inferred completion.
168   *	    When we receive the next sequence on the same exchange, we can
169   *	    retire the previous sequence ID.  (XXX not implemented).
170   *	Timeout.
171   *	    R_A_TOV frees the sequence ID.  If we're waiting for ACK,
172   *	    E_D_TOV causes abort and calls upper layer response handler
173   *	    with FC_EX_TIMEOUT error.
174   *	Receive RJT
175   *	    XXX defer.
176   *	Send ABTS
177   *	    On timeout.
178   *
179   * The following events may occur on recipient sequences:
180   *
181   *	Receive
182   *	    Allocate sequence for first frame received.
183   *	    Hold during receive handler.
184   *	    Release when final frame received.
185   *	    Keep status of last N of these for the ELS RES command.  XXX TBD.
186   *	Receive ABTS
187   *	    Deallocate sequence
188   *	Send RJT
189   *	    Deallocate
190   *
191   * For now, we neglect conditions where only part of a sequence was
192   * received or transmitted, or where out-of-order receipt is detected.
193   */
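/*
 * To make the above concrete, a rough responder-side sketch (illustrative
 * only, error handling omitted) of how an upper layer might drive the
 * exported helpers defined in this file once a request frame fp has
 * arrived; my_resp, my_arg and reply_fp are placeholders, not symbols
 * defined here:
 *
 *	struct fc_seq *sp = fc_seq_assign(lport, fp);	// find/create exchange, hold it
 *
 *	if (sp) {
 *		fc_seq_set_resp(sp, my_resp, my_arg);	// response callback
 *		fc_seq_send(lport, fc_seq_start_next(sp), reply_fp);
 *		fc_exch_done(sp);			// exchange/sequence tuple complete
 *		fc_seq_release(sp);			// drop the fc_seq_assign() hold
 *	}
 */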
194  
195  /*
196   * Locking notes:
197   *
198   * The EM code runs in a per-CPU worker thread.
199   *
200   * To protect against concurrency between worker-thread code and timers,
201   * sequence allocation and deallocation must be locked.
202   *  - The exchange refcnt can be manipulated atomically, without locks.
203   *  - sequence allocation must be locked by exch lock.
204   *  - If the EM pool lock and ex_lock must be taken at the same time, then the
205   *    EM pool lock must be taken before the ex_lock.
206   */
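/*
 * For example, one possible shape for code that needs both locks (as
 * fc_exch_em_alloc() below does when publishing a newly allocated
 * exchange), acquiring the EM pool lock first:
 *
 *	spin_lock_bh(&pool->lock);
 *	spin_lock_bh(&ep->ex_lock);
 *	...				// touch pool and exchange state
 *	spin_unlock_bh(&ep->ex_lock);
 *	spin_unlock_bh(&pool->lock);
 */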
207  
208  /*
209   * opcode names for debugging.
210   */
211  static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
212  
213  /**
214   * fc_exch_name_lookup() - Lookup name by opcode
215   * @op:	       Opcode to be looked up
216   * @table:     Opcode/name table
217   * @max_index: Index not to be exceeded
218   *
219   * This routine is used to determine a human-readable string identifying
220   * an R_CTL opcode.
221   */
222  static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
223  					      unsigned int max_index)
224  {
225  	const char *name = NULL;
226  
227  	if (op < max_index)
228  		name = table[op];
229  	if (!name)
230  		name = "unknown";
231  	return name;
232  }
233  
234  /**
235   * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
236   * @op: The opcode to be looked up
237   */
238  static const char *fc_exch_rctl_name(unsigned int op)
239  {
240  	return fc_exch_name_lookup(op, fc_exch_rctl_names,
241  				   ARRAY_SIZE(fc_exch_rctl_names));
242  }
243  
244  /**
245   * fc_exch_hold() - Increment an exchange's reference count
246   * @ep: Exchange to be held
247   */
248  static inline void fc_exch_hold(struct fc_exch *ep)
249  {
250  	atomic_inc(&ep->ex_refcnt);
251  }
252  
253  /**
254   * fc_exch_setup_hdr() - Initialize an FC header's exchange-related fields
255   *			 and determine the SOF and EOF.
256   * @ep:	   The exchange that will use the header
257   * @fp:	   The frame whose header is to be modified
258   * @f_ctl: F_CTL bits that will be used for the frame header
259   *
260   * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
261   * fh_seq_id, fh_seq_cnt and the SOF and EOF.
262   */
263  static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
264  			      u32 f_ctl)
265  {
266  	struct fc_frame_header *fh = fc_frame_header_get(fp);
267  	u16 fill;
268  
269  	fr_sof(fp) = ep->class;
270  	if (ep->seq.cnt)
271  		fr_sof(fp) = fc_sof_normal(ep->class);
272  
273  	if (f_ctl & FC_FC_END_SEQ) {
274  		fr_eof(fp) = FC_EOF_T;
275  		if (fc_sof_needs_ack(ep->class))
276  			fr_eof(fp) = FC_EOF_N;
277  		/*
278  		 * From F_CTL.
279  		 * The number of fill bytes to make the length a 4-byte
280  		 * multiple is the low order 2-bits of the f_ctl.
281  		 * The fill itself will have been cleared by the frame
282  		 * allocation.
283  		 * After this, the length will be even, as expected by
284  		 * the transport.
285  		 */
286  		fill = fr_len(fp) & 3;
287  		if (fill) {
288  			fill = 4 - fill;
289  			/* TODO, this may be a problem with fragmented skb */
290  			skb_put(fp_skb(fp), fill);
291  			hton24(fh->fh_f_ctl, f_ctl | fill);
292  		}
293  	} else {
294  		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad on a non-last frame */
295  		fr_eof(fp) = FC_EOF_N;
296  	}
297  
298  	/* Initialize remaining fh fields from fc_fill_fc_hdr */
299  	fh->fh_ox_id = htons(ep->oxid);
300  	fh->fh_rx_id = htons(ep->rxid);
301  	fh->fh_seq_id = ep->seq.id;
302  	fh->fh_seq_cnt = htons(ep->seq.cnt);
303  }
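/*
 * Worked example for the fill computation above (illustrative numbers): a
 * frame with fr_len(fp) == 27 has 27 & 3 == 3, so fill == 4 - 3 == 1; one
 * pad byte is appended via skb_put() and the value 1 is OR'd into the low
 * two F_CTL bits so the receiver knows how much padding to strip.
 */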
304  
305  /**
306   * fc_exch_release() - Decrement an exchange's reference count
307   * @ep: Exchange to be released
308   *
309   * If the reference count reaches zero and the exchange is complete,
310   * it is freed.
311   */
312  static void fc_exch_release(struct fc_exch *ep)
313  {
314  	struct fc_exch_mgr *mp;
315  
316  	if (atomic_dec_and_test(&ep->ex_refcnt)) {
317  		mp = ep->em;
318  		if (ep->destructor)
319  			ep->destructor(&ep->seq, ep->arg);
320  		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
321  		mempool_free(ep, mp->ep_pool);
322  	}
323  }
324  
325  /**
326   * fc_exch_timer_cancel() - cancel exch timer
327   * @ep:		The exchange whose timer to be canceled
328   */
329  static inline void fc_exch_timer_cancel(struct fc_exch *ep)
330  {
331  	if (cancel_delayed_work(&ep->timeout_work)) {
332  		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
333  		atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
334  	}
335  }
336  
337  /**
338   * fc_exch_timer_set_locked() - Start a timer for an exchange with the
339   *				exchange lock held
340   * @ep:		The exchange whose timer will start
341   * @timer_msec: The timeout period
342   *
343   * Used for upper level protocols to time out the exchange.
344   * The timer is cancelled when it fires or when the exchange completes.
345   */
346  static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
347  					    unsigned int timer_msec)
348  {
349  	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
350  		return;
351  
352  	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
353  
354  	fc_exch_hold(ep);		/* hold for timer */
355  	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
356  				msecs_to_jiffies(timer_msec))) {
357  		FC_EXCH_DBG(ep, "Exchange already queued\n");
358  		fc_exch_release(ep);
359  	}
360  }
361  
362  /**
363   * fc_exch_timer_set() - Lock the exchange and set the timer
364   * @ep:		The exchange whose timer will start
365   * @timer_msec: The timeout period
366   */
367  static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
368  {
369  	spin_lock_bh(&ep->ex_lock);
370  	fc_exch_timer_set_locked(ep, timer_msec);
371  	spin_unlock_bh(&ep->ex_lock);
372  }
373  
374  /**
375   * fc_exch_done_locked() - Complete an exchange with the exchange lock held
376   * @ep: The exchange that is complete
377   *
378   * Note: May sleep if invoked from outside a response handler.
379   */
380  static int fc_exch_done_locked(struct fc_exch *ep)
381  {
382  	int rc = 1;
383  
384  	/*
385  	 * We must check for completion in case there are two threads
386  	 * trying to complete this. But the rrq code will reuse the
387  	 * ep, and in that case we only clear the resp and set it as
388  	 * complete, so it can be reused by the timer to send the rrq.
389  	 */
390  	if (ep->state & FC_EX_DONE)
391  		return rc;
392  	ep->esb_stat |= ESB_ST_COMPLETE;
393  
394  	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
395  		ep->state |= FC_EX_DONE;
396  		fc_exch_timer_cancel(ep);
397  		rc = 0;
398  	}
399  	return rc;
400  }
401  
402  static struct fc_exch fc_quarantine_exch;
403  
404  /**
405   * fc_exch_ptr_get() - Return an exchange from an exchange pool
406   * @pool:  Exchange Pool to get an exchange from
407   * @index: Index of the exchange within the pool
408   *
409   * Use the index to get an exchange from within an exchange pool. The
410   * exchange array immediately follows the pool structure, and the index
411   * selects the exchange within that array.
412   */
413  static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
414  					      u16 index)
415  {
416  	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
417  	return exches[index];
418  }
419  
420  /**
421   * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
422   * @pool:  The pool to assign the exchange to
423   * @index: The index in the pool where the exchange will be assigned
424   * @ep:	   The exchange to assign to the pool
425   */
426  static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
427  				   struct fc_exch *ep)
428  {
429  	((struct fc_exch **)(pool + 1))[index] = ep;
430  }
431  
432  /**
433   * fc_exch_delete() - Delete an exchange
434   * @ep: The exchange to be deleted
435   */
436  static void fc_exch_delete(struct fc_exch *ep)
437  {
438  	struct fc_exch_pool *pool;
439  	u16 index;
440  
441  	pool = ep->pool;
442  	spin_lock_bh(&pool->lock);
443  	WARN_ON(pool->total_exches <= 0);
444  	pool->total_exches--;
445  
446  	/* update cache of free slot */
447  	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
448  	if (!(ep->state & FC_EX_QUARANTINE)) {
449  		if (pool->left == FC_XID_UNKNOWN)
450  			pool->left = index;
451  		else if (pool->right == FC_XID_UNKNOWN)
452  			pool->right = index;
453  		else
454  			pool->next_index = index;
455  		fc_exch_ptr_set(pool, index, NULL);
456  	} else {
457  		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
458  	}
459  	list_del(&ep->ex_list);
460  	spin_unlock_bh(&pool->lock);
461  	fc_exch_release(ep);	/* drop hold for exch in mp */
462  }
463  
464  static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
465  			      struct fc_frame *fp)
466  {
467  	struct fc_exch *ep;
468  	struct fc_frame_header *fh = fc_frame_header_get(fp);
469  	int error = -ENXIO;
470  	u32 f_ctl;
471  	u8 fh_type = fh->fh_type;
472  
473  	ep = fc_seq_exch(sp);
474  
475  	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
476  		fc_frame_free(fp);
477  		goto out;
478  	}
479  
480  	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
481  
482  	f_ctl = ntoh24(fh->fh_f_ctl);
483  	fc_exch_setup_hdr(ep, fp, f_ctl);
484  	fr_encaps(fp) = ep->encaps;
485  
486  	/*
487  	 * Update the sequence count if this frame carries
488  	 * multiple FC frames, i.e. when sequence offload is
489  	 * enabled by the LLD.
490  	 */
491  	if (fr_max_payload(fp))
492  		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
493  					fr_max_payload(fp));
494  	else
495  		sp->cnt++;
496  
497  	/*
498  	 * Send the frame.
499  	 */
500  	error = lport->tt.frame_send(lport, fp);
501  
502  	if (fh_type == FC_TYPE_BLS)
503  		goto out;
504  
505  	/*
506  	 * Update the exchange and sequence flags,
507  	 * assuming all frames for the sequence have been sent.
508  	 * We can only be called to send once for each sequence.
509  	 */
510  	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
511  	if (f_ctl & FC_FC_SEQ_INIT)
512  		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
513  out:
514  	return error;
515  }
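/*
 * Illustrative numbers for the sequence-count update in fc_seq_send_locked()
 * above: with sequence offload and fr_max_payload(fp) == 2048, a send whose
 * fr_len(fp) is sizeof(struct fc_frame_header) + 8192 advances sp->cnt by
 * DIV_ROUND_UP(8192, 2048) == 4, whereas without offload
 * (fr_max_payload(fp) == 0) each call increments sp->cnt by one.
 */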
516  
517  /**
518   * fc_seq_send() - Send a frame using existing sequence/exchange pair
519   * @lport: The local port that the exchange will be sent on
520   * @sp:	   The sequence to be sent
521   * @fp:	   The frame to be sent on the exchange
522   *
523   * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
524   * or indirectly by calling libfc_function_template.frame_send().
525   */
526  int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
527  {
528  	struct fc_exch *ep;
529  	int error;
530  	ep = fc_seq_exch(sp);
531  	spin_lock_bh(&ep->ex_lock);
532  	error = fc_seq_send_locked(lport, sp, fp);
533  	spin_unlock_bh(&ep->ex_lock);
534  	return error;
535  }
536  EXPORT_SYMBOL(fc_seq_send);
537  
538  /**
539   * fc_seq_alloc() - Allocate a sequence for a given exchange
540   * @ep:	    The exchange to allocate a new sequence for
541   * @seq_id: The sequence ID to be used
542   *
543   * We don't support multiple originated sequences on the same exchange.
544   * By implication, any previously originated sequence on this exchange
545   * is complete, and we reallocate the same sequence.
546   */
547  static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
548  {
549  	struct fc_seq *sp;
550  
551  	sp = &ep->seq;
552  	sp->ssb_stat = 0;
553  	sp->cnt = 0;
554  	sp->id = seq_id;
555  	return sp;
556  }
557  
558  /**
559   * fc_seq_start_next_locked() - Allocate a new sequence on the same
560   *				exchange as the supplied sequence
561   * @sp: The sequence/exchange to get a new sequence for
562   */
563  static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
564  {
565  	struct fc_exch *ep = fc_seq_exch(sp);
566  
567  	sp = fc_seq_alloc(ep, ep->seq_id++);
568  	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
569  		    ep->f_ctl, sp->id);
570  	return sp;
571  }
572  
573  /**
574   * fc_seq_start_next() - Lock the exchange and get a new sequence
575   *			 for a given sequence/exchange pair
576   * @sp: The sequence/exchange to get a new sequence for
577   */
578  struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
579  {
580  	struct fc_exch *ep = fc_seq_exch(sp);
581  
582  	spin_lock_bh(&ep->ex_lock);
583  	sp = fc_seq_start_next_locked(sp);
584  	spin_unlock_bh(&ep->ex_lock);
585  
586  	return sp;
587  }
588  EXPORT_SYMBOL(fc_seq_start_next);
589  
590  /*
591   * Set the response handler for the exchange associated with a sequence.
592   *
593   * Note: May sleep if invoked from outside a response handler.
594   */
595  void fc_seq_set_resp(struct fc_seq *sp,
596  		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
597  		     void *arg)
598  {
599  	struct fc_exch *ep = fc_seq_exch(sp);
600  	DEFINE_WAIT(wait);
601  
602  	spin_lock_bh(&ep->ex_lock);
603  	while (ep->resp_active && ep->resp_task != current) {
604  		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
605  		spin_unlock_bh(&ep->ex_lock);
606  
607  		schedule();
608  
609  		spin_lock_bh(&ep->ex_lock);
610  	}
611  	finish_wait(&ep->resp_wq, &wait);
612  	ep->resp = resp;
613  	ep->arg = arg;
614  	spin_unlock_bh(&ep->ex_lock);
615  }
616  EXPORT_SYMBOL(fc_seq_set_resp);
617  
618  /**
619   * fc_exch_abort_locked() - Abort an exchange
620   * @ep:	The exchange to be aborted
621   * @timer_msec: The period of time to wait before aborting
622   *
623   * Abort an exchange and sequence. Generally called because of an
624   * exchange timeout or an abort from the upper layer.
625   *
626   * A timer_msec value can be specified for the abort timeout; if a
627   * non-zero value is given, the exchange's response handler will be
628   * called with a timeout error if no response to the abort arrives.
629   *
630   * Locking notes:  Called with exch lock held
631   *
632   * Return value: 0 on success else error code
633   */
634  static int fc_exch_abort_locked(struct fc_exch *ep,
635  				unsigned int timer_msec)
636  {
637  	struct fc_seq *sp;
638  	struct fc_frame *fp;
639  	int error;
640  
641  	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
642  	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
643  	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
644  		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
645  			    ep->esb_stat, ep->state);
646  		return -ENXIO;
647  	}
648  
649  	/*
650  	 * Send the abort on a new sequence if possible.
651  	 */
652  	sp = fc_seq_start_next_locked(&ep->seq);
653  	if (!sp)
654  		return -ENOMEM;
655  
656  	if (timer_msec)
657  		fc_exch_timer_set_locked(ep, timer_msec);
658  
659  	if (ep->sid) {
660  		/*
661  		 * Send an abort for the sequence that timed out.
662  		 */
663  		fp = fc_frame_alloc(ep->lp, 0);
664  		if (fp) {
665  			ep->esb_stat |= ESB_ST_SEQ_INIT;
666  			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
667  				       FC_TYPE_BLS, FC_FC_END_SEQ |
668  				       FC_FC_SEQ_INIT, 0);
669  			error = fc_seq_send_locked(ep->lp, sp, fp);
670  		} else {
671  			error = -ENOBUFS;
672  		}
673  	} else {
674  		/*
675  		 * If not logged into the fabric, don't send ABTS but leave
676  		 * sequence active until next timeout.
677  		 */
678  		error = 0;
679  	}
680  	ep->esb_stat |= ESB_ST_ABNORMAL;
681  	return error;
682  }
683  
684  /**
685   * fc_seq_exch_abort() - Abort an exchange and sequence
686   * @req_sp:	The sequence to be aborted
687   * @timer_msec: The period of time to wait before aborting
688   *
689   * Generally called because of a timeout or an abort from the upper layer.
690   *
691   * Return value: 0 on success else error code
692   */
693  int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
694  {
695  	struct fc_exch *ep;
696  	int error;
697  
698  	ep = fc_seq_exch(req_sp);
699  	spin_lock_bh(&ep->ex_lock);
700  	error = fc_exch_abort_locked(ep, timer_msec);
701  	spin_unlock_bh(&ep->ex_lock);
702  	return error;
703  }
704  
705  /**
706   * fc_invoke_resp() - invoke ep->resp()
707   * @ep:	   The exchange to be operated on
708   * @fp:	   The frame pointer to pass through to ->resp()
709   * @sp:	   The sequence pointer to pass through to ->resp()
710   *
711   * Notes:
712   * It is assumed that after initialization has finished (that is, after the
713   * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
714   * modified only via fc_seq_set_resp(). This guarantees that neither of
715   * these two variables changes while ep->resp_active > 0.
716   *
717   * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
718   * this function is invoked, the first spin_lock_bh() call in this function
719   * will wait until fc_seq_set_resp() has finished modifying these variables.
720   *
721   * Since fc_exch_done() invokes fc_seq_set_resp(), it is guaranteed that
722   * ep->resp() won't be invoked after fc_exch_done() has returned.
723   *
724   * The response handler itself may invoke fc_exch_done(), which will clear the
725   * ep->resp pointer.
726   *
727   * Return value:
728   * Returns true if and only if ep->resp has been invoked.
729   */
730  static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
731  			   struct fc_frame *fp)
732  {
733  	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
734  	void *arg;
735  	bool res = false;
736  
737  	spin_lock_bh(&ep->ex_lock);
738  	ep->resp_active++;
739  	if (ep->resp_task != current)
740  		ep->resp_task = !ep->resp_task ? current : NULL;
741  	resp = ep->resp;
742  	arg = ep->arg;
743  	spin_unlock_bh(&ep->ex_lock);
744  
745  	if (resp) {
746  		resp(sp, fp, arg);
747  		res = true;
748  	}
749  
750  	spin_lock_bh(&ep->ex_lock);
751  	if (--ep->resp_active == 0)
752  		ep->resp_task = NULL;
753  	spin_unlock_bh(&ep->ex_lock);
754  
755  	if (ep->resp_active == 0)
756  		wake_up(&ep->resp_wq);
757  
758  	return res;
759  }
760  
761  /**
762   * fc_exch_timeout() - Handle exchange timer expiration
763   * @work: The work_struct identifying the exchange that timed out
764   */
765  static void fc_exch_timeout(struct work_struct *work)
766  {
767  	struct fc_exch *ep = container_of(work, struct fc_exch,
768  					  timeout_work.work);
769  	struct fc_seq *sp = &ep->seq;
770  	u32 e_stat;
771  	int rc = 1;
772  
773  	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
774  
775  	spin_lock_bh(&ep->ex_lock);
776  	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
777  		goto unlock;
778  
779  	e_stat = ep->esb_stat;
780  	if (e_stat & ESB_ST_COMPLETE) {
781  		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
782  		spin_unlock_bh(&ep->ex_lock);
783  		if (e_stat & ESB_ST_REC_QUAL)
784  			fc_exch_rrq(ep);
785  		goto done;
786  	} else {
787  		if (e_stat & ESB_ST_ABNORMAL)
788  			rc = fc_exch_done_locked(ep);
789  		spin_unlock_bh(&ep->ex_lock);
790  		if (!rc)
791  			fc_exch_delete(ep);
792  		fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
793  		fc_seq_set_resp(sp, NULL, ep->arg);
794  		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
795  		goto done;
796  	}
797  unlock:
798  	spin_unlock_bh(&ep->ex_lock);
799  done:
800  	/*
801  	 * This release matches the hold taken when the timer was set.
802  	 */
803  	fc_exch_release(ep);
804  }
805  
806  /**
807   * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
808   * @lport: The local port that the exchange is for
809   * @mp:	   The exchange manager that will allocate the exchange
810   *
811   * Returns pointer to allocated fc_exch with exch lock held.
812   */
813  static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
814  					struct fc_exch_mgr *mp)
815  {
816  	struct fc_exch *ep;
817  	unsigned int cpu;
818  	u16 index;
819  	struct fc_exch_pool *pool;
820  
821  	/* allocate memory for exchange */
822  	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
823  	if (!ep) {
824  		atomic_inc(&mp->stats.no_free_exch);
825  		goto out;
826  	}
827  	memset(ep, 0, sizeof(*ep));
828  
829  	cpu = get_cpu();
830  	pool = per_cpu_ptr(mp->pool, cpu);
831  	spin_lock_bh(&pool->lock);
832  	put_cpu();
833  
834  	/* peek cache of free slot */
835  	if (pool->left != FC_XID_UNKNOWN) {
836  		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
837  			index = pool->left;
838  			pool->left = FC_XID_UNKNOWN;
839  			goto hit;
840  		}
841  	}
842  	if (pool->right != FC_XID_UNKNOWN) {
843  		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
844  			index = pool->right;
845  			pool->right = FC_XID_UNKNOWN;
846  			goto hit;
847  		}
848  	}
849  
850  	index = pool->next_index;
851  	/* allocate new exch from pool */
852  	while (fc_exch_ptr_get(pool, index)) {
853  		index = index == mp->pool_max_index ? 0 : index + 1;
854  		if (index == pool->next_index)
855  			goto err;
856  	}
857  	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
858  hit:
859  	fc_exch_hold(ep);	/* hold for exch in mp */
860  	spin_lock_init(&ep->ex_lock);
861  	/*
862  	 * Hold exch lock for caller to prevent fc_exch_reset()
863  	 * from releasing exch	while fc_exch_alloc() caller is
864  	 * still working on exch.
865  	 */
866  	spin_lock_bh(&ep->ex_lock);
867  
868  	fc_exch_ptr_set(pool, index, ep);
869  	list_add_tail(&ep->ex_list, &pool->ex_list);
870  	fc_seq_alloc(ep, ep->seq_id++);
871  	pool->total_exches++;
872  	spin_unlock_bh(&pool->lock);
873  
874  	/*
875  	 *  update exchange
876  	 */
877  	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
878  	ep->em = mp;
879  	ep->pool = pool;
880  	ep->lp = lport;
881  	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
882  	ep->rxid = FC_XID_UNKNOWN;
883  	ep->class = mp->class;
884  	ep->resp_active = 0;
885  	init_waitqueue_head(&ep->resp_wq);
886  	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
887  out:
888  	return ep;
889  err:
890  	spin_unlock_bh(&pool->lock);
891  	atomic_inc(&mp->stats.no_free_exch_xid);
892  	mempool_free(ep, mp->ep_pool);
893  	return NULL;
894  }
895  
896  /**
897   * fc_exch_alloc() - Allocate an exchange from an EM on a
898   *		     local port's list of EMs.
899   * @lport: The local port that will own the exchange
900   * @fp:	   The FC frame that the exchange will be for
901   *
902   * This function walks the list of exchange manager (EM)
903   * anchors to select an EM for a new exchange allocation. The
904   * EM is selected when a NULL match function pointer is encountered
905   * or when a call to a match function returns true.
906   */
907  static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
908  				     struct fc_frame *fp)
909  {
910  	struct fc_exch_mgr_anchor *ema;
911  	struct fc_exch *ep;
912  
913  	list_for_each_entry(ema, &lport->ema_list, ema_list) {
914  		if (!ema->match || ema->match(fp)) {
915  			ep = fc_exch_em_alloc(lport, ema->mp);
916  			if (ep)
917  				return ep;
918  		}
919  	}
920  	return NULL;
921  }
922  
923  /**
924   * fc_exch_find() - Lookup and hold an exchange
925   * @mp:	 The exchange manager to lookup the exchange from
926   * @xid: The XID of the exchange to look up
927   */
928  static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
929  {
930  	struct fc_lport *lport = mp->lport;
931  	struct fc_exch_pool *pool;
932  	struct fc_exch *ep = NULL;
933  	u16 cpu = xid & fc_cpu_mask;
934  
935  	if (xid == FC_XID_UNKNOWN)
936  		return NULL;
937  
938  	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
939  		pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n",
940  		       lport->host->host_no, lport->port_id, xid, cpu);
941  		return NULL;
942  	}
943  
944  	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
945  		pool = per_cpu_ptr(mp->pool, cpu);
946  		spin_lock_bh(&pool->lock);
947  		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
948  		if (ep == &fc_quarantine_exch) {
949  			FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
950  			ep = NULL;
951  		}
952  		if (ep) {
953  			WARN_ON(ep->xid != xid);
954  			fc_exch_hold(ep);
955  		}
956  		spin_unlock_bh(&pool->lock);
957  	}
958  	return ep;
959  }
960  
961  
962  /**
963   * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
964   *		    the memory allocated for the related objects may be freed.
965   * @sp: The sequence that has completed
966   *
967   * Note: May sleep if invoked from outside a response handler.
968   */
969  void fc_exch_done(struct fc_seq *sp)
970  {
971  	struct fc_exch *ep = fc_seq_exch(sp);
972  	int rc;
973  
974  	spin_lock_bh(&ep->ex_lock);
975  	rc = fc_exch_done_locked(ep);
976  	spin_unlock_bh(&ep->ex_lock);
977  
978  	fc_seq_set_resp(sp, NULL, ep->arg);
979  	if (!rc)
980  		fc_exch_delete(ep);
981  }
982  EXPORT_SYMBOL(fc_exch_done);
983  
984  /**
985   * fc_exch_resp() - Allocate a new exchange for a response frame
986   * @lport: The local port that the exchange was for
987   * @mp:	   The exchange manager to allocate the exchange from
988   * @fp:	   The response frame
989   *
990   * Sets the responder ID in the frame header.
991   */
992  static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
993  				    struct fc_exch_mgr *mp,
994  				    struct fc_frame *fp)
995  {
996  	struct fc_exch *ep;
997  	struct fc_frame_header *fh;
998  
999  	ep = fc_exch_alloc(lport, fp);
1000  	if (ep) {
1001  		ep->class = fc_frame_class(fp);
1002  
1003  		/*
1004  		 * Set EX_CTX indicating we're responding on this exchange.
1005  		 */
1006  		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
1007  		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
1008  		fh = fc_frame_header_get(fp);
1009  		ep->sid = ntoh24(fh->fh_d_id);
1010  		ep->did = ntoh24(fh->fh_s_id);
1011  		ep->oid = ep->did;
1012  
1013  		/*
1014  		 * Allocated exchange has placed the XID in the
1015  		 * originator field. Move it to the responder field,
1016  		 * and set the originator XID from the frame.
1017  		 */
1018  		ep->rxid = ep->xid;
1019  		ep->oxid = ntohs(fh->fh_ox_id);
1020  		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
1021  		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
1022  			ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1023  
1024  		fc_exch_hold(ep);	/* hold for caller */
1025  		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
1026  	}
1027  	return ep;
1028  }
1029  
1030  /**
1031   * fc_seq_lookup_recip() - Find a sequence where the other end
1032   *			   originated the sequence
1033   * @lport: The local port that the frame was sent to
1034   * @mp:	   The Exchange Manager to lookup the exchange from
1035   * @fp:	   The frame associated with the sequence we're looking for
1036   *
1037   * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
1038   * on the ep that should be released by the caller.
1039   */
1040  static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
1041  						 struct fc_exch_mgr *mp,
1042  						 struct fc_frame *fp)
1043  {
1044  	struct fc_frame_header *fh = fc_frame_header_get(fp);
1045  	struct fc_exch *ep = NULL;
1046  	struct fc_seq *sp = NULL;
1047  	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
1048  	u32 f_ctl;
1049  	u16 xid;
1050  
1051  	f_ctl = ntoh24(fh->fh_f_ctl);
1052  	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
1053  
1054  	/*
1055  	 * Lookup or create the exchange if we will be creating the sequence.
1056  	 */
1057  	if (f_ctl & FC_FC_EX_CTX) {
1058  		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
1059  		ep = fc_exch_find(mp, xid);
1060  		if (!ep) {
1061  			atomic_inc(&mp->stats.xid_not_found);
1062  			reject = FC_RJT_OX_ID;
1063  			goto out;
1064  		}
1065  		if (ep->rxid == FC_XID_UNKNOWN)
1066  			ep->rxid = ntohs(fh->fh_rx_id);
1067  		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
1068  			reject = FC_RJT_OX_ID;
1069  			goto rel;
1070  		}
1071  	} else {
1072  		xid = ntohs(fh->fh_rx_id);	/* we are the responder */
1073  
1074  		/*
1075  		 * Special case for MDS issuing an ELS TEST with a
1076  		 * bad rxid of 0.
1077  		 * XXX take this out once we do the proper reject.
1078  		 */
1079  		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
1080  		    fc_frame_payload_op(fp) == ELS_TEST) {
1081  			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
1082  			xid = FC_XID_UNKNOWN;
1083  		}
1084  
1085  		/*
1086  		 * new sequence - find the exchange
1087  		 */
1088  		ep = fc_exch_find(mp, xid);
1089  		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
1090  			if (ep) {
1091  				atomic_inc(&mp->stats.xid_busy);
1092  				reject = FC_RJT_RX_ID;
1093  				goto rel;
1094  			}
1095  			ep = fc_exch_resp(lport, mp, fp);
1096  			if (!ep) {
1097  				reject = FC_RJT_EXCH_EST;	/* XXX */
1098  				goto out;
1099  			}
1100  			xid = ep->xid;	/* get our XID */
1101  		} else if (!ep) {
1102  			atomic_inc(&mp->stats.xid_not_found);
1103  			reject = FC_RJT_RX_ID;	/* XID not found */
1104  			goto out;
1105  		}
1106  	}
1107  
1108  	spin_lock_bh(&ep->ex_lock);
1109  	/*
1110  	 * At this point, we have the exchange held.
1111  	 * Find or create the sequence.
1112  	 */
1113  	if (fc_sof_is_init(fr_sof(fp))) {
1114  		sp = &ep->seq;
1115  		sp->ssb_stat |= SSB_ST_RESP;
1116  		sp->id = fh->fh_seq_id;
1117  	} else {
1118  		sp = &ep->seq;
1119  		if (sp->id != fh->fh_seq_id) {
1120  			atomic_inc(&mp->stats.seq_not_found);
1121  			if (f_ctl & FC_FC_END_SEQ) {
1122  				/*
1123  				 * Update the sequence ID from the incoming
1124  				 * last frame of the sequence. This is needed
1125  				 * for an FC target using DDP, where the
1126  				 * stack is only notified about the last
1127  				 * frame's (payload) header. That frame's
1128  				 * seq_id was allocated by the initiator and
1129  				 * is completely different from the seq_id
1130  				 * allocated when the target sent XFER_RDY.
1131  				 * Without this update we would see a false
1132  				 * mismatch, never send the RSP, and the
1133  				 * write request on the other end would
1134  				 * never finish.
1135  				 */
1136  				sp->ssb_stat |= SSB_ST_RESP;
1137  				sp->id = fh->fh_seq_id;
1138  			} else {
1139  				spin_unlock_bh(&ep->ex_lock);
1140  
1141  				/* sequence/exch should exist */
1142  				reject = FC_RJT_SEQ_ID;
1143  				goto rel;
1144  			}
1145  		}
1146  	}
1147  	WARN_ON(ep != fc_seq_exch(sp));
1148  
1149  	if (f_ctl & FC_FC_SEQ_INIT)
1150  		ep->esb_stat |= ESB_ST_SEQ_INIT;
1151  	spin_unlock_bh(&ep->ex_lock);
1152  
1153  	fr_seq(fp) = sp;
1154  out:
1155  	return reject;
1156  rel:
1157  	fc_exch_done(&ep->seq);
1158  	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
1159  	return reject;
1160  }
1161  
1162  /**
1163   * fc_seq_lookup_orig() - Find a sequence where this end
1164   *			  originated the sequence
1165   * @mp:	   The Exchange Manager to lookup the exchange from
1166   * @fp:	   The frame associated with the sequence we're looking for
1167   *
1168   * Does not hold the sequence for the caller.
1169   */
1170  static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
1171  					 struct fc_frame *fp)
1172  {
1173  	struct fc_frame_header *fh = fc_frame_header_get(fp);
1174  	struct fc_exch *ep;
1175  	struct fc_seq *sp = NULL;
1176  	u32 f_ctl;
1177  	u16 xid;
1178  
1179  	f_ctl = ntoh24(fh->fh_f_ctl);
1180  	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
1181  	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
1182  	ep = fc_exch_find(mp, xid);
1183  	if (!ep)
1184  		return NULL;
1185  	if (ep->seq.id == fh->fh_seq_id) {
1186  		/*
1187  		 * Save the RX_ID if we didn't previously know it.
1188  		 */
1189  		sp = &ep->seq;
1190  		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
1191  		    ep->rxid == FC_XID_UNKNOWN) {
1192  			ep->rxid = ntohs(fh->fh_rx_id);
1193  		}
1194  	}
1195  	fc_exch_release(ep);
1196  	return sp;
1197  }
1198  
1199  /**
1200   * fc_exch_set_addr() - Set the source and destination IDs for an exchange
1201   * @ep:	     The exchange to set the addresses for
1202   * @orig_id: The originator's ID
1203   * @resp_id: The responder's ID
1204   *
1205   * Note this must be done before the first sequence of the exchange is sent.
1206   */
1207  static void fc_exch_set_addr(struct fc_exch *ep,
1208  			     u32 orig_id, u32 resp_id)
1209  {
1210  	ep->oid = orig_id;
1211  	if (ep->esb_stat & ESB_ST_RESP) {
1212  		ep->sid = resp_id;
1213  		ep->did = orig_id;
1214  	} else {
1215  		ep->sid = orig_id;
1216  		ep->did = resp_id;
1217  	}
1218  }
1219  
1220  /**
1221   * fc_seq_els_rsp_send() - Send an ELS response using information from
1222   *			   the existing sequence/exchange.
1223   * @fp:	      The received frame
1224   * @els_cmd:  The ELS command to be sent
1225   * @els_data: The ELS data to be sent
1226   *
1227   * The received frame is not freed.
1228   */
1229  void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
1230  			 struct fc_seq_els_data *els_data)
1231  {
1232  	switch (els_cmd) {
1233  	case ELS_LS_RJT:
1234  		fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
1235  		break;
1236  	case ELS_LS_ACC:
1237  		fc_seq_ls_acc(fp);
1238  		break;
1239  	case ELS_RRQ:
1240  		fc_exch_els_rrq(fp);
1241  		break;
1242  	case ELS_REC:
1243  		fc_exch_els_rec(fp);
1244  		break;
1245  	default:
1246  		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
1247  	}
1248  }
1249  EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
1250  
1251  /**
1252   * fc_seq_send_last() - Send a sequence that is the last in the exchange
1253   * @sp:	     The sequence that is to be sent
1254   * @fp:	     The frame that will be sent on the sequence
1255   * @rctl:    The R_CTL information to be sent
1256   * @fh_type: The frame header type
1257   */
1258  static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1259  			     enum fc_rctl rctl, enum fc_fh_type fh_type)
1260  {
1261  	u32 f_ctl;
1262  	struct fc_exch *ep = fc_seq_exch(sp);
1263  
1264  	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1265  	f_ctl |= ep->f_ctl;
1266  	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
1267  	fc_seq_send_locked(ep->lp, sp, fp);
1268  }
1269  
1270  /**
1271   * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
1272   * @sp:	   The sequence to send the ACK on
1273   * @rx_fp: The received frame that is being acknowledged
1274   *
1275   * Send ACK_1 (or equiv.) indicating we received something.
1276   */
1277  static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
1278  {
1279  	struct fc_frame *fp;
1280  	struct fc_frame_header *rx_fh;
1281  	struct fc_frame_header *fh;
1282  	struct fc_exch *ep = fc_seq_exch(sp);
1283  	struct fc_lport *lport = ep->lp;
1284  	unsigned int f_ctl;
1285  
1286  	/*
1287  	 * Don't send ACKs for class 3.
1288  	 */
1289  	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
1290  		fp = fc_frame_alloc(lport, 0);
1291  		if (!fp) {
1292  			FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
1293  			return;
1294  		}
1295  
1296  		fh = fc_frame_header_get(fp);
1297  		fh->fh_r_ctl = FC_RCTL_ACK_1;
1298  		fh->fh_type = FC_TYPE_BLS;
1299  
1300  		/*
1301  		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1302  		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1303  		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1304  		 * Last ACK uses bits 7-6 (continue sequence),
1305  		 * bits 5-4 are meaningful (what kind of ACK to use).
1306  		 */
1307  		rx_fh = fc_frame_header_get(rx_fp);
1308  		f_ctl = ntoh24(rx_fh->fh_f_ctl);
1309  		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1310  			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
1311  			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
1312  			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1313  		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1314  		hton24(fh->fh_f_ctl, f_ctl);
1315  
1316  		fc_exch_setup_hdr(ep, fp, f_ctl);
1317  		fh->fh_seq_id = rx_fh->fh_seq_id;
1318  		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1319  		fh->fh_parm_offset = htonl(1);	/* ack single frame */
1320  
1321  		fr_sof(fp) = fr_sof(rx_fp);
1322  		if (f_ctl & FC_FC_END_SEQ)
1323  			fr_eof(fp) = FC_EOF_T;
1324  		else
1325  			fr_eof(fp) = FC_EOF_N;
1326  
1327  		lport->tt.frame_send(lport, fp);
1328  	}
1329  }
1330  
1331  /**
1332   * fc_exch_send_ba_rjt() - Send BLS Reject
1333   * @rx_fp:  The frame being rejected
1334   * @reason: The reason the frame is being rejected
1335   * @explan: The explanation for the rejection
1336   *
1337   * This is for rejecting BA_ABTS only.
1338   */
1339  static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
1340  				enum fc_ba_rjt_reason reason,
1341  				enum fc_ba_rjt_explan explan)
1342  {
1343  	struct fc_frame *fp;
1344  	struct fc_frame_header *rx_fh;
1345  	struct fc_frame_header *fh;
1346  	struct fc_ba_rjt *rp;
1347  	struct fc_seq *sp;
1348  	struct fc_lport *lport;
1349  	unsigned int f_ctl;
1350  
1351  	lport = fr_dev(rx_fp);
1352  	sp = fr_seq(rx_fp);
1353  	fp = fc_frame_alloc(lport, sizeof(*rp));
1354  	if (!fp) {
1355  		FC_EXCH_DBG(fc_seq_exch(sp),
1356  			     "Drop BA_RJT request, out of memory\n");
1357  		return;
1358  	}
1359  	fh = fc_frame_header_get(fp);
1360  	rx_fh = fc_frame_header_get(rx_fp);
1361  
1362  	memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1363  
1364  	rp = fc_frame_payload_get(fp, sizeof(*rp));
1365  	rp->br_reason = reason;
1366  	rp->br_explan = explan;
1367  
1368  	/*
1369  	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1370  	 */
1371  	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1372  	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1373  	fh->fh_ox_id = rx_fh->fh_ox_id;
1374  	fh->fh_rx_id = rx_fh->fh_rx_id;
1375  	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1376  	fh->fh_r_ctl = FC_RCTL_BA_RJT;
1377  	fh->fh_type = FC_TYPE_BLS;
1378  
1379  	/*
1380  	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1381  	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1382  	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1383  	 * Last ACK uses bits 7-6 (continue sequence),
1384  	 * bits 5-4 are meaningful (what kind of ACK to use).
1385  	 * Always set LAST_SEQ, END_SEQ.
1386  	 */
1387  	f_ctl = ntoh24(rx_fh->fh_f_ctl);
1388  	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1389  		FC_FC_END_CONN | FC_FC_SEQ_INIT |
1390  		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1391  	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1392  	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1393  	f_ctl &= ~FC_FC_FIRST_SEQ;
1394  	hton24(fh->fh_f_ctl, f_ctl);
1395  
1396  	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1397  	fr_eof(fp) = FC_EOF_T;
1398  	if (fc_sof_needs_ack(fr_sof(fp)))
1399  		fr_eof(fp) = FC_EOF_N;
1400  
1401  	lport->tt.frame_send(lport, fp);
1402  }
1403  
1404  /**
1405   * fc_exch_recv_abts() - Handle an incoming ABTS
1406   * @ep:	   The exchange the abort was on
1407   * @rx_fp: The ABTS frame
1408   *
1409   * This would be for target mode usually, but could be due to lost
1410   * FCP transfer ready, confirm or RRQ. We always handle this as an
1411   * exchange abort, ignoring the parameter.
1412   */
1413  static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1414  {
1415  	struct fc_frame *fp;
1416  	struct fc_ba_acc *ap;
1417  	struct fc_frame_header *fh;
1418  	struct fc_seq *sp;
1419  
1420  	if (!ep)
1421  		goto reject;
1422  
1423  	FC_EXCH_DBG(ep, "exch: ABTS received\n");
1424  	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1425  	if (!fp) {
1426  		FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
1427  		goto free;
1428  	}
1429  
1430  	spin_lock_bh(&ep->ex_lock);
1431  	if (ep->esb_stat & ESB_ST_COMPLETE) {
1432  		spin_unlock_bh(&ep->ex_lock);
1433  		FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
1434  		fc_frame_free(fp);
1435  		goto reject;
1436  	}
1437  	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
1438  		ep->esb_stat |= ESB_ST_REC_QUAL;
1439  		fc_exch_hold(ep);		/* hold for REC_QUAL */
1440  	}
1441  	fc_exch_timer_set_locked(ep, ep->r_a_tov);
1442  	fh = fc_frame_header_get(fp);
1443  	ap = fc_frame_payload_get(fp, sizeof(*ap));
1444  	memset(ap, 0, sizeof(*ap));
1445  	sp = &ep->seq;
1446  	ap->ba_high_seq_cnt = htons(0xffff);
1447  	if (sp->ssb_stat & SSB_ST_RESP) {
1448  		ap->ba_seq_id = sp->id;
1449  		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1450  		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1451  		ap->ba_low_seq_cnt = htons(sp->cnt);
1452  	}
1453  	sp = fc_seq_start_next_locked(sp);
1454  	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1455  	ep->esb_stat |= ESB_ST_ABNORMAL;
1456  	spin_unlock_bh(&ep->ex_lock);
1457  
1458  free:
1459  	fc_frame_free(rx_fp);
1460  	return;
1461  
1462  reject:
1463  	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1464  	goto free;
1465  }
1466  
1467  /**
1468   * fc_seq_assign() - Assign exchange and sequence for incoming request
1469   * @lport: The local port that received the request
1470   * @fp:    The request frame
1471   *
1472   * On success, the sequence pointer will be returned and also in fr_seq(@fp).
1473   * A reference will be held on the exchange/sequence for the caller, which
1474   * must call fc_seq_release().
1475   */
1476  struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
1477  {
1478  	struct fc_exch_mgr_anchor *ema;
1479  
1480  	WARN_ON(lport != fr_dev(fp));
1481  	WARN_ON(fr_seq(fp));
1482  	fr_seq(fp) = NULL;
1483  
1484  	list_for_each_entry(ema, &lport->ema_list, ema_list)
1485  		if ((!ema->match || ema->match(fp)) &&
1486  		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
1487  			break;
1488  	return fr_seq(fp);
1489  }
1490  EXPORT_SYMBOL(fc_seq_assign);
1491  
1492  /**
1493   * fc_seq_release() - Release the hold
1494   * @sp:    The sequence.
1495   */
1496  void fc_seq_release(struct fc_seq *sp)
1497  {
1498  	fc_exch_release(fc_seq_exch(sp));
1499  }
1500  EXPORT_SYMBOL(fc_seq_release);
1501  
1502  /**
1503   * fc_exch_recv_req() - Handler for an incoming request
1504   * @lport: The local port that received the request
1505   * @mp:	   The EM that the exchange is on
1506   * @fp:	   The request frame
1507   *
1508   * This is used when the other end is originating the exchange
1509   * and the sequence.
1510   */
1511  static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1512  			     struct fc_frame *fp)
1513  {
1514  	struct fc_frame_header *fh = fc_frame_header_get(fp);
1515  	struct fc_seq *sp = NULL;
1516  	struct fc_exch *ep = NULL;
1517  	enum fc_pf_rjt_reason reject;
1518  
1519  	/* We can have the wrong fc_lport at this point with NPIV, which is a
1520  	 * problem now that we know a new exchange needs to be allocated
1521  	 */
1522  	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
1523  	if (!lport) {
1524  		fc_frame_free(fp);
1525  		return;
1526  	}
1527  	fr_dev(fp) = lport;
1528  
1529  	BUG_ON(fr_seq(fp));		/* XXX remove later */
1530  
1531  	/*
1532  	 * If the RX_ID is 0xffff, don't allocate an exchange.
1533  	 * The upper-level protocol may request one later, if needed.
1534  	 */
1535  	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
1536  		return fc_lport_recv(lport, fp);
1537  
1538  	reject = fc_seq_lookup_recip(lport, mp, fp);
1539  	if (reject == FC_RJT_NONE) {
1540  		sp = fr_seq(fp);	/* sequence will be held */
1541  		ep = fc_seq_exch(sp);
1542  		fc_seq_send_ack(sp, fp);
1543  		ep->encaps = fr_encaps(fp);
1544  
1545  		/*
1546  		 * Call the receive function.
1547  		 *
1548  		 * The receive function may allocate a new sequence
1549  		 * over the old one, so we shouldn't change the
1550  		 * sequence after this.
1551  		 *
1552  		 * The frame will be freed by the receive function.
1553  		 * The frame will be freed by the receive function.
1554  		 * If a response handler is set on the new exchange,
1555  		 * call that first.
1556  		if (!fc_invoke_resp(ep, sp, fp))
1557  			fc_lport_recv(lport, fp);
1558  		fc_exch_release(ep);	/* release from lookup */
1559  	} else {
1560  		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
1561  			     reject);
1562  		fc_frame_free(fp);
1563  	}
1564  }
1565  
1566  /**
1567   * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
1568   *			     end is the originator of the sequence that is a
1569   *			     response to our initial exchange
1570   * @mp: The EM that the exchange is on
1571   * @fp: The response frame
1572   */
1573  static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1574  {
1575  	struct fc_frame_header *fh = fc_frame_header_get(fp);
1576  	struct fc_seq *sp;
1577  	struct fc_exch *ep;
1578  	enum fc_sof sof;
1579  	u32 f_ctl;
1580  	int rc;
1581  
1582  	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1583  	if (!ep) {
1584  		atomic_inc(&mp->stats.xid_not_found);
1585  		goto out;
1586  	}
1587  	if (ep->esb_stat & ESB_ST_COMPLETE) {
1588  		atomic_inc(&mp->stats.xid_not_found);
1589  		goto rel;
1590  	}
1591  	if (ep->rxid == FC_XID_UNKNOWN)
1592  		ep->rxid = ntohs(fh->fh_rx_id);
1593  	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1594  		atomic_inc(&mp->stats.xid_not_found);
1595  		goto rel;
1596  	}
1597  	if (ep->did != ntoh24(fh->fh_s_id) &&
1598  	    ep->did != FC_FID_FLOGI) {
1599  		atomic_inc(&mp->stats.xid_not_found);
1600  		goto rel;
1601  	}
1602  	sof = fr_sof(fp);
1603  	sp = &ep->seq;
1604  	if (fc_sof_is_init(sof)) {
1605  		sp->ssb_stat |= SSB_ST_RESP;
1606  		sp->id = fh->fh_seq_id;
1607  	}
1608  
1609  	f_ctl = ntoh24(fh->fh_f_ctl);
1610  	fr_seq(fp) = sp;
1611  
1612  	spin_lock_bh(&ep->ex_lock);
1613  	if (f_ctl & FC_FC_SEQ_INIT)
1614  		ep->esb_stat |= ESB_ST_SEQ_INIT;
1615  	spin_unlock_bh(&ep->ex_lock);
1616  
1617  	if (fc_sof_needs_ack(sof))
1618  		fc_seq_send_ack(sp, fp);
1619  
1620  	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1621  	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1622  	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1623  		spin_lock_bh(&ep->ex_lock);
1624  		rc = fc_exch_done_locked(ep);
1625  		WARN_ON(fc_seq_exch(sp) != ep);
1626  		spin_unlock_bh(&ep->ex_lock);
1627  		if (!rc) {
1628  			fc_exch_delete(ep);
1629  		} else {
1630  			FC_EXCH_DBG(ep, "ep is completed already, "
1631  					"hence skip calling the resp\n");
1632  			goto skip_resp;
1633  		}
1634  	}
1635  
1636  	/*
1637  	 * Call the receive function.
1638  	 * The sequence is held (has a refcnt) for us,
1639  	 * but not for the receive function.
1640  	 *
1641  	 * The receive function may allocate a new sequence
1642  	 * over the old one, so we shouldn't change the
1643  	 * sequence after this.
1644  	 *
1645  	 * The frame will be freed by the receive function.
1646  	 * If new exch resp handler is valid then call that
1647  	 * first.
1648  	 */
1649  	if (!fc_invoke_resp(ep, sp, fp))
1650  		fc_frame_free(fp);
1651  
1652  skip_resp:
1653  	fc_exch_release(ep);
1654  	return;
1655  rel:
1656  	fc_exch_release(ep);
1657  out:
1658  	fc_frame_free(fp);
1659  }
1660  
1661  /**
1662   * fc_exch_recv_resp() - Handler for a sequence where other end is
1663   *			 responding to our sequence
1664   * @mp: The EM that the exchange is on
1665   * @fp: The response frame
1666   */
1667  static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1668  {
1669  	struct fc_seq *sp;
1670  
1671  	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
1672  
1673  	if (!sp)
1674  		atomic_inc(&mp->stats.xid_not_found);
1675  	else
1676  		atomic_inc(&mp->stats.non_bls_resp);
1677  
1678  	fc_frame_free(fp);
1679  }
1680  
1681  /**
1682   * fc_exch_abts_resp() - Handler for a response to an ABTS
1683   * @ep: The exchange that the frame is on
1684   * @fp: The response frame
1685   *
1686   * This response would be to an ABTS cancelling an exchange or sequence.
1687   * The response can be either BA_ACC or BA_RJT
1688   */
1689  static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1690  {
1691  	struct fc_frame_header *fh;
1692  	struct fc_ba_acc *ap;
1693  	struct fc_seq *sp;
1694  	u16 low;
1695  	u16 high;
1696  	int rc = 1, has_rec = 0;
1697  
1698  	fh = fc_frame_header_get(fp);
1699  	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
1700  		    fc_exch_rctl_name(fh->fh_r_ctl));
1701  
1702  	if (cancel_delayed_work_sync(&ep->timeout_work)) {
1703  		FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
1704  		fc_exch_release(ep);	/* release from pending timer hold */
1705  		return;
1706  	}
1707  
1708  	spin_lock_bh(&ep->ex_lock);
1709  	switch (fh->fh_r_ctl) {
1710  	case FC_RCTL_BA_ACC:
1711  		ap = fc_frame_payload_get(fp, sizeof(*ap));
1712  		if (!ap)
1713  			break;
1714  
1715  		/*
1716  		 * Decide whether to establish a Recovery Qualifier.
1717  		 * We do this if there is a non-empty SEQ_CNT range and
1718  		 * SEQ_ID is the same as the one we aborted.
1719  		 */
1720  		low = ntohs(ap->ba_low_seq_cnt);
1721  		high = ntohs(ap->ba_high_seq_cnt);
1722  		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1723  		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1724  		     ap->ba_seq_id == ep->seq_id) && low != high) {
1725  			ep->esb_stat |= ESB_ST_REC_QUAL;
1726  			fc_exch_hold(ep);  /* hold for recovery qualifier */
1727  			has_rec = 1;
1728  		}
1729  		break;
1730  	case FC_RCTL_BA_RJT:
1731  		break;
1732  	default:
1733  		break;
1734  	}
1735  
1736  	/* Do we need any other checks here?  Can we reuse more of
1737  	 * fc_exch_recv_seq_resp()?
1738  	 */
1739  	sp = &ep->seq;
1740  	/*
1741  	 * do we want to check END_SEQ as well as LAST_SEQ here?
1742  	 */
1743  	if (ep->fh_type != FC_TYPE_FCP &&
1744  	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1745  		rc = fc_exch_done_locked(ep);
1746  	spin_unlock_bh(&ep->ex_lock);
1747  
1748  	fc_exch_hold(ep);
1749  	if (!rc)
1750  		fc_exch_delete(ep);
1751  	if (!fc_invoke_resp(ep, sp, fp))
1752  		fc_frame_free(fp);
1753  	if (has_rec)
1754  		fc_exch_timer_set(ep, ep->r_a_tov);
1755  	fc_exch_release(ep);
1756  }
1757  
1758  /**
1759   * fc_exch_recv_bls() - Handler for a BLS sequence
1760   * @mp: The EM that the exchange is on
1761   * @fp: The request frame
1762   *
1763   * The BLS frame is always a sequence initiated by the remote side.
1764   * We may be either the originator or recipient of the exchange.
1765   */
1766  static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1767  {
1768  	struct fc_frame_header *fh;
1769  	struct fc_exch *ep;
1770  	u32 f_ctl;
1771  
1772  	fh = fc_frame_header_get(fp);
1773  	f_ctl = ntoh24(fh->fh_f_ctl);
1774  	fr_seq(fp) = NULL;
1775  
1776  	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1777  			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1778  	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1779  		spin_lock_bh(&ep->ex_lock);
1780  		ep->esb_stat |= ESB_ST_SEQ_INIT;
1781  		spin_unlock_bh(&ep->ex_lock);
1782  	}
1783  	if (f_ctl & FC_FC_SEQ_CTX) {
1784  		/*
1785  		 * A response to a sequence we initiated.
1786  		 * This should only be ACKs for class 2 or F.
1787  		 */
1788  		switch (fh->fh_r_ctl) {
1789  		case FC_RCTL_ACK_1:
1790  		case FC_RCTL_ACK_0:
1791  			break;
1792  		default:
1793  			if (ep)
1794  				FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
1795  					    fh->fh_r_ctl,
1796  					    fc_exch_rctl_name(fh->fh_r_ctl));
1797  			break;
1798  		}
1799  		fc_frame_free(fp);
1800  	} else {
1801  		switch (fh->fh_r_ctl) {
1802  		case FC_RCTL_BA_RJT:
1803  		case FC_RCTL_BA_ACC:
1804  			if (ep)
1805  				fc_exch_abts_resp(ep, fp);
1806  			else
1807  				fc_frame_free(fp);
1808  			break;
1809  		case FC_RCTL_BA_ABTS:
1810  			if (ep)
1811  				fc_exch_recv_abts(ep, fp);
1812  			else
1813  				fc_frame_free(fp);
1814  			break;
1815  		default:			/* ignore junk */
1816  			fc_frame_free(fp);
1817  			break;
1818  		}
1819  	}
1820  	if (ep)
1821  		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
1822  }
1823  
1824  /**
1825   * fc_seq_ls_acc() - Accept sequence with LS_ACC
1826   * @rx_fp: The received frame, not freed here.
1827   *
1828   * If this fails due to allocation or transmit congestion, assume the
1829   * originator will repeat the sequence.
1830   */
1831  static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1832  {
1833  	struct fc_lport *lport;
1834  	struct fc_els_ls_acc *acc;
1835  	struct fc_frame *fp;
1836  	struct fc_seq *sp;
1837  
1838  	lport = fr_dev(rx_fp);
1839  	sp = fr_seq(rx_fp);
1840  	fp = fc_frame_alloc(lport, sizeof(*acc));
1841  	if (!fp) {
1842  		FC_EXCH_DBG(fc_seq_exch(sp),
1843  			    "exch: drop LS_ACC, out of memory\n");
1844  		return;
1845  	}
1846  	acc = fc_frame_payload_get(fp, sizeof(*acc));
1847  	memset(acc, 0, sizeof(*acc));
1848  	acc->la_cmd = ELS_LS_ACC;
1849  	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1850  	lport->tt.frame_send(lport, fp);
1851  }
1852  
1853  /**
1854   * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
1855   * @rx_fp: The received frame, not freed here.
1856   * @reason: The reason the sequence is being rejected
1857   * @explan: The explanation for the rejection
1858   *
1859   * If this fails due to allocation or transmit congestion, assume the
1860   * originator will repeat the sequence.
1861   */
1862  static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1863  			  enum fc_els_rjt_explan explan)
1864  {
1865  	struct fc_lport *lport;
1866  	struct fc_els_ls_rjt *rjt;
1867  	struct fc_frame *fp;
1868  	struct fc_seq *sp;
1869  
1870  	lport = fr_dev(rx_fp);
1871  	sp = fr_seq(rx_fp);
1872  	fp = fc_frame_alloc(lport, sizeof(*rjt));
1873  	if (!fp) {
1874  		FC_EXCH_DBG(fc_seq_exch(sp),
1875  			    "exch: drop LS_RJT, out of memory\n");
1876  		return;
1877  	}
1878  	rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1879  	memset(rjt, 0, sizeof(*rjt));
1880  	rjt->er_cmd = ELS_LS_RJT;
1881  	rjt->er_reason = reason;
1882  	rjt->er_explan = explan;
1883  	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1884  	lport->tt.frame_send(lport, fp);
1885  }
1886  
1887  /**
1888   * fc_exch_reset() - Reset an exchange
1889   * @ep: The exchange to be reset
1890   *
1891   * Note: May sleep if invoked from outside a response handler.
1892   */
1893  static void fc_exch_reset(struct fc_exch *ep)
1894  {
1895  	struct fc_seq *sp;
1896  	int rc = 1;
1897  
1898  	spin_lock_bh(&ep->ex_lock);
1899  	ep->state |= FC_EX_RST_CLEANUP;
1900  	fc_exch_timer_cancel(ep);
1901  	if (ep->esb_stat & ESB_ST_REC_QUAL)
1902  		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
1903  	ep->esb_stat &= ~ESB_ST_REC_QUAL;
1904  	sp = &ep->seq;
1905  	rc = fc_exch_done_locked(ep);
1906  	spin_unlock_bh(&ep->ex_lock);
1907  
1908  	fc_exch_hold(ep);
1909  
1910  	if (!rc) {
1911  		fc_exch_delete(ep);
1912  	} else {
1913  		FC_EXCH_DBG(ep, "ep is completed already, "
1914  				"skipping the response handler\n");
1915  		goto skip_resp;
1916  	}
1917  
1918  	fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
1919  skip_resp:
1920  	fc_seq_set_resp(sp, NULL, ep->arg);
1921  	fc_exch_release(ep);
1922  }
1923  
1924  /**
1925   * fc_exch_pool_reset() - Reset a per cpu exchange pool
1926   * @lport: The local port that the exchange pool is on
1927   * @pool:  The exchange pool to be reset
1928   * @sid:   The source ID
1929   * @did:   The destination ID
1930   *
1931   * Resets a per cpu exchange pool, releasing all of its sequences
1932   * and exchanges. If @sid is non-zero, only exchanges whose source
1933   * ID matches @sid are reset; if @did is non-zero, only exchanges
1934   * whose destination ID matches @did are reset.
1935   */
1936  static void fc_exch_pool_reset(struct fc_lport *lport,
1937  			       struct fc_exch_pool *pool,
1938  			       u32 sid, u32 did)
1939  {
1940  	struct fc_exch *ep;
1941  	struct fc_exch *next;
1942  
1943  	spin_lock_bh(&pool->lock);
1944  restart:
1945  	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
1946  		if ((lport == ep->lp) &&
1947  		    (sid == 0 || sid == ep->sid) &&
1948  		    (did == 0 || did == ep->did)) {
1949  			fc_exch_hold(ep);
1950  			spin_unlock_bh(&pool->lock);
1951  
1952  			fc_exch_reset(ep);
1953  
1954  			fc_exch_release(ep);
1955  			spin_lock_bh(&pool->lock);
1956  
1957  			/*
1958  			 * must restart the loop in case multiple eps were
1959  			 * released while the lock was dropped.
1960  			 */
1961  			goto restart;
1962  		}
1963  	}
1964  	pool->next_index = 0;
1965  	pool->left = FC_XID_UNKNOWN;
1966  	pool->right = FC_XID_UNKNOWN;
1967  	spin_unlock_bh(&pool->lock);
1968  }
1969  
1970  /**
1971   * fc_exch_mgr_reset() - Reset all EMs of a local port
1972   * @lport: The local port whose EMs are to be reset
1973   * @sid:   The source ID
1974   * @did:   The destination ID
1975   *
1976   * Reset all EMs associated with a given local port, releasing all
1977   * of their sequences and exchanges. If @sid is non-zero, only
1978   * exchanges whose source ID matches @sid are reset; if @did is
1979   * non-zero, only exchanges whose destination ID matches @did are reset.
1980   */
1981  void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1982  {
1983  	struct fc_exch_mgr_anchor *ema;
1984  	unsigned int cpu;
1985  
1986  	list_for_each_entry(ema, &lport->ema_list, ema_list) {
1987  		for_each_possible_cpu(cpu)
1988  			fc_exch_pool_reset(lport,
1989  					   per_cpu_ptr(ema->mp->pool, cpu),
1990  					   sid, did);
1991  	}
1992  }
1993  EXPORT_SYMBOL(fc_exch_mgr_reset);
1994  
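/*
 * Usage sketch (not part of this file): a caller tearing down a lost remote
 * port would typically reset exchanges in both directions.  The helper name
 * and the remote_fid parameter below are hypothetical.
 */
static void example_reset_remote_port(struct fc_lport *lport, u32 remote_fid)
{
	/* Reset exchanges whose destination is the lost remote port ... */
	fc_exch_mgr_reset(lport, 0, remote_fid);
	/* ... and exchanges the remote port originated towards us. */
	fc_exch_mgr_reset(lport, remote_fid, 0);
}
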
1995  /**
1996   * fc_exch_lookup() - find an exchange
1997   * @lport: The local port
1998   * @xid: The exchange ID
1999   *
2000   * Returns exchange pointer with hold for caller, or NULL if not found.
2001   */
2002  static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
2003  {
2004  	struct fc_exch_mgr_anchor *ema;
2005  
2006  	list_for_each_entry(ema, &lport->ema_list, ema_list)
2007  		if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
2008  			return fc_exch_find(ema->mp, xid);
2009  	return NULL;
2010  }
2011  
2012  /**
2013   * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
2014   * @rfp: The REC frame, not freed here.
2015   *
2016   * Note that the requesting port may be different than the S_ID in the request.
2017   */
2018  static void fc_exch_els_rec(struct fc_frame *rfp)
2019  {
2020  	struct fc_lport *lport;
2021  	struct fc_frame *fp;
2022  	struct fc_exch *ep;
2023  	struct fc_els_rec *rp;
2024  	struct fc_els_rec_acc *acc;
2025  	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
2026  	enum fc_els_rjt_explan explan;
2027  	u32 sid;
2028  	u16 xid, rxid, oxid;
2029  
2030  	lport = fr_dev(rfp);
2031  	rp = fc_frame_payload_get(rfp, sizeof(*rp));
2032  	explan = ELS_EXPL_INV_LEN;
2033  	if (!rp)
2034  		goto reject;
2035  	sid = ntoh24(rp->rec_s_id);
2036  	rxid = ntohs(rp->rec_rx_id);
2037  	oxid = ntohs(rp->rec_ox_id);
2038  
2039  	explan = ELS_EXPL_OXID_RXID;
2040  	if (sid == fc_host_port_id(lport->host))
2041  		xid = oxid;
2042  	else
2043  		xid = rxid;
2044  	if (xid == FC_XID_UNKNOWN) {
2045  		FC_LPORT_DBG(lport,
2046  			     "REC request from %x: invalid rxid %x oxid %x\n",
2047  			     sid, rxid, oxid);
2048  		goto reject;
2049  	}
2050  	ep = fc_exch_lookup(lport, xid);
2051  	if (!ep) {
2052  		FC_LPORT_DBG(lport,
2053  			     "REC request from %x: rxid %x oxid %x not found\n",
2054  			     sid, rxid, oxid);
2055  		goto reject;
2056  	}
2057  	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
2058  		    sid, rxid, oxid);
2059  	if (ep->oid != sid || oxid != ep->oxid)
2060  		goto rel;
2061  	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
2062  		goto rel;
2063  	fp = fc_frame_alloc(lport, sizeof(*acc));
2064  	if (!fp) {
2065  		FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
2066  		goto out;
2067  	}
2068  
2069  	acc = fc_frame_payload_get(fp, sizeof(*acc));
2070  	memset(acc, 0, sizeof(*acc));
2071  	acc->reca_cmd = ELS_LS_ACC;
2072  	acc->reca_ox_id = rp->rec_ox_id;
2073  	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
2074  	acc->reca_rx_id = htons(ep->rxid);
2075  	if (ep->sid == ep->oid)
2076  		hton24(acc->reca_rfid, ep->did);
2077  	else
2078  		hton24(acc->reca_rfid, ep->sid);
2079  	acc->reca_fc4value = htonl(ep->seq.rec_data);
2080  	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
2081  						 ESB_ST_SEQ_INIT |
2082  						 ESB_ST_COMPLETE));
2083  	fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
2084  	lport->tt.frame_send(lport, fp);
2085  out:
2086  	fc_exch_release(ep);
2087  	return;
2088  
2089  rel:
2090  	fc_exch_release(ep);
2091  reject:
2092  	fc_seq_ls_rjt(rfp, reason, explan);
2093  }
2094  
2095  /**
2096   * fc_exch_rrq_resp() - Handler for RRQ responses
2097   * @sp:	 The sequence that the RRQ is on
2098   * @fp:	 The RRQ frame
2099   * @arg: The exchange that the RRQ is on
2100   *
2101   * TODO: fix error handler.
2102   */
2103  static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
2104  {
2105  	struct fc_exch *aborted_ep = arg;
2106  	unsigned int op;
2107  
2108  	if (IS_ERR(fp)) {
2109  		int err = PTR_ERR(fp);
2110  
2111  		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
2112  			goto cleanup;
2113  		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
2114  			    "frame error %d\n", err);
2115  		return;
2116  	}
2117  
2118  	op = fc_frame_payload_op(fp);
2119  	fc_frame_free(fp);
2120  
2121  	switch (op) {
2122  	case ELS_LS_RJT:
2123  		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
2124  		fallthrough;
2125  	case ELS_LS_ACC:
2126  		goto cleanup;
2127  	default:
2128  		FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
2129  			    op);
2130  		return;
2131  	}
2132  
2133  cleanup:
2134  	fc_exch_done(&aborted_ep->seq);
2135  	/* drop hold for rec qual */
2136  	fc_exch_release(aborted_ep);
2137  }
2138  
2139  
2140  /**
2141   * fc_exch_seq_send() - Send a frame using a new exchange and sequence
2142   * @lport:	The local port to send the frame on
2143   * @fp:		The frame to be sent
2144   * @resp:	The response handler for this request
2145   * @destructor: The destructor for the exchange
2146   * @arg:	The argument to be passed to the response handler
2147   * @timer_msec: The timeout period for the exchange
2148   *
2149   * The exchange response handler is set to the resp() function
2150   * pointer. It can be called in two scenarios: when a timeout
2151   * occurs or when a response frame is received for the exchange.
2152   * On a timeout the fc_frame pointer passed to the handler encodes
2153   * the error and can be tested with the IS_ERR()/PTR_ERR() macros.
2154   *
2155   * The exchange destructor handler is also set in this routine.
2156   * It is invoked by the EM layer when the exchange is about to be
2157   * freed, so the caller can release its own resources along with
2158   * the exchange.
2159   *
2160   * The arg is passed back to both the resp and destructor handlers.
2161   *
2162   * A timeout (in milliseconds) is armed for the exchange if a
2163   * non-zero timer_msec argument is specified. The timer is canceled
2164   * when it fires or when the exchange completes; the timeout handler
2165   * is registered by the EM layer.
2166   *
2167   * The following frame header fields must be filled in before
2168   * calling this routine:
2169   *
2170   * - routing control
2171   * - FC port did
2172   * - FC port sid
2173   * - FC header type
2174   * - frame control
2175   * - parameter or relative offset
2176   */
2177  struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
2178  				struct fc_frame *fp,
2179  				void (*resp)(struct fc_seq *,
2180  					     struct fc_frame *fp,
2181  					     void *arg),
2182  				void (*destructor)(struct fc_seq *, void *),
2183  				void *arg, u32 timer_msec)
2184  {
2185  	struct fc_exch *ep;
2186  	struct fc_seq *sp = NULL;
2187  	struct fc_frame_header *fh;
2188  	struct fc_fcp_pkt *fsp = NULL;
2189  	int rc = 1;
2190  
2191  	ep = fc_exch_alloc(lport, fp);
2192  	if (!ep) {
2193  		fc_frame_free(fp);
2194  		return NULL;
2195  	}
2196  	ep->esb_stat |= ESB_ST_SEQ_INIT;
2197  	fh = fc_frame_header_get(fp);
2198  	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
2199  	ep->resp = resp;
2200  	ep->destructor = destructor;
2201  	ep->arg = arg;
2202  	ep->r_a_tov = lport->r_a_tov;
2203  	ep->lp = lport;
2204  	sp = &ep->seq;
2205  
2206  	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
2207  	ep->f_ctl = ntoh24(fh->fh_f_ctl);
2208  	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
2209  	sp->cnt++;
2210  
2211  	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
2212  		fsp = fr_fsp(fp);
2213  		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
2214  	}
2215  
2216  	if (unlikely(lport->tt.frame_send(lport, fp)))
2217  		goto err;
2218  
2219  	if (timer_msec)
2220  		fc_exch_timer_set_locked(ep, timer_msec);
2221  	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */
2222  
2223  	if (ep->f_ctl & FC_FC_SEQ_INIT)
2224  		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
2225  	spin_unlock_bh(&ep->ex_lock);
2226  	return sp;
2227  err:
2228  	if (fsp)
2229  		fc_fcp_ddp_done(fsp);
2230  	rc = fc_exch_done_locked(ep);
2231  	spin_unlock_bh(&ep->ex_lock);
2232  	if (!rc)
2233  		fc_exch_delete(ep);
2234  	return NULL;
2235  }
2236  EXPORT_SYMBOL(fc_exch_seq_send);
2237  
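/*
 * Caller sketch (not part of this file), mirroring the pattern used by
 * fc_exch_rrq() below: fill the payload, fill the FC header, then hand the
 * frame to fc_exch_seq_send().  The helper names, the context pointer and
 * the choice of timeout are illustrative assumptions only.
 */
static void example_els_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	if (IS_ERR(fp))			/* -FC_EX_TIMEOUT or -FC_EX_CLOSED */
		return;
	/* ... inspect the response payload here ... */
	fc_frame_free(fp);		/* the handler owns a real frame */
}

static int example_send_els(struct fc_lport *lport, u32 did, void *ctx)
{
	struct fc_frame *fp;
	struct fc_els_ls_acc *pl;

	fp = fc_frame_alloc(lport, sizeof(*pl));
	if (!fp)
		return -ENOMEM;
	pl = fc_frame_payload_get(fp, sizeof(*pl));
	memset(pl, 0, sizeof(*pl));
	/* ... fill in the ELS command and payload here ... */
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, lport->port_id, FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	if (!fc_exch_seq_send(lport, fp, example_els_resp, NULL, ctx,
			      lport->e_d_tov))
		return -ENOMEM;	/* frame already consumed on failure */
	return 0;
}
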
2238  /**
2239   * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
2240   * @ep: The exchange to send the RRQ on
2241   *
2242   * This tells the remote port to stop blocking the use of
2243   * the exchange and the seq_cnt range.
2244   */
2245  static void fc_exch_rrq(struct fc_exch *ep)
2246  {
2247  	struct fc_lport *lport;
2248  	struct fc_els_rrq *rrq;
2249  	struct fc_frame *fp;
2250  	u32 did;
2251  
2252  	lport = ep->lp;
2253  
2254  	fp = fc_frame_alloc(lport, sizeof(*rrq));
2255  	if (!fp)
2256  		goto retry;
2257  
2258  	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
2259  	memset(rrq, 0, sizeof(*rrq));
2260  	rrq->rrq_cmd = ELS_RRQ;
2261  	hton24(rrq->rrq_s_id, ep->sid);
2262  	rrq->rrq_ox_id = htons(ep->oxid);
2263  	rrq->rrq_rx_id = htons(ep->rxid);
2264  
2265  	did = ep->did;
2266  	if (ep->esb_stat & ESB_ST_RESP)
2267  		did = ep->sid;
2268  
2269  	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
2270  		       lport->port_id, FC_TYPE_ELS,
2271  		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
2272  
2273  	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
2274  			     lport->e_d_tov))
2275  		return;
2276  
2277  retry:
2278  	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
2279  	spin_lock_bh(&ep->ex_lock);
2280  	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
2281  		spin_unlock_bh(&ep->ex_lock);
2282  		/* drop hold for rec qual */
2283  		fc_exch_release(ep);
2284  		return;
2285  	}
2286  	ep->esb_stat |= ESB_ST_REC_QUAL;
2287  	fc_exch_timer_set_locked(ep, ep->r_a_tov);
2288  	spin_unlock_bh(&ep->ex_lock);
2289  }
2290  
2291  /**
2292   * fc_exch_els_rrq() - Handler for ELS RRQ (Reinstate Recovery Qualifier) requests
2293   * @fp: The RRQ frame, not freed here.
2294   */
2295  static void fc_exch_els_rrq(struct fc_frame *fp)
2296  {
2297  	struct fc_lport *lport;
2298  	struct fc_exch *ep = NULL;	/* request or subject exchange */
2299  	struct fc_els_rrq *rp;
2300  	u32 sid;
2301  	u16 xid;
2302  	enum fc_els_rjt_explan explan;
2303  
2304  	lport = fr_dev(fp);
2305  	rp = fc_frame_payload_get(fp, sizeof(*rp));
2306  	explan = ELS_EXPL_INV_LEN;
2307  	if (!rp)
2308  		goto reject;
2309  
2310  	/*
2311  	 * lookup subject exchange.
2312  	 */
2313  	sid = ntoh24(rp->rrq_s_id);		/* subject source */
2314  	xid = fc_host_port_id(lport->host) == sid ?
2315  			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
2316  	ep = fc_exch_lookup(lport, xid);
2317  	explan = ELS_EXPL_OXID_RXID;
2318  	if (!ep)
2319  		goto reject;
2320  	spin_lock_bh(&ep->ex_lock);
2321  	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
2322  		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
2323  	if (ep->oxid != ntohs(rp->rrq_ox_id))
2324  		goto unlock_reject;
2325  	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
2326  	    ep->rxid != FC_XID_UNKNOWN)
2327  		goto unlock_reject;
2328  	explan = ELS_EXPL_SID;
2329  	if (ep->sid != sid)
2330  		goto unlock_reject;
2331  
2332  	/*
2333  	 * Clear Recovery Qualifier state, and cancel timer if complete.
2334  	 */
2335  	if (ep->esb_stat & ESB_ST_REC_QUAL) {
2336  		ep->esb_stat &= ~ESB_ST_REC_QUAL;
2337  		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
2338  	}
2339  	if (ep->esb_stat & ESB_ST_COMPLETE)
2340  		fc_exch_timer_cancel(ep);
2341  
2342  	spin_unlock_bh(&ep->ex_lock);
2343  
2344  	/*
2345  	 * Send LS_ACC.
2346  	 */
2347  	fc_seq_ls_acc(fp);
2348  	goto out;
2349  
2350  unlock_reject:
2351  	spin_unlock_bh(&ep->ex_lock);
2352  reject:
2353  	fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
2354  out:
2355  	if (ep)
2356  		fc_exch_release(ep);	/* drop hold from fc_exch_find */
2357  }
2358  
2359  /**
2360   * fc_exch_update_stats() - Aggregate exchange manager stats into the lport
2361   * @lport: The local port whose exchange manager statistics are updated
2362   */
2363  void fc_exch_update_stats(struct fc_lport *lport)
2364  {
2365  	struct fc_host_statistics *st;
2366  	struct fc_exch_mgr_anchor *ema;
2367  	struct fc_exch_mgr *mp;
2368  
2369  	st = &lport->host_stats;
2370  
2371  	list_for_each_entry(ema, &lport->ema_list, ema_list) {
2372  		mp = ema->mp;
2373  		st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2374  		st->fc_no_free_exch_xid +=
2375  				atomic_read(&mp->stats.no_free_exch_xid);
2376  		st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2377  		st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2378  		st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2379  		st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
2380  	}
2381  }
2382  EXPORT_SYMBOL(fc_exch_update_stats);
2383  
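/*
 * Sketch of where this is typically wired up (hypothetical helper): an LLD's
 * fc_host statistics callback can fold the EM counters into the host stats
 * it returns.
 */
static struct fc_host_statistics *example_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_exch_update_stats(lport);
	return &lport->host_stats;
}
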
2384  /**
2385   * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
2386   * @lport: The local port to add the exchange manager to
2387   * @mp:	   The exchange manager to be added to the local port
2388   * @match: The match routine that indicates when this EM should be used
2389   */
2390  struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2391  					   struct fc_exch_mgr *mp,
2392  					   bool (*match)(struct fc_frame *))
2393  {
2394  	struct fc_exch_mgr_anchor *ema;
2395  
2396  	ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2397  	if (!ema)
2398  		return ema;
2399  
2400  	ema->mp = mp;
2401  	ema->match = match;
2402  	/* add EM anchor to EM anchors list */
2403  	list_add_tail(&ema->ema_list, &lport->ema_list);
2404  	kref_get(&mp->kref);
2405  	return ema;
2406  }
2407  EXPORT_SYMBOL(fc_exch_mgr_add);
2408  
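/*
 * Sketch of a match routine (hypothetical, not part of this file).  An LLD
 * that allocates a separate EM for FCP exchanges could register it ahead of
 * the default EM with:
 *
 *	fc_exch_mgr_add(lport, fcp_mp, example_fcp_match);
 */
static bool example_fcp_match(struct fc_frame *fp)
{
	return fc_frame_header_get(fp)->fh_type == FC_TYPE_FCP;
}
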
2409  /**
2410   * fc_exch_mgr_destroy() - Destroy an exchange manager
2411   * @kref: The reference to the EM to be destroyed
2412   */
2413  static void fc_exch_mgr_destroy(struct kref *kref)
2414  {
2415  	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2416  
2417  	mempool_destroy(mp->ep_pool);
2418  	free_percpu(mp->pool);
2419  	kfree(mp);
2420  }
2421  
2422  /**
2423   * fc_exch_mgr_del() - Delete an EM from a local port's list
2424   * @ema: The exchange manager anchor identifying the EM to be deleted
2425   */
2426  void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
2427  {
2428  	/* remove EM anchor from EM anchors list */
2429  	list_del(&ema->ema_list);
2430  	kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2431  	kfree(ema);
2432  }
2433  EXPORT_SYMBOL(fc_exch_mgr_del);
2434  
2435  /**
2436   * fc_exch_mgr_list_clone() - Share all exchange manager objects
2437   * @src: Source lport to clone exchange managers from
2438   * @dst: New lport that takes references to all the exchange managers
2439   */
2440  int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2441  {
2442  	struct fc_exch_mgr_anchor *ema, *tmp;
2443  
2444  	list_for_each_entry(ema, &src->ema_list, ema_list) {
2445  		if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2446  			goto err;
2447  	}
2448  	return 0;
2449  err:
2450  	list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2451  		fc_exch_mgr_del(ema);
2452  	return -ENOMEM;
2453  }
2454  EXPORT_SYMBOL(fc_exch_mgr_list_clone);
2455  
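/*
 * Sketch of the NPIV use case (hypothetical helper): a newly created VN_Port
 * shares the physical N_Port's exchange managers rather than allocating its
 * own, so both ports draw XIDs from the same pools.
 */
static int example_init_vn_port(struct fc_lport *n_port,
				struct fc_lport *vn_port)
{
	return fc_exch_mgr_list_clone(n_port, vn_port);
}
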
2456  /**
2457   * fc_exch_mgr_alloc() - Allocate an exchange manager
2458   * @lport:   The local port that the new EM will be associated with
2459   * @class:   The default FC class for new exchanges
2460   * @min_xid: The minimum XID for exchanges from the new EM
2461   * @max_xid: The maximum XID for exchanges from the new EM
2462   * @match:   The match routine for the new EM
2463   */
2464  struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
2465  				      enum fc_class class,
2466  				      u16 min_xid, u16 max_xid,
2467  				      bool (*match)(struct fc_frame *))
2468  {
2469  	struct fc_exch_mgr *mp;
2470  	u16 pool_exch_range;
2471  	size_t pool_size;
2472  	unsigned int cpu;
2473  	struct fc_exch_pool *pool;
2474  
2475  	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
2476  	    (min_xid & fc_cpu_mask) != 0) {
2477  		FC_LPORT_DBG(lport, "Invalid min_xid 0x%x and max_xid 0x%x\n",
2478  			     min_xid, max_xid);
2479  		return NULL;
2480  	}
2481  
2482  	/*
2483  	 * allocate memory for EM
2484  	 */
2485  	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
2486  	if (!mp)
2487  		return NULL;
2488  
2489  	mp->class = class;
2490  	mp->lport = lport;
2491  	/* adjust em exch xid range for offload */
2492  	mp->min_xid = min_xid;
2493  
2494  	/* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
2495  	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
2496  		sizeof(struct fc_exch *);
2497  	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
2498  		mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
2499  			min_xid - 1;
2500  	} else {
2501  		mp->max_xid = max_xid;
2502  		pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
2503  			(fc_cpu_mask + 1);
2504  	}
2505  
2506  	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
2507  	if (!mp->ep_pool)
2508  		goto free_mp;
2509  
2510  	/*
2511  	 * Set up the per cpu exchange pools with the entire exchange ID
2512  	 * range divided equally across all CPUs. Each pool's exchange
2513  	 * pointer array is sized for its share of that range.
2514  	 */
2515  	mp->pool_max_index = pool_exch_range - 1;
2516  
2517  	/*
2518  	 * Allocate and initialize per cpu exch pool
2519  	 */
2520  	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
2521  	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
2522  	if (!mp->pool)
2523  		goto free_mempool;
2524  	for_each_possible_cpu(cpu) {
2525  		pool = per_cpu_ptr(mp->pool, cpu);
2526  		pool->next_index = 0;
2527  		pool->left = FC_XID_UNKNOWN;
2528  		pool->right = FC_XID_UNKNOWN;
2529  		spin_lock_init(&pool->lock);
2530  		INIT_LIST_HEAD(&pool->ex_list);
2531  	}
2532  
2533  	kref_init(&mp->kref);
2534  	if (!fc_exch_mgr_add(lport, mp, match)) {
2535  		free_percpu(mp->pool);
2536  		goto free_mempool;
2537  	}
2538  
2539  	/*
2540  	 * kref_init() above set mp->kref to 1 and the call to
2541  	 * fc_exch_mgr_add() incremented it again, so drop that
2542  	 * extra reference here.
2543  	 */
2544  	kref_put(&mp->kref, fc_exch_mgr_destroy);
2545  	return mp;
2546  
2547  free_mempool:
2548  	mempool_destroy(mp->ep_pool);
2549  free_mp:
2550  	kfree(mp);
2551  	return NULL;
2552  }
2553  EXPORT_SYMBOL(fc_exch_mgr_alloc);
2554  
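/*
 * Allocation sketch (hypothetical XID bounds, default class 3, no match
 * routine).  min_xid must have the fc_cpu_mask bits clear, and the anchor
 * linking the EM to the lport is created by fc_exch_mgr_alloc() itself.
 */
static int example_setup_em(struct fc_lport *lport)
{
	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, 0x0000, 0x0fff, NULL))
		return -ENOMEM;
	return 0;
}
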
2555  /**
2556   * fc_exch_mgr_free() - Free all exchange managers on a local port
2557   * @lport: The local port whose EMs are to be freed
2558   */
2559  void fc_exch_mgr_free(struct fc_lport *lport)
2560  {
2561  	struct fc_exch_mgr_anchor *ema, *next;
2562  
2563  	flush_workqueue(fc_exch_workqueue);
2564  	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2565  		fc_exch_mgr_del(ema);
2566  }
2567  EXPORT_SYMBOL(fc_exch_mgr_free);
2568  
2569  /**
2570   * fc_find_ema() - Look up the Exchange Manager Anchor for a received
2571   *		    frame based on its XID
2572   * @f_ctl: The frame control field of the received frame
2573   * @lport: The local port the frame was received on
2574   * @fh: The received frame header
2575   */
2576  static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2577  					      struct fc_lport *lport,
2578  					      struct fc_frame_header *fh)
2579  {
2580  	struct fc_exch_mgr_anchor *ema;
2581  	u16 xid;
2582  
2583  	if (f_ctl & FC_FC_EX_CTX)
2584  		xid = ntohs(fh->fh_ox_id);
2585  	else {
2586  		xid = ntohs(fh->fh_rx_id);
2587  		if (xid == FC_XID_UNKNOWN)
2588  			return list_entry(lport->ema_list.prev,
2589  					  typeof(*ema), ema_list);
2590  	}
2591  
2592  	list_for_each_entry(ema, &lport->ema_list, ema_list) {
2593  		if ((xid >= ema->mp->min_xid) &&
2594  		    (xid <= ema->mp->max_xid))
2595  			return ema;
2596  	}
2597  	return NULL;
2598  }
2599  /**
2600   * fc_exch_recv() - Handler for received frames
2601   * @lport: The local port the frame was received on
2602   * @fp:	The received frame
2603   */
2604  void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
2605  {
2606  	struct fc_frame_header *fh = fc_frame_header_get(fp);
2607  	struct fc_exch_mgr_anchor *ema;
2608  	u32 f_ctl;
2609  
2610  	/* lport lock ? */
2611  	if (!lport || lport->state == LPORT_ST_DISABLED) {
2612  		FC_LIBFC_DBG("Receiving frames for an lport that "
2613  			     "has not been initialized correctly\n");
2614  		fc_frame_free(fp);
2615  		return;
2616  	}
2617  
2618  	f_ctl = ntoh24(fh->fh_f_ctl);
2619  	ema = fc_find_ema(f_ctl, lport, fh);
2620  	if (!ema) {
2621  		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
2622  				    "f_ctl <0x%x>, xid <0x%x>\n",
2623  				     f_ctl,
2624  				     (f_ctl & FC_FC_EX_CTX) ?
2625  				     ntohs(fh->fh_ox_id) :
2626  				     ntohs(fh->fh_rx_id));
2627  		fc_frame_free(fp);
2628  		return;
2629  	}
2630  
2631  	/*
2632  	 * If frame is marked invalid, just drop it.
2633  	 */
2634  	switch (fr_eof(fp)) {
2635  	case FC_EOF_T:
2636  		if (f_ctl & FC_FC_END_SEQ)
2637  			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
2638  		fallthrough;
2639  	case FC_EOF_N:
2640  		if (fh->fh_type == FC_TYPE_BLS)
2641  			fc_exch_recv_bls(ema->mp, fp);
2642  		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
2643  			 FC_FC_EX_CTX)
2644  			fc_exch_recv_seq_resp(ema->mp, fp);
2645  		else if (f_ctl & FC_FC_SEQ_CTX)
2646  			fc_exch_recv_resp(ema->mp, fp);
2647  		else	/* no EX_CTX and no SEQ_CTX */
2648  			fc_exch_recv_req(lport, ema->mp, fp);
2649  		break;
2650  	default:
2651  		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
2652  			     fr_eof(fp));
2653  		fc_frame_free(fp);
2654  	}
2655  }
2656  EXPORT_SYMBOL(fc_exch_recv);
2657  
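/*
 * Receive path sketch (hypothetical helper).  The transport is expected to
 * have set fr_sof(), fr_eof() and the frame length before this point; the
 * exchange layer takes ownership of the frame from here on.
 */
static void example_lld_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	fr_dev(fp) = lport;	/* needed by the ELS handlers in this file */
	fc_exch_recv(lport, fp);
}
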
2658  /**
2659   * fc_exch_init() - Initialize the exchange layer for a local port
2660   * @lport: The local port to initialize the exchange layer for
2661   */
2662  int fc_exch_init(struct fc_lport *lport)
2663  {
2664  	if (!lport->tt.exch_mgr_reset)
2665  		lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2666  
2667  	return 0;
2668  }
2669  EXPORT_SYMBOL(fc_exch_init);
2670  
2671  /**
2672   * fc_setup_exch_mgr() - Set up the exchange-layer module resources
2673   */
2674  int fc_setup_exch_mgr(void)
2675  {
2676  	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2677  					 0, SLAB_HWCACHE_ALIGN, NULL);
2678  	if (!fc_em_cachep)
2679  		return -ENOMEM;
2680  
2681  	/*
2682  	 * Initialize fc_cpu_mask and fc_cpu_order.  fc_cpu_mask is
2683  	 * derived from nr_cpu_ids rounded up to the next power of
2684  	 * two, and that power is stored in fc_cpu_order; both are
2685  	 * needed later when mapping an exchange ID to an index in
2686  	 * the per cpu exchange pool.
2687  	 *
2688  	 * The round up is required so that fc_cpu_mask aligns with
2689  	 * the low bits of the exchange ID.  All incoming frames of
2690  	 * an exchange are then delivered to the same CPU on which
2691  	 * the exchange originated, selected by a simple bitwise
2692  	 * AND operation between the exchange ID and fc_cpu_mask
2693  	 * (see the mapping sketch below).
2694  	 */
2695  	fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
2696  	fc_cpu_mask = (1 << fc_cpu_order) - 1;
2697  
2698  	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2699  	if (!fc_exch_workqueue)
2700  		goto err;
2701  	return 0;
2702  err:
2703  	kmem_cache_destroy(fc_em_cachep);
2704  	return -ENOMEM;
2705  }
2706  
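/*
 * Worked example of the mapping described above, as a sketch (the helper is
 * hypothetical): with 6 possible CPUs, nr_cpu_ids rounds up to 8, giving
 * fc_cpu_order = 3 and fc_cpu_mask = 0x7.  The low bits of an XID select the
 * per cpu pool and the remaining bits select the slot within that pool.
 */
static void example_xid_mapping(struct fc_exch_mgr *mp, u16 xid)
{
	unsigned int cpu = xid & fc_cpu_mask;		  /* owning CPU pool  */
	u16 index = (xid - mp->min_xid) >> fc_cpu_order;  /* slot in the pool */

	pr_debug("xid 0x%x -> cpu %u, pool index %u\n", xid, cpu, index);
}
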
2707  /**
2708   * fc_destroy_exch_mgr() - Tear down the exchange-layer module resources
2709   */
2710  void fc_destroy_exch_mgr(void)
2711  {
2712  	destroy_workqueue(fc_exch_workqueue);
2713  	kmem_cache_destroy(fc_em_cachep);
2714  }
2715