1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
44 #include <net/xfrm.h>
45 #include <net/ipv6.h>
46 #include <net/tcp.h>
47 #include <net/busy_poll.h>
48 #ifdef CONFIG_CHELSIO_T4_FCOE
49 #include <scsi/fc/fc_fcoe.h>
50 #endif /* CONFIG_CHELSIO_T4_FCOE */
51 #include "cxgb4.h"
52 #include "t4_regs.h"
53 #include "t4_values.h"
54 #include "t4_msg.h"
55 #include "t4fw_api.h"
56 #include "cxgb4_ptp.h"
57 #include "cxgb4_uld.h"
58 
59 /*
60  * Rx buffer size.  We use largish buffers if possible but settle for single
61  * pages under memory shortage.
62  */
63 #if PAGE_SHIFT >= 16
64 # define FL_PG_ORDER 0
65 #else
66 # define FL_PG_ORDER (16 - PAGE_SHIFT)
67 #endif
68 
69 /* RX_PULL_LEN should be <= RX_COPY_THRES */
70 #define RX_COPY_THRES    256
71 #define RX_PULL_LEN      128
72 
73 /*
74  * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
75  * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
76  */
77 #define RX_PKT_SKB_LEN   512
78 
79 /*
80  * Max number of Tx descriptors we clean up at a time.  Should be modest as
81  * freeing skbs isn't cheap and it happens while holding locks.  We just need
82  * to free packets faster than they arrive; we eventually catch up and keep
83  * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
84  */
85 #define MAX_TX_RECLAIM 16
86 
87 /*
88  * Max number of Rx buffers we replenish at a time.  Again keep this modest,
89  * allocating buffers isn't cheap either.
90  */
91 #define MAX_RX_REFILL 16U
92 
93 /*
94  * Period of the Rx queue check timer.  This timer is infrequent as it has
95  * something to do only when the system experiences severe memory shortage.
96  */
97 #define RX_QCHECK_PERIOD (HZ / 2)
98 
99 /*
100  * Period of the Tx queue check timer.
101  */
102 #define TX_QCHECK_PERIOD (HZ / 2)
103 
104 /*
105  * Max number of Tx descriptors to be reclaimed by the Tx timer.
106  */
107 #define MAX_TIMER_TX_RECLAIM 100
108 
109 /*
110  * Timer index used when backing off due to memory shortage.
111  */
112 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
113 
114 /*
115  * Suspension threshold for non-Ethernet Tx queues.  We require enough room
116  * for a full sized WR.
117  */
118 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
119 
120 /*
121  * Max Tx descriptor space we allow for an Ethernet packet to be inlined
122  * into a WR.
123  */
124 #define MAX_IMM_TX_PKT_LEN 256
125 
126 /*
127  * Max size of a WR sent through a control Tx queue.
128  */
129 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
130 
131 struct rx_sw_desc {                /* SW state per Rx descriptor */
132 	struct page *page;
133 	dma_addr_t dma_addr;
134 };
135 
136 /*
137  * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
138  * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
139  * We could easily support more but there doesn't seem to be much need for
140  * that ...
141  */
142 #define FL_MTU_SMALL 1500
143 #define FL_MTU_LARGE 9000
144 
145 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
146 					  unsigned int mtu)
147 {
148 	struct sge *s = &adapter->sge;
149 
150 	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
151 }
152 
153 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
154 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
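/* Illustrative sizing (hypothetical values; the real ones come from the
 * adapter's SGE parameters): with a pktshift of 2 and an fl_align of 32,
 * FL_MTU_SMALL_BUFSIZE() works out to ALIGN(2 + ETH_HLEN(14) +
 * VLAN_HLEN(4) + 1500, 32) = ALIGN(1520, 32) = 1536 bytes.
 */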
155 
156 /*
157  * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
158  * these to specify the buffer size as an index into the SGE Free List Buffer
159  * Size register array.  We also use bit 4, when the buffer has been unmapped
160  * for DMA, but this is of course never sent to the hardware and is only used
161  * to prevent double unmappings.  All of the above requires that the Free List
162  * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
163  * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
164  * Free List Buffer alignment is 32 bytes, this works out for us ...
165  */
166 enum {
167 	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
168 	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
169 	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
170 
171 	/*
172 	 * XXX We shouldn't depend on being able to use these indices.
173 	 * XXX Especially when some other Master PF has initialized the
174 	 * XXX adapter or we use the Firmware Configuration File.  We
175 	 * XXX should really search through the Host Buffer Size register
176 	 * XXX array for the appropriately sized buffer indices.
177 	 */
178 	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
179 	RX_LARGE_PG_BUF  = 0x1,   /* buffer large (FL_PG_ORDER) page buffer */
180 
181 	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
182 	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
183 };
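/* Example of the encoding described above (illustrative address): a
 * dma_addr of 0x12345600 | RX_LARGE_MTU_BUF | RX_UNMAPPED_BUF (low bits
 * 0x13) denotes a large-MTU buffer whose DMA mapping has already been
 * released; get_buf_addr() below masks the flag bits back off.
 */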
184 
185 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
186 #define MIN_NAPI_WORK  1
187 
188 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
189 {
190 	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
191 }
192 
193 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
194 {
195 	return !(d->dma_addr & RX_UNMAPPED_BUF);
196 }
197 
198 /**
199  *	txq_avail - return the number of available slots in a Tx queue
200  *	@q: the Tx queue
201  *
202  *	Returns the number of descriptors in a Tx queue available to write new
203  *	packets.
204  */
205 static inline unsigned int txq_avail(const struct sge_txq *q)
206 {
207 	return q->size - 1 - q->in_use;
208 }
209 
210 /**
211  *	fl_cap - return the capacity of a free-buffer list
212  *	@fl: the FL
213  *
214  *	Returns the capacity of a free-buffer list.  The capacity is less than
215  *	the size because one descriptor needs to be left unpopulated, otherwise
216  *	HW will think the FL is empty.
217  */
218 static inline unsigned int fl_cap(const struct sge_fl *fl)
219 {
220 	return fl->size - 8;   /* 1 descriptor = 8 buffers */
221 }
222 
223 /**
224  *	fl_starving - return whether a Free List is starving.
225  *	@adapter: pointer to the adapter
226  *	@fl: the Free List
227  *
228  *	Tests specified Free List to see whether the number of buffers
229  *	available to the hardware has fallen below our "starvation"
230  *	threshold.
231  */
232 static inline bool fl_starving(const struct adapter *adapter,
233 			       const struct sge_fl *fl)
234 {
235 	const struct sge *s = &adapter->sge;
236 
237 	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
238 }
239 
240 int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
241 		  dma_addr_t *addr)
242 {
243 	const skb_frag_t *fp, *end;
244 	const struct skb_shared_info *si;
245 
246 	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
247 	if (dma_mapping_error(dev, *addr))
248 		goto out_err;
249 
250 	si = skb_shinfo(skb);
251 	end = &si->frags[si->nr_frags];
252 
253 	for (fp = si->frags; fp < end; fp++) {
254 		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
255 					   DMA_TO_DEVICE);
256 		if (dma_mapping_error(dev, *addr))
257 			goto unwind;
258 	}
259 	return 0;
260 
261 unwind:
262 	while (fp-- > si->frags)
263 		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
264 
265 	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
266 out_err:
267 	return -ENOMEM;
268 }
269 EXPORT_SYMBOL(cxgb4_map_skb);
270 
271 #ifdef CONFIG_NEED_DMA_MAP_STATE
272 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
273 		      const dma_addr_t *addr)
274 {
275 	const skb_frag_t *fp, *end;
276 	const struct skb_shared_info *si;
277 
278 	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
279 
280 	si = skb_shinfo(skb);
281 	end = &si->frags[si->nr_frags];
282 	for (fp = si->frags; fp < end; fp++)
283 		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
284 }
285 
286 /**
287  *	deferred_unmap_destructor - unmap a packet when it is freed
288  *	@skb: the packet
289  *
290  *	This is the packet destructor used for Tx packets that need to remain
291  *	mapped until they are freed rather than until their Tx descriptors are
292  *	freed.
293  */
294 static void deferred_unmap_destructor(struct sk_buff *skb)
295 {
296 	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
297 }
298 #endif
299 
300 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
301 		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
302 {
303 	const struct ulptx_sge_pair *p;
304 	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
305 
306 	if (likely(skb_headlen(skb)))
307 		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
308 				 DMA_TO_DEVICE);
309 	else {
310 		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
311 			       DMA_TO_DEVICE);
312 		nfrags--;
313 	}
314 
315 	/*
316 	 * the complexity below is because of the possibility of a wrap-around
317 	 * in the middle of an SGL
318 	 */
319 	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
320 		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
321 unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
322 				       ntohl(p->len[0]), DMA_TO_DEVICE);
323 			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
324 				       ntohl(p->len[1]), DMA_TO_DEVICE);
325 			p++;
326 		} else if ((u8 *)p == (u8 *)q->stat) {
327 			p = (const struct ulptx_sge_pair *)q->desc;
328 			goto unmap;
329 		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
330 			const __be64 *addr = (const __be64 *)q->desc;
331 
332 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
333 				       ntohl(p->len[0]), DMA_TO_DEVICE);
334 			dma_unmap_page(dev, be64_to_cpu(addr[1]),
335 				       ntohl(p->len[1]), DMA_TO_DEVICE);
336 			p = (const struct ulptx_sge_pair *)&addr[2];
337 		} else {
338 			const __be64 *addr = (const __be64 *)q->desc;
339 
340 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
341 				       ntohl(p->len[0]), DMA_TO_DEVICE);
342 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
343 				       ntohl(p->len[1]), DMA_TO_DEVICE);
344 			p = (const struct ulptx_sge_pair *)&addr[1];
345 		}
346 	}
347 	if (nfrags) {
348 		__be64 addr;
349 
350 		if ((u8 *)p == (u8 *)q->stat)
351 			p = (const struct ulptx_sge_pair *)q->desc;
352 		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
353 						       *(const __be64 *)q->desc;
354 		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
355 			       DMA_TO_DEVICE);
356 	}
357 }
358 
359 /**
360  *	free_tx_desc - reclaims Tx descriptors and their buffers
361  *	@adapter: the adapter
362  *	@q: the Tx queue to reclaim descriptors from
363  *	@n: the number of descriptors to reclaim
364  *	@unmap: whether the buffers should be unmapped for DMA
365  *
366  *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
367  *	Tx buffers.  Called with the Tx queue lock held.
368  */
369 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
370 		  unsigned int n, bool unmap)
371 {
372 	struct tx_sw_desc *d;
373 	unsigned int cidx = q->cidx;
374 	struct device *dev = adap->pdev_dev;
375 
376 	d = &q->sdesc[cidx];
377 	while (n--) {
378 		if (d->skb) {                       /* an SGL is present */
379 			if (unmap)
380 				unmap_sgl(dev, d->skb, d->sgl, q);
381 			dev_consume_skb_any(d->skb);
382 			d->skb = NULL;
383 		}
384 		++d;
385 		if (++cidx == q->size) {
386 			cidx = 0;
387 			d = q->sdesc;
388 		}
389 	}
390 	q->cidx = cidx;
391 }
392 
393 /*
394  * Return the number of reclaimable descriptors in a Tx queue.
395  */
396 static inline int reclaimable(const struct sge_txq *q)
397 {
398 	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
399 	hw_cidx -= q->cidx;
400 	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
401 }
402 
403 /**
404  *	cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
405  *	@adap: the adapter
406  *	@q: the Tx queue to reclaim completed descriptors from
407  *	@unmap: whether the buffers should be unmapped for DMA
408  *
409  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
410  *	and frees the associated buffers if possible.  Called with the Tx
411  *	queue locked.
412  */
413 inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
414 					bool unmap)
415 {
416 	int avail = reclaimable(q);
417 
418 	if (avail) {
419 		/*
420 		 * Limit the amount of clean up work we do at a time to keep
421 		 * the Tx lock hold time O(1).
422 		 */
423 		if (avail > MAX_TX_RECLAIM)
424 			avail = MAX_TX_RECLAIM;
425 
426 		free_tx_desc(adap, q, avail, unmap);
427 		q->in_use -= avail;
428 	}
429 }
430 EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
431 
432 static inline int get_buf_size(struct adapter *adapter,
433 			       const struct rx_sw_desc *d)
434 {
435 	struct sge *s = &adapter->sge;
436 	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
437 	int buf_size;
438 
439 	switch (rx_buf_size_idx) {
440 	case RX_SMALL_PG_BUF:
441 		buf_size = PAGE_SIZE;
442 		break;
443 
444 	case RX_LARGE_PG_BUF:
445 		buf_size = PAGE_SIZE << s->fl_pg_order;
446 		break;
447 
448 	case RX_SMALL_MTU_BUF:
449 		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
450 		break;
451 
452 	case RX_LARGE_MTU_BUF:
453 		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
454 		break;
455 
456 	default:
457 		BUG_ON(1);
458 	}
459 
460 	return buf_size;
461 }
462 
463 /**
464  *	free_rx_bufs - free the Rx buffers on an SGE free list
465  *	@adap: the adapter
466  *	@q: the SGE free list to free buffers from
467  *	@n: how many buffers to free
468  *
469  *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
470  *	buffers must be made inaccessible to HW before calling this function.
471  */
472 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
473 {
474 	while (n--) {
475 		struct rx_sw_desc *d = &q->sdesc[q->cidx];
476 
477 		if (is_buf_mapped(d))
478 			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
479 				       get_buf_size(adap, d),
480 				       PCI_DMA_FROMDEVICE);
481 		put_page(d->page);
482 		d->page = NULL;
483 		if (++q->cidx == q->size)
484 			q->cidx = 0;
485 		q->avail--;
486 	}
487 }
488 
489 /**
490  *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
491  *	@adap: the adapter
492  *	@q: the SGE free list
493  *
494  *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
495  *	buffer must be made inaccessible to HW before calling this function.
496  *
497  *	This is similar to @free_rx_bufs above but does not free the buffer.
498  *	Do note that the FL still loses any further access to the buffer.
499  */
500 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
501 {
502 	struct rx_sw_desc *d = &q->sdesc[q->cidx];
503 
504 	if (is_buf_mapped(d))
505 		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
506 			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
507 	d->page = NULL;
508 	if (++q->cidx == q->size)
509 		q->cidx = 0;
510 	q->avail--;
511 }
512 
513 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
514 {
515 	if (q->pend_cred >= 8) {
516 		u32 val = adap->params.arch.sge_fl_db;
517 
518 		if (is_t4(adap->params.chip))
519 			val |= PIDX_V(q->pend_cred / 8);
520 		else
521 			val |= PIDX_T5_V(q->pend_cred / 8);
522 
523 		/* Make sure all memory writes to the Free List queue are
524 		 * committed before we tell the hardware about them.
525 		 */
526 		wmb();
527 
528 		/* If we don't have access to the new User Doorbell (T5+), use
529 		 * the old doorbell mechanism; otherwise use the new BAR2
530 		 * mechanism.
531 		 */
532 		if (unlikely(q->bar2_addr == NULL)) {
533 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
534 				     val | QID_V(q->cntxt_id));
535 		} else {
536 			writel(val | QID_V(q->bar2_qid),
537 			       q->bar2_addr + SGE_UDB_KDOORBELL);
538 
539 			/* This Write memory Barrier will force the write to
540 			 * the User Doorbell area to be flushed.
541 			 */
542 			wmb();
543 		}
544 		q->pend_cred &= 7;
545 	}
546 }
547 
548 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
549 				  dma_addr_t mapping)
550 {
551 	sd->page = pg;
552 	sd->dma_addr = mapping;      /* includes size low bits */
553 }
554 
555 /**
556  *	refill_fl - refill an SGE Rx buffer ring
557  *	@adap: the adapter
558  *	@q: the ring to refill
559  *	@n: the number of new buffers to allocate
560  *	@gfp: the gfp flags for the allocations
561  *
562  *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
563  *	allocated with the supplied gfp flags.  The caller must ensure that
564  *	@n does not exceed the queue's capacity.  If afterwards the queue is
565  *	found critically low, mark it as starving in the bitmap of starving FLs.
566  *
567  *	Returns the number of buffers allocated.
568  */
569 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
570 			      gfp_t gfp)
571 {
572 	struct sge *s = &adap->sge;
573 	struct page *pg;
574 	dma_addr_t mapping;
575 	unsigned int cred = q->avail;
576 	__be64 *d = &q->desc[q->pidx];
577 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
578 	int node;
579 
580 #ifdef CONFIG_DEBUG_FS
581 	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
582 		goto out;
583 #endif
584 
585 	gfp |= __GFP_NOWARN;
586 	node = dev_to_node(adap->pdev_dev);
587 
588 	if (s->fl_pg_order == 0)
589 		goto alloc_small_pages;
590 
591 	/*
592 	 * Prefer large buffers
593 	 */
594 	while (n) {
595 		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
596 		if (unlikely(!pg)) {
597 			q->large_alloc_failed++;
598 			break;       /* fall back to single pages */
599 		}
600 
601 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
602 				       PAGE_SIZE << s->fl_pg_order,
603 				       PCI_DMA_FROMDEVICE);
604 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
605 			__free_pages(pg, s->fl_pg_order);
606 			q->mapping_err++;
607 			goto out;   /* do not try small pages for this error */
608 		}
609 		mapping |= RX_LARGE_PG_BUF;
610 		*d++ = cpu_to_be64(mapping);
611 
612 		set_rx_sw_desc(sd, pg, mapping);
613 		sd++;
614 
615 		q->avail++;
616 		if (++q->pidx == q->size) {
617 			q->pidx = 0;
618 			sd = q->sdesc;
619 			d = q->desc;
620 		}
621 		n--;
622 	}
623 
624 alloc_small_pages:
625 	while (n--) {
626 		pg = alloc_pages_node(node, gfp, 0);
627 		if (unlikely(!pg)) {
628 			q->alloc_failed++;
629 			break;
630 		}
631 
632 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
633 				       PCI_DMA_FROMDEVICE);
634 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
635 			put_page(pg);
636 			q->mapping_err++;
637 			goto out;
638 		}
639 		*d++ = cpu_to_be64(mapping);
640 
641 		set_rx_sw_desc(sd, pg, mapping);
642 		sd++;
643 
644 		q->avail++;
645 		if (++q->pidx == q->size) {
646 			q->pidx = 0;
647 			sd = q->sdesc;
648 			d = q->desc;
649 		}
650 	}
651 
652 out:	cred = q->avail - cred;
653 	q->pend_cred += cred;
654 	ring_fl_db(adap, q);
655 
656 	if (unlikely(fl_starving(adap, q))) {
657 		smp_wmb();
658 		q->low++;
659 		set_bit(q->cntxt_id - adap->sge.egr_start,
660 			adap->sge.starving_fl);
661 	}
662 
663 	return cred;
664 }
665 
666 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
667 {
668 	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
669 		  GFP_ATOMIC);
670 }
671 
672 /**
673  *	alloc_ring - allocate resources for an SGE descriptor ring
674  *	@dev: the PCI device's core device
675  *	@nelem: the number of descriptors
676  *	@elem_size: the size of each descriptor
677  *	@sw_size: the size of the SW state associated with each ring element
678  *	@phys: the physical address of the allocated ring
679  *	@metadata: address of the array holding the SW state for the ring
680  *	@stat_size: extra space in HW ring for status information
681  *	@node: preferred node for memory allocations
682  *
683  *	Allocates resources for an SGE descriptor ring, such as Tx queues,
684  *	free buffer lists, or response queues.  Each SGE ring requires
685  *	space for its HW descriptors plus, optionally, space for the SW state
686  *	associated with each HW entry (the metadata).  The function returns
687  *	three values: the virtual address for the HW ring (the return value
688  *	of the function), the bus address of the HW ring, and the address
689  *	of the SW ring.
690  */
691 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
692 			size_t sw_size, dma_addr_t *phys, void *metadata,
693 			size_t stat_size, int node)
694 {
695 	size_t len = nelem * elem_size + stat_size;
696 	void *s = NULL;
697 	void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL);
698 
699 	if (!p)
700 		return NULL;
701 	if (sw_size) {
702 		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
703 
704 		if (!s) {
705 			dma_free_coherent(dev, len, p, *phys);
706 			return NULL;
707 		}
708 	}
709 	if (metadata)
710 		*(void **)metadata = s;
711 	return p;
712 }
713 
714 /**
715  *	sgl_len - calculates the size of an SGL of the given capacity
716  *	@n: the number of SGL entries
717  *
718  *	Calculates the number of flits needed for a scatter/gather list that
719  *	can hold the given number of entries.
720  */
721 static inline unsigned int sgl_len(unsigned int n)
722 {
723 	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
724 	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
725 	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
726 	 * repeated sequences of { Length[i], Length[i+1], Address[i],
727 	 * Address[i+1] } (this ensures that all addresses are on 64-bit
728 	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
729 	 * Address[N+1] is omitted.
730 	 *
731 	 * The following calculation incorporates all of the above.  It's
732 	 * somewhat hard to follow but, briefly: the "+2" accounts for the
733 	 * first two flits which include the DSGL header, Length0 and
734 	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
735 	 * flits for every pair of the remaining N-1 addresses); and finally
736 	 * the "+((n-1)&1)" adds the one remaining flit needed if (n-1) is
737 	 * odd ...
738 	 */
739 	n--;
740 	return (3 * n) / 2 + (n & 1) + 2;
741 }
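/* Worked example: for n = 3 SGL entries there are n - 1 = 2 addresses
 * beyond Address0, so sgl_len(3) = (3 * 2) / 2 + (2 & 1) + 2 = 5 flits:
 * two flits for the DSGL header, Length0 and Address0, plus three flits
 * holding the remaining two Length/Address pairs.
 */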
742 
743 /**
744  *	flits_to_desc - returns the num of Tx descriptors for the given flits
745  *	@n: the number of flits
746  *
747  *	Returns the number of Tx descriptors needed for the supplied number
748  *	of flits.
749  */
750 static inline unsigned int flits_to_desc(unsigned int n)
751 {
752 	BUG_ON(n > SGE_MAX_WR_LEN / 8);
753 	return DIV_ROUND_UP(n, 8);
754 }
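/* Each Tx descriptor holds 8 flits (64 bytes), so e.g. a 5-flit Work
 * Request fits in one descriptor while a 10-flit one needs two.
 */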
755 
756 /**
757  *	is_eth_imm - can an Ethernet packet be sent as immediate data?
758  *	@skb: the packet
759  *
760  *	Returns whether an Ethernet packet is small enough to fit as
761  *	immediate data. Return value corresponds to headroom required.
762  */
763 static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
764 {
765 	int hdrlen = 0;
766 
767 	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
768 	    chip_ver > CHELSIO_T5) {
769 		hdrlen = sizeof(struct cpl_tx_tnl_lso);
770 		hdrlen += sizeof(struct cpl_tx_pkt_core);
771 	} else {
772 		hdrlen = skb_shinfo(skb)->gso_size ?
773 			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
774 		hdrlen += sizeof(struct cpl_tx_pkt);
775 	}
776 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
777 		return hdrlen;
778 	return 0;
779 }
780 
781 /**
782  *	calc_tx_flits - calculate the number of flits for a packet Tx WR
783  *	@skb: the packet
784  *
785  *	Returns the number of flits needed for a Tx WR for the given Ethernet
786  *	packet, including the needed WR and CPL headers.
787  */
788 static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
789 					 unsigned int chip_ver)
790 {
791 	unsigned int flits;
792 	int hdrlen = is_eth_imm(skb, chip_ver);
793 
794 	/* If the skb is small enough, we can pump it out as a work request
795 	 * with only immediate data.  In that case we just have to have the
796 	 * TX Packet header plus the skb data in the Work Request.
797 	 */
798 
799 	if (hdrlen)
800 		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
801 
802 	/* Otherwise, we're going to have to construct a Scatter gather list
803 	 * of the skb body and fragments.  We also include the flits necessary
804 	 * for the TX Packet Work Request and CPL.  We always have a firmware
805 	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
806 	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
807 	 * message or, if we're doing a Large Send Offload, an LSO CPL message
808 	 * with an embedded TX Packet Write CPL message.
809 	 */
810 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
811 	if (skb_shinfo(skb)->gso_size) {
812 		if (skb->encapsulation && chip_ver > CHELSIO_T5)
813 			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
814 				 sizeof(struct cpl_tx_tnl_lso);
815 		else
816 			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
817 				 sizeof(struct cpl_tx_pkt_lso_core);
818 
819 		hdrlen += sizeof(struct cpl_tx_pkt_core);
820 		flits += (hdrlen / sizeof(__be64));
821 	} else {
822 		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
823 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
824 	}
825 	return flits;
826 }
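/* Rough example (assuming the usual 16-byte fw_eth_tx_pkt_wr and 16-byte
 * cpl_tx_pkt_core, i.e. two flits each): a non-GSO skb with 3 page
 * fragments that is too big to inline needs sgl_len(3 + 1) = 7 flits for
 * the SGL plus 4 flits of WR/CPL headers, 11 flits in total, which
 * flits_to_desc() rounds up to two Tx descriptors.
 */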
827 
828 /**
829  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
830  *	@skb: the packet
831  *
832  *	Returns the number of Tx descriptors needed for the given Ethernet
833  *	packet, including the needed WR and CPL headers.
834  */
835 static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
836 					 unsigned int chip_ver)
837 {
838 	return flits_to_desc(calc_tx_flits(skb, chip_ver));
839 }
840 
841 /**
842  *	cxgb4_write_sgl - populate a scatter/gather list for a packet
843  *	@skb: the packet
844  *	@q: the Tx queue we are writing into
845  *	@sgl: starting location for writing the SGL
846  *	@end: points right after the end of the SGL
847  *	@start: start offset into skb main-body data to include in the SGL
848  *	@addr: the list of bus addresses for the SGL elements
849  *
850  *	Generates a gather list for the buffers that make up a packet.
851  *	The caller must provide adequate space for the SGL that will be written.
852  *	The SGL includes all of the packet's page fragments and the data in its
853  *	main body except for the first @start bytes.  @sgl must be 16-byte
854  *	aligned and within a Tx descriptor with available space.  @end points
855  *	right after the end of the SGL but does not account for any potential
856  *	wrap around, i.e., @end > @sgl.
857  */
858 void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
859 		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
860 		     const dma_addr_t *addr)
861 {
862 	unsigned int i, len;
863 	struct ulptx_sge_pair *to;
864 	const struct skb_shared_info *si = skb_shinfo(skb);
865 	unsigned int nfrags = si->nr_frags;
866 	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
867 
868 	len = skb_headlen(skb) - start;
869 	if (likely(len)) {
870 		sgl->len0 = htonl(len);
871 		sgl->addr0 = cpu_to_be64(addr[0] + start);
872 		nfrags++;
873 	} else {
874 		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
875 		sgl->addr0 = cpu_to_be64(addr[1]);
876 	}
877 
878 	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
879 			      ULPTX_NSGE_V(nfrags));
880 	if (likely(--nfrags == 0))
881 		return;
882 	/*
883 	 * Most of the complexity below deals with the possibility we hit the
884 	 * end of the queue in the middle of writing the SGL.  For this case
885 	 * only we create the SGL in a temporary buffer and then copy it.
886 	 */
887 	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
888 
889 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
890 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
891 		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
892 		to->addr[0] = cpu_to_be64(addr[i]);
893 		to->addr[1] = cpu_to_be64(addr[++i]);
894 	}
895 	if (nfrags) {
896 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
897 		to->len[1] = cpu_to_be32(0);
898 		to->addr[0] = cpu_to_be64(addr[i + 1]);
899 	}
900 	if (unlikely((u8 *)end > (u8 *)q->stat)) {
901 		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
902 
903 		if (likely(part0))
904 			memcpy(sgl->sge, buf, part0);
905 		part1 = (u8 *)end - (u8 *)q->stat;
906 		memcpy(q->desc, (u8 *)buf + part0, part1);
907 		end = (void *)q->desc + part1;
908 	}
909 	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
910 		*end = 0;
911 }
912 EXPORT_SYMBOL(cxgb4_write_sgl);
913 
914 /* This function copies a 64-byte coalesced Work Request to
915  * memory-mapped BAR2 space.  For a coalesced WR, the SGE fetches
916  * data from the FIFO instead of from the host.
917  */
918 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
919 {
920 	int count = 8;
921 
922 	while (count) {
923 		writeq(*src, dst);
924 		src++;
925 		dst++;
926 		count--;
927 	}
928 }
929 
930 /**
931  *	cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
932  *	@adap: the adapter
933  *	@q: the Tx queue
934  *	@n: number of new descriptors to give to HW
935  *
936  *	Ring the doorbell for a Tx queue.
937  */
938 inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
939 {
940 	/* Make sure that all writes to the TX Descriptors are committed
941 	 * before we tell the hardware about them.
942 	 */
943 	wmb();
944 
945 	/* If we don't have access to the new User Doorbell (T5+), use the old
946 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
947 	 */
948 	if (unlikely(q->bar2_addr == NULL)) {
949 		u32 val = PIDX_V(n);
950 		unsigned long flags;
951 
952 		/* For T4 we need to participate in the Doorbell Recovery
953 		 * mechanism.
954 		 */
955 		spin_lock_irqsave(&q->db_lock, flags);
956 		if (!q->db_disabled)
957 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
958 				     QID_V(q->cntxt_id) | val);
959 		else
960 			q->db_pidx_inc += n;
961 		q->db_pidx = q->pidx;
962 		spin_unlock_irqrestore(&q->db_lock, flags);
963 	} else {
964 		u32 val = PIDX_T5_V(n);
965 
966 		/* T4 and later chips share the same PIDX field offset within
967 		 * the doorbell, but T5 and later shrank the field in order to
968 		 * gain a bit for Doorbell Priority.  The field was absurdly
969 		 * large in the first place (14 bits) so we just use the T5
970 		 * and later limits and warn if a Queue ID is too large.
971 		 */
972 		WARN_ON(val & DBPRIO_F);
973 
974 		/* If we're only writing a single TX Descriptor and we can use
975 		 * Inferred QID registers, we can use the Write Combining
976 		 * Gather Buffer; otherwise we use the simple doorbell.
977 		 */
978 		if (n == 1 && q->bar2_qid == 0) {
979 			int index = (q->pidx
980 				     ? (q->pidx - 1)
981 				     : (q->size - 1));
982 			u64 *wr = (u64 *)&q->desc[index];
983 
984 			cxgb_pio_copy((u64 __iomem *)
985 				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
986 				      wr);
987 		} else {
988 			writel(val | QID_V(q->bar2_qid),
989 			       q->bar2_addr + SGE_UDB_KDOORBELL);
990 		}
991 
992 		/* This Write Memory Barrier will force the write to the User
993 		 * Doorbell area to be flushed.  This is needed to prevent
994 		 * writes on different CPUs for the same queue from hitting
995 		 * the adapter out of order.  This is required when some Work
996 		 * Requests take the Write Combine Gather Buffer path (user
997 		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
998 		 * take the traditional path where we simply increment the
999 		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1000 		 * hardware DMA read the actual Work Request.
1001 		 */
1002 		wmb();
1003 	}
1004 }
1005 EXPORT_SYMBOL(cxgb4_ring_tx_db);
1006 
1007 /**
1008  *	cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1009  *	@skb: the packet
1010  *	@q: the Tx queue where the packet will be inlined
1011  *	@pos: starting position in the Tx queue where to inline the packet
1012  *
1013  *	Inline a packet's contents directly into Tx descriptors, starting at
1014  *	the given position within the Tx DMA ring.
1015  *	Most of the complexity of this operation is dealing with wrap arounds
1016  *	in the middle of the packet we want to inline.
1017  */
1018 void cxgb4_inline_tx_skb(const struct sk_buff *skb,
1019 			 const struct sge_txq *q, void *pos)
1020 {
1021 	int left = (void *)q->stat - pos;
1022 	u64 *p;
1023 
1024 	if (likely(skb->len <= left)) {
1025 		if (likely(!skb->data_len))
1026 			skb_copy_from_linear_data(skb, pos, skb->len);
1027 		else
1028 			skb_copy_bits(skb, 0, pos, skb->len);
1029 		pos += skb->len;
1030 	} else {
1031 		skb_copy_bits(skb, 0, pos, left);
1032 		skb_copy_bits(skb, left, q->desc, skb->len - left);
1033 		pos = (void *)q->desc + (skb->len - left);
1034 	}
1035 
1036 	/* 0-pad to multiple of 16 */
1037 	p = PTR_ALIGN(pos, 8);
1038 	if ((uintptr_t)p & 8)
1039 		*p = 0;
1040 }
1041 EXPORT_SYMBOL(cxgb4_inline_tx_skb);
1042 
1043 static void *inline_tx_skb_header(const struct sk_buff *skb,
1044 				  const struct sge_txq *q,  void *pos,
1045 				  int length)
1046 {
1047 	u64 *p;
1048 	int left = (void *)q->stat - pos;
1049 
1050 	if (likely(length <= left)) {
1051 		memcpy(pos, skb->data, length);
1052 		pos += length;
1053 	} else {
1054 		memcpy(pos, skb->data, left);
1055 		memcpy(q->desc, skb->data + left, length - left);
1056 		pos = (void *)q->desc + (length - left);
1057 	}
1058 	/* 0-pad to multiple of 16 */
1059 	p = PTR_ALIGN(pos, 8);
1060 	if ((uintptr_t)p & 8) {
1061 		*p = 0;
1062 		return p + 1;
1063 	}
1064 	return p;
1065 }
1066 
1067 /*
1068  * Figure out what HW csum a packet wants and return the appropriate control
1069  * bits.
1070  */
1071 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1072 {
1073 	int csum_type;
1074 	bool inner_hdr_csum = false;
1075 	u16 proto, ver;
1076 
1077 	if (skb->encapsulation &&
1078 	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
1079 		inner_hdr_csum = true;
1080 
1081 	if (inner_hdr_csum) {
1082 		ver = inner_ip_hdr(skb)->version;
1083 		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
1084 			inner_ipv6_hdr(skb)->nexthdr;
1085 	} else {
1086 		ver = ip_hdr(skb)->version;
1087 		proto = (ver == 4) ? ip_hdr(skb)->protocol :
1088 			ipv6_hdr(skb)->nexthdr;
1089 	}
1090 
1091 	if (ver == 4) {
1092 		if (proto == IPPROTO_TCP)
1093 			csum_type = TX_CSUM_TCPIP;
1094 		else if (proto == IPPROTO_UDP)
1095 			csum_type = TX_CSUM_UDPIP;
1096 		else {
1097 nocsum:			/*
1098 			 * unknown protocol, disable HW csum
1099 			 * and hope a bad packet is detected
1100 			 */
1101 			return TXPKT_L4CSUM_DIS_F;
1102 		}
1103 	} else {
1104 		/*
1105 		 * this doesn't work with extension headers
1106 		 */
1107 		if (proto == IPPROTO_TCP)
1108 			csum_type = TX_CSUM_TCPIP6;
1109 		else if (proto == IPPROTO_UDP)
1110 			csum_type = TX_CSUM_UDPIP6;
1111 		else
1112 			goto nocsum;
1113 	}
1114 
1115 	if (likely(csum_type >= TX_CSUM_TCPIP)) {
1116 		int eth_hdr_len, l4_len;
1117 		u64 hdr_len;
1118 
1119 		if (inner_hdr_csum) {
1120 			/* This allows checksum offload for all encapsulated
1121 			 * packets like GRE etc..
1122 			 */
1123 			l4_len = skb_inner_network_header_len(skb);
1124 			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
1125 		} else {
1126 			l4_len = skb_network_header_len(skb);
1127 			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1128 		}
1129 		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
1130 
1131 		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1132 			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1133 		else
1134 			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1135 		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1136 	} else {
1137 		int start = skb_transport_offset(skb);
1138 
1139 		return TXPKT_CSUM_TYPE_V(csum_type) |
1140 			TXPKT_CSUM_START_V(start) |
1141 			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1142 	}
1143 }
1144 
1145 static void eth_txq_stop(struct sge_eth_txq *q)
1146 {
1147 	netif_tx_stop_queue(q->txq);
1148 	q->q.stops++;
1149 }
1150 
1151 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1152 {
1153 	q->in_use += n;
1154 	q->pidx += n;
1155 	if (q->pidx >= q->size)
1156 		q->pidx -= q->size;
1157 }
1158 
1159 #ifdef CONFIG_CHELSIO_T4_FCOE
1160 static inline int
1161 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1162 		  const struct port_info *pi, u64 *cntrl)
1163 {
1164 	const struct cxgb_fcoe *fcoe = &pi->fcoe;
1165 
1166 	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1167 		return 0;
1168 
1169 	if (skb->protocol != htons(ETH_P_FCOE))
1170 		return 0;
1171 
1172 	skb_reset_mac_header(skb);
1173 	skb->mac_len = sizeof(struct ethhdr);
1174 
1175 	skb_set_network_header(skb, skb->mac_len);
1176 	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1177 
1178 	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1179 		return -ENOTSUPP;
1180 
1181 	/* FC CRC offload */
1182 	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1183 		     TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1184 		     TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1185 		     TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1186 		     TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1187 	return 0;
1188 }
1189 #endif /* CONFIG_CHELSIO_T4_FCOE */
1190 
1191 /* Returns the tunnel type if the hardware supports offloading it.
1192  * It is called only for T5 and later chips.
1193  */
1194 enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
1195 {
1196 	u8 l4_hdr = 0;
1197 	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1198 	struct port_info *pi = netdev_priv(skb->dev);
1199 	struct adapter *adapter = pi->adapter;
1200 
1201 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1202 	    skb->inner_protocol != htons(ETH_P_TEB))
1203 		return tnl_type;
1204 
1205 	switch (vlan_get_protocol(skb)) {
1206 	case htons(ETH_P_IP):
1207 		l4_hdr = ip_hdr(skb)->protocol;
1208 		break;
1209 	case htons(ETH_P_IPV6):
1210 		l4_hdr = ipv6_hdr(skb)->nexthdr;
1211 		break;
1212 	default:
1213 		return tnl_type;
1214 	}
1215 
1216 	switch (l4_hdr) {
1217 	case IPPROTO_UDP:
1218 		if (adapter->vxlan_port == udp_hdr(skb)->dest)
1219 			tnl_type = TX_TNL_TYPE_VXLAN;
1220 		else if (adapter->geneve_port == udp_hdr(skb)->dest)
1221 			tnl_type = TX_TNL_TYPE_GENEVE;
1222 		break;
1223 	default:
1224 		return tnl_type;
1225 	}
1226 
1227 	return tnl_type;
1228 }
1229 
1230 static inline void t6_fill_tnl_lso(struct sk_buff *skb,
1231 				   struct cpl_tx_tnl_lso *tnl_lso,
1232 				   enum cpl_tx_tnl_lso_type tnl_type)
1233 {
1234 	u32 val;
1235 	int in_eth_xtra_len;
1236 	int l3hdr_len = skb_network_header_len(skb);
1237 	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1238 	const struct skb_shared_info *ssi = skb_shinfo(skb);
1239 	bool v6 = (ip_hdr(skb)->version == 6);
1240 
1241 	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
1242 	      CPL_TX_TNL_LSO_FIRST_F |
1243 	      CPL_TX_TNL_LSO_LAST_F |
1244 	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
1245 	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
1246 	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
1247 	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
1248 	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
1249 	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
1250 	tnl_lso->op_to_IpIdSplitOut = htonl(val);
1251 
1252 	tnl_lso->IpIdOffsetOut = 0;
1253 
1254 	/* Get the tunnel header length */
1255 	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
1256 	in_eth_xtra_len = skb_inner_network_header(skb) -
1257 			  skb_inner_mac_header(skb) - ETH_HLEN;
1258 
1259 	switch (tnl_type) {
1260 	case TX_TNL_TYPE_VXLAN:
1261 	case TX_TNL_TYPE_GENEVE:
1262 		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
1263 			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
1264 			CPL_TX_TNL_LSO_UDPLENSETOUT_F);
1265 		break;
1266 	default:
1267 		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
1268 		break;
1269 	}
1270 
1271 	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
1272 		 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
1273 		       CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
1274 
1275 	tnl_lso->r1 = 0;
1276 
1277 	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
1278 	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
1279 	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
1280 	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
1281 	tnl_lso->Flow_to_TcpHdrLen = htonl(val);
1282 
1283 	tnl_lso->IpIdOffset = htons(0);
1284 
1285 	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
1286 	tnl_lso->TCPSeqOffset = htonl(0);
1287 	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
1288 }
1289 
1290 /**
1291  *	cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1292  *	@skb: the packet
1293  *	@dev: the egress net device
1294  *
1295  *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
1296  */
1297 static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1298 {
1299 	u32 wr_mid, ctrl0, op;
1300 	u64 cntrl, *end, *sgl;
1301 	int qidx, credits;
1302 	unsigned int flits, ndesc;
1303 	struct adapter *adap;
1304 	struct sge_eth_txq *q;
1305 	const struct port_info *pi;
1306 	struct fw_eth_tx_pkt_wr *wr;
1307 	struct cpl_tx_pkt_core *cpl;
1308 	const struct skb_shared_info *ssi;
1309 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1310 	bool immediate = false;
1311 	int len, max_pkt_len;
1312 	bool ptp_enabled = is_ptp_enabled(skb, dev);
1313 	unsigned int chip_ver;
1314 	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1315 
1316 #ifdef CONFIG_CHELSIO_T4_FCOE
1317 	int err;
1318 #endif /* CONFIG_CHELSIO_T4_FCOE */
1319 
1320 	/*
1321 	 * The chip min packet length is 10 octets but play safe and reject
1322 	 * anything shorter than an Ethernet header.
1323 	 */
1324 	if (unlikely(skb->len < ETH_HLEN)) {
1325 out_free:	dev_kfree_skb_any(skb);
1326 		return NETDEV_TX_OK;
1327 	}
1328 
1329 	/* Discard the packet if the length is greater than mtu */
1330 	max_pkt_len = ETH_HLEN + dev->mtu;
1331 	if (skb_vlan_tagged(skb))
1332 		max_pkt_len += VLAN_HLEN;
1333 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1334 		goto out_free;
1335 
1336 	pi = netdev_priv(dev);
1337 	adap = pi->adapter;
1338 	ssi = skb_shinfo(skb);
1339 #ifdef CONFIG_CHELSIO_IPSEC_INLINE
1340 	if (xfrm_offload(skb) && !ssi->gso_size)
1341 		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
1342 #endif /* CHELSIO_IPSEC_INLINE */
1343 
1344 	qidx = skb_get_queue_mapping(skb);
1345 	if (ptp_enabled) {
1346 		spin_lock(&adap->ptp_lock);
1347 		if (!(adap->ptp_tx_skb)) {
1348 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1349 			adap->ptp_tx_skb = skb_get(skb);
1350 		} else {
1351 			spin_unlock(&adap->ptp_lock);
1352 			goto out_free;
1353 		}
1354 		q = &adap->sge.ptptxq;
1355 	} else {
1356 		q = &adap->sge.ethtxq[qidx + pi->first_qset];
1357 	}
1358 	skb_tx_timestamp(skb);
1359 
1360 	cxgb4_reclaim_completed_tx(adap, &q->q, true);
1361 	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1362 
1363 #ifdef CONFIG_CHELSIO_T4_FCOE
1364 	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1365 	if (unlikely(err == -ENOTSUPP)) {
1366 		if (ptp_enabled)
1367 			spin_unlock(&adap->ptp_lock);
1368 		goto out_free;
1369 	}
1370 #endif /* CONFIG_CHELSIO_T4_FCOE */
1371 
1372 	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1373 	flits = calc_tx_flits(skb, chip_ver);
1374 	ndesc = flits_to_desc(flits);
1375 	credits = txq_avail(&q->q) - ndesc;
1376 
1377 	if (unlikely(credits < 0)) {
1378 		eth_txq_stop(q);
1379 		dev_err(adap->pdev_dev,
1380 			"%s: Tx ring %u full while queue awake!\n",
1381 			dev->name, qidx);
1382 		if (ptp_enabled)
1383 			spin_unlock(&adap->ptp_lock);
1384 		return NETDEV_TX_BUSY;
1385 	}
1386 
1387 	if (is_eth_imm(skb, chip_ver))
1388 		immediate = true;
1389 
1390 	if (skb->encapsulation && chip_ver > CHELSIO_T5)
1391 		tnl_type = cxgb_encap_offload_supported(skb);
1392 
1393 	if (!immediate &&
1394 	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
1395 		q->mapping_err++;
1396 		if (ptp_enabled)
1397 			spin_unlock(&adap->ptp_lock);
1398 		goto out_free;
1399 	}
1400 
1401 	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1402 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1403 		eth_txq_stop(q);
1404 		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1405 	}
1406 
1407 	wr = (void *)&q->q.desc[q->q.pidx];
1408 	wr->equiq_to_len16 = htonl(wr_mid);
1409 	wr->r3 = cpu_to_be64(0);
1410 	end = (u64 *)wr + flits;
1411 
1412 	len = immediate ? skb->len : 0;
1413 	len += sizeof(*cpl);
1414 	if (ssi->gso_size) {
1415 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1416 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1417 		int l3hdr_len = skb_network_header_len(skb);
1418 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1419 		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
1420 
1421 		if (tnl_type)
1422 			len += sizeof(*tnl_lso);
1423 		else
1424 			len += sizeof(*lso);
1425 
1426 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1427 				       FW_WR_IMMDLEN_V(len));
1428 		if (tnl_type) {
1429 			struct iphdr *iph = ip_hdr(skb);
1430 
1431 			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
1432 			cpl = (void *)(tnl_lso + 1);
1433 			/* Driver is expected to compute partial checksum that
1434 			 * does not include the IP Total Length.
1435 			 */
1436 			if (iph->version == 4) {
1437 				iph->check = 0;
1438 				iph->tot_len = 0;
1439 				iph->check = (u16)(~ip_fast_csum((u8 *)iph,
1440 								 iph->ihl));
1441 			}
1442 			if (skb->ip_summed == CHECKSUM_PARTIAL)
1443 				cntrl = hwcsum(adap->params.chip, skb);
1444 		} else {
1445 			lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1446 					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1447 					LSO_IPV6_V(v6) |
1448 					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1449 					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1450 					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1451 			lso->ipid_ofst = htons(0);
1452 			lso->mss = htons(ssi->gso_size);
1453 			lso->seqno_offset = htonl(0);
1454 			if (is_t4(adap->params.chip))
1455 				lso->len = htonl(skb->len);
1456 			else
1457 				lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1458 			cpl = (void *)(lso + 1);
1459 
1460 			if (CHELSIO_CHIP_VERSION(adap->params.chip)
1461 			    <= CHELSIO_T5)
1462 				cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1463 			else
1464 				cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1465 
1466 			cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1467 				 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1468 				 TXPKT_IPHDR_LEN_V(l3hdr_len);
1469 		}
1470 		sgl = (u64 *)(cpl + 1); /* sgl start here */
1471 		if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
1472 			/* If the current position is already at the end of the
1473 			 * txq, reset it to point to the start of the queue and
1474 			 * update the end pointer as well.
1475 			 */
1476 			if (sgl == (u64 *)q->q.stat) {
1477 				int left = (u8 *)end - (u8 *)q->q.stat;
1478 
1479 				end = (void *)q->q.desc + left;
1480 				sgl = (void *)q->q.desc;
1481 			}
1482 		}
1483 		q->tso++;
1484 		q->tx_cso += ssi->gso_segs;
1485 	} else {
1486 		if (ptp_enabled)
1487 			op = FW_PTP_TX_PKT_WR;
1488 		else
1489 			op = FW_ETH_TX_PKT_WR;
1490 		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
1491 				       FW_WR_IMMDLEN_V(len));
1492 		cpl = (void *)(wr + 1);
1493 		sgl = (u64 *)(cpl + 1);
1494 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1495 			cntrl = hwcsum(adap->params.chip, skb) |
1496 				TXPKT_IPCSUM_DIS_F;
1497 			q->tx_cso++;
1498 		}
1499 	}
1500 
1501 	if (skb_vlan_tag_present(skb)) {
1502 		q->vlan_ins++;
1503 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1504 #ifdef CONFIG_CHELSIO_T4_FCOE
1505 		if (skb->protocol == htons(ETH_P_FCOE))
1506 			cntrl |= TXPKT_VLAN_V(
1507 				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1508 #endif /* CONFIG_CHELSIO_T4_FCOE */
1509 	}
1510 
1511 	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1512 		TXPKT_PF_V(adap->pf);
1513 	if (ptp_enabled)
1514 		ctrl0 |= TXPKT_TSTAMP_F;
1515 #ifdef CONFIG_CHELSIO_T4_DCB
1516 	if (is_t4(adap->params.chip))
1517 		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1518 	else
1519 		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1520 #endif
1521 	cpl->ctrl0 = htonl(ctrl0);
1522 	cpl->pack = htons(0);
1523 	cpl->len = htons(skb->len);
1524 	cpl->ctrl1 = cpu_to_be64(cntrl);
1525 
1526 	if (immediate) {
1527 		cxgb4_inline_tx_skb(skb, &q->q, sgl);
1528 		dev_consume_skb_any(skb);
1529 	} else {
1530 		int last_desc;
1531 
1532 		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
1533 		skb_orphan(skb);
1534 
1535 		last_desc = q->q.pidx + ndesc - 1;
1536 		if (last_desc >= q->q.size)
1537 			last_desc -= q->q.size;
1538 		q->q.sdesc[last_desc].skb = skb;
1539 		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
1540 	}
1541 
1542 	txq_advance(&q->q, ndesc);
1543 
1544 	cxgb4_ring_tx_db(adap, &q->q, ndesc);
1545 	if (ptp_enabled)
1546 		spin_unlock(&adap->ptp_lock);
1547 	return NETDEV_TX_OK;
1548 }
1549 
1550 /* Constants ... */
1551 enum {
1552 	/* Egress Queue sizes, producer and consumer indices are all in units
1553 	 * of Egress Context Units bytes.  Note that as far as the hardware is
1554 	 * concerned, the free list is an Egress Queue (the host produces free
1555 	 * buffers which the hardware consumes) and free list entries are
1556 	 * 64-bit PCI DMA addresses.
1557 	 */
1558 	EQ_UNIT = SGE_EQ_IDXSIZE,
1559 	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1560 	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1561 
1562 	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1563 			       sizeof(struct cpl_tx_pkt_lso_core) +
1564 			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
1565 };
1566 
1567 /**
1568  *	t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1569  *	@skb: the packet
1570  *
1571  *	Returns whether an Ethernet packet is small enough to fit completely as
1572  *	immediate data.
1573  */
1574 static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
1575 {
1576 	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
1577 	 * which does not accommodate immediate data.  We could dike out all
1578 	 * of the support code for immediate data but that would tie our hands
1579  * too much if we ever want to enhance the firmware.  It would also
1580 	 * create more differences between the PF and VF Drivers.
1581 	 */
1582 	return false;
1583 }
1584 
1585 /**
1586  *	t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1587  *	@skb: the packet
1588  *
1589  *	Returns the number of flits needed for a TX Work Request for the
1590  *	given Ethernet packet, including the needed WR and CPL headers.
1591  */
1592 static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
1593 {
1594 	unsigned int flits;
1595 
1596 	/* If the skb is small enough, we can pump it out as a work request
1597 	 * with only immediate data.  In that case we just have to have the
1598 	 * TX Packet header plus the skb data in the Work Request.
1599 	 */
1600 	if (t4vf_is_eth_imm(skb))
1601 		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
1602 				    sizeof(__be64));
1603 
1604 	/* Otherwise, we're going to have to construct a Scatter gather list
1605 	 * of the skb body and fragments.  We also include the flits necessary
1606 	 * for the TX Packet Work Request and CPL.  We always have a firmware
1607 	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
1608 	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
1609 	 * message or, if we're doing a Large Send Offload, an LSO CPL message
1610 	 * with an embedded TX Packet Write CPL message.
1611 	 */
1612 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
1613 	if (skb_shinfo(skb)->gso_size)
1614 		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1615 			  sizeof(struct cpl_tx_pkt_lso_core) +
1616 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1617 	else
1618 		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1619 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1620 	return flits;
1621 }
1622 
1623 /**
1624  *	cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1625  *	@skb: the packet
1626  *	@dev: the egress net device
1627  *
1628  *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
1629  */
1630 static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
1631 				     struct net_device *dev)
1632 {
1633 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1634 	const struct skb_shared_info *ssi;
1635 	struct fw_eth_tx_pkt_vm_wr *wr;
1636 	int qidx, credits, max_pkt_len;
1637 	struct cpl_tx_pkt_core *cpl;
1638 	const struct port_info *pi;
1639 	unsigned int flits, ndesc;
1640 	struct sge_eth_txq *txq;
1641 	struct adapter *adapter;
1642 	u64 cntrl, *end;
1643 	u32 wr_mid;
1644 	const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
1645 				       sizeof(wr->ethmacsrc) +
1646 				       sizeof(wr->ethtype) +
1647 				       sizeof(wr->vlantci);
1648 
1649 	/* The chip minimum packet length is 10 octets but the firmware
1650 	 * command that we are using requires that we copy the Ethernet header
1651 	 * (including the VLAN tag) into the header so we reject anything
1652 	 * smaller than that ...
1653 	 */
1654 	if (unlikely(skb->len < fw_hdr_copy_len))
1655 		goto out_free;
1656 
1657 	/* Discard the packet if the length is greater than mtu */
1658 	max_pkt_len = ETH_HLEN + dev->mtu;
1659 	if (skb_vlan_tag_present(skb))
1660 		max_pkt_len += VLAN_HLEN;
1661 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1662 		goto out_free;
1663 
1664 	/* Figure out which TX Queue we're going to use. */
1665 	pi = netdev_priv(dev);
1666 	adapter = pi->adapter;
1667 	qidx = skb_get_queue_mapping(skb);
1668 	WARN_ON(qidx >= pi->nqsets);
1669 	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1670 
1671 	/* Take this opportunity to reclaim any TX Descriptors whose DMA
1672 	 * transfers have completed.
1673 	 */
1674 	cxgb4_reclaim_completed_tx(adapter, &txq->q, true);
1675 
1676 	/* Calculate the number of flits and TX Descriptors we're going to
1677 	 * need along with how many TX Descriptors will be left over after
1678 	 * we inject our Work Request.
1679 	 */
1680 	flits = t4vf_calc_tx_flits(skb);
1681 	ndesc = flits_to_desc(flits);
1682 	credits = txq_avail(&txq->q) - ndesc;
1683 
1684 	if (unlikely(credits < 0)) {
1685 		/* Not enough room for this packet's Work Request.  Stop the
1686 		 * TX Queue and return a "busy" condition.  The queue will get
1687 		 * started later on when the firmware informs us that space
1688 		 * has opened up.
1689 		 */
1690 		eth_txq_stop(txq);
1691 		dev_err(adapter->pdev_dev,
1692 			"%s: TX ring %u full while queue awake!\n",
1693 			dev->name, qidx);
1694 		return NETDEV_TX_BUSY;
1695 	}
1696 
1697 	if (!t4vf_is_eth_imm(skb) &&
1698 	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1699 		/* We need to map the skb into PCI DMA space (because it can't
1700 		 * be in-lined directly into the Work Request) and the mapping
1701 		 * operation failed.  Record the error and drop the packet.
1702 		 */
1703 		txq->mapping_err++;
1704 		goto out_free;
1705 	}
1706 
1707 	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1708 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1709 		/* After we're done injecting the Work Request for this
1710 		 * packet, we'll be below our "stop threshold" so stop the TX
1711 		 * Queue now and schedule a request for an SGE Egress Queue
1712 		 * Update message.  The queue will get started later on when
1713 		 * the firmware processes this Work Request and sends us an
1714 		 * Egress Queue Status Update message indicating that space
1715 		 * has opened up.
1716 		 */
1717 		eth_txq_stop(txq);
1718 		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1719 	}
1720 
1721 	/* Start filling in our Work Request.  Note that we do _not_ handle
1722 	 * the WR Header wrapping around the TX Descriptor Ring.  If our
1723 	 * maximum header size ever exceeds one TX Descriptor, we'll need to
1724 	 * do something else here.
1725 	 */
1726 	WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1727 	wr = (void *)&txq->q.desc[txq->q.pidx];
1728 	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1729 	wr->r3[0] = cpu_to_be32(0);
1730 	wr->r3[1] = cpu_to_be32(0);
1731 	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1732 	end = (u64 *)wr + flits;
1733 
1734 	/* If this is a Large Send Offload packet we'll put in an LSO CPL
1735 	 * message with an encapsulated TX Packet CPL message.  Otherwise we
1736 	 * just use a TX Packet CPL message.
1737 	 */
1738 	ssi = skb_shinfo(skb);
1739 	if (ssi->gso_size) {
1740 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1741 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1742 		int l3hdr_len = skb_network_header_len(skb);
1743 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1744 
1745 		wr->op_immdlen =
1746 			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1747 				    FW_WR_IMMDLEN_V(sizeof(*lso) +
1748 						    sizeof(*cpl)));
1749 		 /* Fill in the LSO CPL message. */
1750 		lso->lso_ctrl =
1751 			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1752 				    LSO_FIRST_SLICE_F |
1753 				    LSO_LAST_SLICE_F |
1754 				    LSO_IPV6_V(v6) |
1755 				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1756 				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1757 				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1758 		lso->ipid_ofst = cpu_to_be16(0);
1759 		lso->mss = cpu_to_be16(ssi->gso_size);
1760 		lso->seqno_offset = cpu_to_be32(0);
1761 		if (is_t4(adapter->params.chip))
1762 			lso->len = cpu_to_be32(skb->len);
1763 		else
1764 			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1765 
1766 		/* Set up TX Packet CPL pointer, control word and perform
1767 		 * accounting.
1768 		 */
1769 		cpl = (void *)(lso + 1);
1770 
1771 		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1772 			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1773 		else
1774 			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1775 
1776 		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1777 					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1778 			 TXPKT_IPHDR_LEN_V(l3hdr_len);
1779 		txq->tso++;
1780 		txq->tx_cso += ssi->gso_segs;
1781 	} else {
1782 		int len;
1783 
1784 		len = (t4vf_is_eth_imm(skb)
1785 		       ? skb->len + sizeof(*cpl)
1786 		       : sizeof(*cpl));
1787 		wr->op_immdlen =
1788 			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1789 				    FW_WR_IMMDLEN_V(len));
1790 
1791 		/* Set up TX Packet CPL pointer, control word and perform
1792 		 * accounting.
1793 		 */
1794 		cpl = (void *)(wr + 1);
1795 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1796 			cntrl = hwcsum(adapter->params.chip, skb) |
1797 				TXPKT_IPCSUM_DIS_F;
1798 			txq->tx_cso++;
1799 		} else {
1800 			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1801 		}
1802 	}
1803 
1804 	/* If there's a VLAN tag present, add that to the list of things to
1805 	 * do in this Work Request.
1806 	 */
1807 	if (skb_vlan_tag_present(skb)) {
1808 		txq->vlan_ins++;
1809 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1810 	}
1811 
1812 	 /* Fill in the TX Packet CPL message header. */
1813 	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1814 				 TXPKT_INTF_V(pi->port_id) |
1815 				 TXPKT_PF_V(0));
1816 	cpl->pack = cpu_to_be16(0);
1817 	cpl->len = cpu_to_be16(skb->len);
1818 	cpl->ctrl1 = cpu_to_be64(cntrl);
1819 
1820 	/* Fill in the body of the TX Packet CPL message with either in-lined
1821 	 * data or a Scatter/Gather List.
1822 	 */
1823 	if (t4vf_is_eth_imm(skb)) {
1824 		/* In-line the packet's data and free the skb since we don't
1825 		 * need it any longer.
1826 		 */
1827 		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
1828 		dev_consume_skb_any(skb);
1829 	} else {
1830 		/* Write the skb's Scatter/Gather list into the TX Packet CPL
1831 		 * message and retain a pointer to the skb so we can free it
1832 		 * later when its DMA completes.  (We store the skb pointer
1833 		 * in the Software Descriptor corresponding to the last TX
1834 		 * Descriptor used by the Work Request.)
1835 		 *
1836 		 * The retained skb will be freed when the corresponding TX
1837 		 * Descriptors are reclaimed after their DMAs complete.
1838 		 * However, this could take quite a while since, in general,
1839 		 * the hardware is set up to be lazy about sending DMA
1840 		 * completion notifications to us and we mostly perform TX
1841 		 * reclaims in the transmit routine.
1842 		 *
1843 		 * This is good for performance but means that we rely on new
1844 		 * TX packets arriving to run the destructors of completed
1845 		 * packets, which open up space in their sockets' send queues.
1846 		 * Sometimes we do not get such new packets causing TX to
1847 		 * stall.  A single UDP transmitter is a good example of this
1848 		 * situation.  We have a clean up timer that periodically
1849 		 * reclaims completed packets but it doesn't run often enough
1850 		 * (nor do we want it to) to prevent lengthy stalls.  A
1851 		 * solution to this problem is to run the destructor early,
1852 		 * after the packet is queued but before it's DMAd.  A con is
1853 		 * that we lie to socket memory accounting, but the amount of
1854 		 * extra memory is reasonable (limited by the number of TX
1855 		 * descriptors), the packets do actually get freed quickly by
1856 		 * new packets almost always, and for protocols like TCP that
1857 		 * wait for acks to really free up the data the extra memory
1858 		 * is even less.  On the positive side we run the destructors
1859 		 * on the sending CPU rather than on a potentially different
1860 		 * completing CPU, usually a good thing.
1861 		 *
1862 		 * Run the destructor before telling the DMA engine about the
1863 		 * packet to make sure it doesn't complete and get freed
1864 		 * prematurely.
1865 		 */
1866 		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1867 		struct sge_txq *tq = &txq->q;
1868 		int last_desc;
1869 
1870 		/* If the Work Request header was an exact multiple of our TX
1871 		 * Descriptor length, then it's possible that the starting SGL
1872 		 * pointer lines up exactly with the end of our TX Descriptor
1873 		 * ring.  If that's the case, wrap around to the beginning
1874 		 * here ...
1875 		 */
1876 		if (unlikely((void *)sgl == (void *)tq->stat)) {
1877 			sgl = (void *)tq->desc;
1878 			end = (void *)((void *)tq->desc +
1879 				       ((void *)end - (void *)tq->stat));
1880 		}
1881 
1882 		cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
1883 		skb_orphan(skb);
1884 
1885 		last_desc = tq->pidx + ndesc - 1;
1886 		if (last_desc >= tq->size)
1887 			last_desc -= tq->size;
1888 		tq->sdesc[last_desc].skb = skb;
1889 		tq->sdesc[last_desc].sgl = sgl;
1890 	}
1891 
1892 	/* Advance our internal TX Queue state, tell the hardware about
1893 	 * the new TX descriptors and return success.
1894 	 */
1895 	txq_advance(&txq->q, ndesc);
1896 
1897 	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
1898 	return NETDEV_TX_OK;
1899 
1900 out_free:
1901 	/* An error of some sort happened.  Free the TX skb and tell the
1902 	 * OS that we've "dealt" with the packet ...
1903 	 */
1904 	dev_kfree_skb_any(skb);
1905 	return NETDEV_TX_OK;
1906 }
1907 
1908 netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
1909 {
1910 	struct port_info *pi = netdev_priv(dev);
1911 
1912 	if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
1913 		return cxgb4_vf_eth_xmit(skb, dev);
1914 
1915 	return cxgb4_eth_xmit(skb, dev);
1916 }
1917 
1918 /**
1919  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1920  *	@q: the SGE control Tx queue
1921  *
1922  *	This is a variant of cxgb4_reclaim_completed_tx() that is used
1923  *	for Tx queues that send only immediate data (presently just
1924  *	the control queues) and thus do not have any sk_buffs to release.
1925  */
1926 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1927 {
1928 	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
1929 	int reclaim = hw_cidx - q->cidx;
1930 
1931 	if (reclaim < 0)
1932 		reclaim += q->size;
1933 
1934 	q->in_use -= reclaim;
1935 	q->cidx = hw_cidx;
1936 }
1937 
1938 /**
1939  *	is_imm - check whether a packet can be sent as immediate data
1940  *	@skb: the packet
1941  *
1942  *	Returns true if a packet can be sent as a WR with immediate data.
1943  */
1944 static inline int is_imm(const struct sk_buff *skb)
1945 {
1946 	return skb->len <= MAX_CTRL_WR_LEN;
1947 }
1948 
1949 /**
1950  *	ctrlq_check_stop - check if a control queue is full and should stop
1951  *	@q: the queue
1952  *	@wr: most recent WR written to the queue
1953  *
1954  *	Check if a control queue has become full and should be stopped.
1955  *	We clean up control queue descriptors very lazily, only when we are out.
1956  *	If the queue is still full after reclaiming any completed descriptors
1957  *	we suspend it and have the last WR wake it up.
1958  */
1959 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1960 {
1961 	reclaim_completed_tx_imm(&q->q);
1962 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1963 		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1964 		q->q.stops++;
1965 		q->full = 1;
1966 	}
1967 }
1968 
1969 /**
1970  *	ctrl_xmit - send a packet through an SGE control Tx queue
1971  *	@q: the control queue
1972  *	@skb: the packet
1973  *
1974  *	Send a packet through an SGE control Tx queue.  Packets sent through
1975  *	a control queue must fit entirely as immediate data.
1976  */
1977 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1978 {
1979 	unsigned int ndesc;
1980 	struct fw_wr_hdr *wr;
1981 
1982 	if (unlikely(!is_imm(skb))) {
1983 		WARN_ON(1);
1984 		dev_kfree_skb(skb);
1985 		return NET_XMIT_DROP;
1986 	}
1987 
1988 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1989 	spin_lock(&q->sendq.lock);
1990 
1991 	if (unlikely(q->full)) {
1992 		skb->priority = ndesc;                  /* save for restart */
1993 		__skb_queue_tail(&q->sendq, skb);
1994 		spin_unlock(&q->sendq.lock);
1995 		return NET_XMIT_CN;
1996 	}
1997 
1998 	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1999 	cxgb4_inline_tx_skb(skb, &q->q, wr);
2000 
2001 	txq_advance(&q->q, ndesc);
2002 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
2003 		ctrlq_check_stop(q, wr);
2004 
2005 	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2006 	spin_unlock(&q->sendq.lock);
2007 
2008 	kfree_skb(skb);
2009 	return NET_XMIT_SUCCESS;
2010 }
2011 
2012 /**
2013  *	restart_ctrlq - restart a suspended control queue
2014  *	@data: the control queue to restart
2015  *
2016  *	Resumes transmission on a suspended Tx control queue.
2017  */
2018 static void restart_ctrlq(unsigned long data)
2019 {
2020 	struct sk_buff *skb;
2021 	unsigned int written = 0;
2022 	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
2023 
2024 	spin_lock(&q->sendq.lock);
2025 	reclaim_completed_tx_imm(&q->q);
2026 	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
2027 
2028 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
2029 		struct fw_wr_hdr *wr;
2030 		unsigned int ndesc = skb->priority;     /* previously saved */
2031 
2032 		written += ndesc;
2033 		/* Write descriptors and free skbs outside the lock to limit
2034 		 * wait times.  q->full is still set so new skbs will be queued.
2035 		 */
2036 		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2037 		txq_advance(&q->q, ndesc);
2038 		spin_unlock(&q->sendq.lock);
2039 
2040 		cxgb4_inline_tx_skb(skb, &q->q, wr);
2041 		kfree_skb(skb);
2042 
2043 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2044 			unsigned long old = q->q.stops;
2045 
2046 			ctrlq_check_stop(q, wr);
2047 			if (q->q.stops != old) {          /* suspended anew */
2048 				spin_lock(&q->sendq.lock);
2049 				goto ringdb;
2050 			}
2051 		}
2052 		if (written > 16) {
2053 			cxgb4_ring_tx_db(q->adap, &q->q, written);
2054 			written = 0;
2055 		}
2056 		spin_lock(&q->sendq.lock);
2057 	}
2058 	q->full = 0;
2059 ringdb:
2060 	if (written)
2061 		cxgb4_ring_tx_db(q->adap, &q->q, written);
2062 	spin_unlock(&q->sendq.lock);
2063 }
2064 
2065 /**
2066  *	t4_mgmt_tx - send a management message
2067  *	@adap: the adapter
2068  *	@skb: the packet containing the management message
2069  *
2070  *	Send a management message through control queue 0.
2071  */
2072 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
2073 {
2074 	int ret;
2075 
2076 	local_bh_disable();
2077 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2078 	local_bh_enable();
2079 	return ret;
2080 }
2081 
2082 /**
2083  *	is_ofld_imm - check whether a packet can be sent as immediate data
2084  *	@skb: the packet
2085  *
2086  *	Returns true if a packet can be sent as an offload WR with immediate
2087  *	data.  We currently use the same limit as for Ethernet packets.
2088  */
2089 static inline int is_ofld_imm(const struct sk_buff *skb)
2090 {
2091 	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
2092 	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
2093 
2094 	if (opcode == FW_CRYPTO_LOOKASIDE_WR)
2095 		return skb->len <= SGE_MAX_WR_LEN;
2096 	else
2097 		return skb->len <= MAX_IMM_TX_PKT_LEN;
2098 }
2099 
2100 /**
2101  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
2102  *	@skb: the packet
2103  *
2104  *	Returns the number of flits needed for the given offload packet.
2105  *	These packets are already fully constructed and no additional headers
2106  *	will be added.
2107  */
2108 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
2109 {
2110 	unsigned int flits, cnt;
2111 
2112 	if (is_ofld_imm(skb))
2113 		return DIV_ROUND_UP(skb->len, 8);
2114 
2115 	flits = skb_transport_offset(skb) / 8U;   /* headers */
2116 	cnt = skb_shinfo(skb)->nr_frags;
2117 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
2118 		cnt++;
2119 	return flits + sgl_len(cnt);
2120 }
2121 
2122 /**
2123  *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2124  *	@adap: the adapter
2125  *	@q: the queue to stop
2126  *
2127  *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
2128  *	inability to map packets.  A periodic timer attempts to restart
2129  *	queues so marked.
2130  */
2131 static void txq_stop_maperr(struct sge_uld_txq *q)
2132 {
2133 	q->mapping_err++;
2134 	q->q.stops++;
2135 	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
2136 		q->adap->sge.txq_maperr);
2137 }
2138 
2139 /**
2140  *	ofldtxq_stop - stop an offload Tx queue that has become full
2141  *	@q: the queue to stop
2142  *	@wr: the Work Request causing the queue to become full
2143  *
2144  *	Stops an offload Tx queue that has become full and modifies the packet
2145  *	being written to request a wakeup.
2146  */
2147 static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
2148 {
2149 	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2150 	q->q.stops++;
2151 	q->full = 1;
2152 }
2153 
2154 /**
2155  *	service_ofldq - service/restart a suspended offload queue
2156  *	@q: the offload queue
2157  *
2158  *	Services an offload Tx queue by moving packets from its Pending Send
2159  *	Queue to the Hardware TX ring.  The function starts and ends with the
2160  *	Send Queue locked, but drops the lock while putting the skb at the
2161  *	head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
2162  *	allows more skbs to be added to the Send Queue by other threads.
2163  *	The packet being processed at the head of the Pending Send Queue is
2164  *	left on the queue in case we experience DMA Mapping errors, etc.
2165  *	and need to give up and restart later.
2166  *
2167  *	service_ofldq() can be thought of as a task which opportunistically
2168  *	uses other threads' execution contexts.  We use the Offload Queue
2169  *	boolean "service_ofldq_running" to make sure that only one instance
2170  *	is ever running at a time ...
2171  */
2172 static void service_ofldq(struct sge_uld_txq *q)
2173 {
2174 	u64 *pos, *before, *end;
2175 	int credits;
2176 	struct sk_buff *skb;
2177 	struct sge_txq *txq;
2178 	unsigned int left;
2179 	unsigned int written = 0;
2180 	unsigned int flits, ndesc;
2181 
2182 	/* If another thread is currently in service_ofldq() processing the
2183 	 * Pending Send Queue then there's nothing to do. Otherwise, flag
2184 	 * that we're doing the work and continue.  Examining/modifying
2185 	 * the Offload Queue boolean "service_ofldq_running" must be done
2186 	 * while holding the Pending Send Queue Lock.
2187 	 */
2188 	if (q->service_ofldq_running)
2189 		return;
2190 	q->service_ofldq_running = true;
2191 
2192 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
2193 		/* We drop the lock while we're working with the skb at the
2194 		 * head of the Pending Send Queue.  This allows more skbs to
2195 		 * be added to the Pending Send Queue while we're working on
2196 		 * this one.  We don't need to lock to guard the TX Ring
2197 		 * updates because only one thread of execution is ever
2198 		 * allowed into service_ofldq() at a time.
2199 		 */
2200 		spin_unlock(&q->sendq.lock);
2201 
2202 		cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
2203 
2204 		flits = skb->priority;                /* previously saved */
2205 		ndesc = flits_to_desc(flits);
2206 		credits = txq_avail(&q->q) - ndesc;
2207 		BUG_ON(credits < 0);
2208 		if (unlikely(credits < TXQ_STOP_THRES))
2209 			ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
2210 
2211 		pos = (u64 *)&q->q.desc[q->q.pidx];
2212 		if (is_ofld_imm(skb))
2213 			cxgb4_inline_tx_skb(skb, &q->q, pos);
2214 		else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
2215 				       (dma_addr_t *)skb->head)) {
2216 			txq_stop_maperr(q);
2217 			spin_lock(&q->sendq.lock);
2218 			break;
2219 		} else {
2220 			int last_desc, hdr_len = skb_transport_offset(skb);
2221 
2222 			/* The WR headers may not fit within one descriptor.
2223 			 * So we need to deal with wrap-around here.
2224 			 */
2225 			before = (u64 *)pos;
2226 			end = (u64 *)pos + flits;
2227 			txq = &q->q;
2228 			pos = (void *)inline_tx_skb_header(skb, &q->q,
2229 							   (void *)pos,
2230 							   hdr_len);
2231 			if (before > (u64 *)pos) {
2232 				left = (u8 *)end - (u8 *)txq->stat;
2233 				end = (void *)txq->desc + left;
2234 			}
2235 
2236 			/* If current position is already at the end of the
2237 			 * ofld queue, reset the current to point to
2238 			 * start of the queue and update the end ptr as well.
2239 			 */
2240 			if (pos == (u64 *)txq->stat) {
2241 				left = (u8 *)end - (u8 *)txq->stat;
2242 				end = (void *)txq->desc + left;
2243 				pos = (void *)txq->desc;
2244 			}
2245 
2246 			cxgb4_write_sgl(skb, &q->q, (void *)pos,
2247 					end, hdr_len,
2248 					(dma_addr_t *)skb->head);
2249 #ifdef CONFIG_NEED_DMA_MAP_STATE
2250 			skb->dev = q->adap->port[0];
2251 			skb->destructor = deferred_unmap_destructor;
2252 #endif
2253 			last_desc = q->q.pidx + ndesc - 1;
2254 			if (last_desc >= q->q.size)
2255 				last_desc -= q->q.size;
2256 			q->q.sdesc[last_desc].skb = skb;
2257 		}
2258 
2259 		txq_advance(&q->q, ndesc);
2260 		written += ndesc;
2261 		if (unlikely(written > 32)) {
2262 			cxgb4_ring_tx_db(q->adap, &q->q, written);
2263 			written = 0;
2264 		}
2265 
2266 		/* Reacquire the Pending Send Queue Lock so we can unlink the
2267 		 * skb we've just successfully transferred to the TX Ring and
2268 		 * loop for the next skb which may be at the head of the
2269 		 * Pending Send Queue.
2270 		 */
2271 		spin_lock(&q->sendq.lock);
2272 		__skb_unlink(skb, &q->sendq);
2273 		if (is_ofld_imm(skb))
2274 			kfree_skb(skb);
2275 	}
2276 	if (likely(written))
2277 		cxgb4_ring_tx_db(q->adap, &q->q, written);
2278 
2279 	/* Indicate that no thread is processing the Pending Send Queue
2280 	 * currently.
2281 	 */
2282 	q->service_ofldq_running = false;
2283 }
2284 
2285 /**
2286  *	ofld_xmit - send a packet through an offload queue
2287  *	@q: the Tx offload queue
2288  *	@skb: the packet
2289  *
2290  *	Send an offload packet through an SGE offload queue.
2291  */
2292 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
2293 {
2294 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
2295 	spin_lock(&q->sendq.lock);
2296 
2297 	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
2298 	 * that results in this new skb being the only one on the queue, start
2299 	 * servicing it.  If there are other skbs already on the list, then
2300 	 * either the queue is currently being processed or it's been stopped
2301 	 * for some reason and it'll be restarted at a later time.  Restart
2302 	 * paths are triggered by events like experiencing a DMA Mapping Error
2303 	 * or filling the Hardware TX Ring.
2304 	 */
2305 	__skb_queue_tail(&q->sendq, skb);
2306 	if (q->sendq.qlen == 1)
2307 		service_ofldq(q);
2308 
2309 	spin_unlock(&q->sendq.lock);
2310 	return NET_XMIT_SUCCESS;
2311 }
2312 
2313 /**
2314  *	restart_ofldq - restart a suspended offload queue
2315  *	@data: the offload queue to restart
2316  *
2317  *	Resumes transmission on a suspended Tx offload queue.
2318  */
2319 static void restart_ofldq(unsigned long data)
2320 {
2321 	struct sge_uld_txq *q = (struct sge_uld_txq *)data;
2322 
2323 	spin_lock(&q->sendq.lock);
2324 	q->full = 0;            /* the queue actually is completely empty now */
2325 	service_ofldq(q);
2326 	spin_unlock(&q->sendq.lock);
2327 }
2328 
2329 /**
2330  *	skb_txq - return the Tx queue an offload packet should use
2331  *	@skb: the packet
2332  *
2333  *	Returns the Tx queue an offload packet should use as indicated by bits
2334  *	1-15 in the packet's queue_mapping.
2335  */
2336 static inline unsigned int skb_txq(const struct sk_buff *skb)
2337 {
2338 	return skb->queue_mapping >> 1;
2339 }
2340 
2341 /**
2342  *	is_ctrl_pkt - return whether an offload packet is a control packet
2343  *	@skb: the packet
2344  *
2345  *	Returns whether an offload packet should use an OFLD or a CTRL
2346  *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
2347  */
2348 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
2349 {
2350 	return skb->queue_mapping & 1;
2351 }
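
/* Editor's sketch (not part of the driver): a ULD choosing its Tx queue
 * would encode it the same way skb_txq() and is_ctrl_pkt() decode it above,
 * i.e. bit 0 selects a CTRL vs. an OFLD queue and bits 1-15 carry the queue
 * index.  The helper name below is hypothetical.
 */
static inline void example_set_ofld_queue_mapping(struct sk_buff *skb,
						  unsigned int queue,
						  bool ctrl)
{
	/* bit 0: CTRL queue flag, bits 1-15: queue index */
	skb->queue_mapping = (queue << 1) | (ctrl ? 1 : 0);
}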
2352 
2353 static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
2354 			   unsigned int tx_uld_type)
2355 {
2356 	struct sge_uld_txq_info *txq_info;
2357 	struct sge_uld_txq *txq;
2358 	unsigned int idx = skb_txq(skb);
2359 
2360 	if (unlikely(is_ctrl_pkt(skb))) {
2361 		/* Single ctrl queue is a requirement for LE workaround path */
2362 		if (adap->tids.nsftids)
2363 			idx = 0;
2364 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
2365 	}
2366 
2367 	txq_info = adap->sge.uld_txq_info[tx_uld_type];
2368 	if (unlikely(!txq_info)) {
2369 		WARN_ON(true);
2370 		kfree_skb(skb);
2371 		return NET_XMIT_DROP;
2372 	}
2373 
2374 	txq = &txq_info->uldtxq[idx];
2375 	return ofld_xmit(txq, skb);
2376 }
2377 
2378 /**
2379  *	t4_ofld_send - send an offload packet
2380  *	@adap: the adapter
2381  *	@skb: the packet
2382  *
2383  *	Sends an offload packet.  We use the packet queue_mapping to select the
2384  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
2385  *	should be sent as regular or control, bits 1-15 select the queue.
2386  */
2387 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
2388 {
2389 	int ret;
2390 
2391 	local_bh_disable();
2392 	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
2393 	local_bh_enable();
2394 	return ret;
2395 }
2396 
2397 /**
2398  *	cxgb4_ofld_send - send an offload packet
2399  *	@dev: the net device
2400  *	@skb: the packet
2401  *
2402  *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
2403  *	intended for ULDs.
2404  */
2405 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
2406 {
2407 	return t4_ofld_send(netdev2adap(dev), skb);
2408 }
2409 EXPORT_SYMBOL(cxgb4_ofld_send);
2410 
2411 static void *inline_tx_header(const void *src,
2412 			      const struct sge_txq *q,
2413 			      void *pos, int length)
2414 {
2415 	int left = (void *)q->stat - pos;
2416 	u64 *p;
2417 
2418 	if (likely(length <= left)) {
2419 		memcpy(pos, src, length);
2420 		pos += length;
2421 	} else {
2422 		memcpy(pos, src, left);
2423 		memcpy(q->desc, src + left, length - left);
2424 		pos = (void *)q->desc + (length - left);
2425 	}
2426 	/* 0-pad to multiple of 16 */
2427 	p = PTR_ALIGN(pos, 8);
2428 	if ((uintptr_t)p & 8) {
2429 		*p = 0;
2430 		return p + 1;
2431 	}
2432 	return p;
2433 }
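
/* Editor's note (not part of the driver): as a worked example of the
 * padding above, copying a 20-byte header that starts on a 16-byte boundary
 * leaves pos at offset 20; PTR_ALIGN() advances it to offset 24, and since
 * that address has bit 3 set, one zeroed __be64 is written so the returned
 * position (offset 32) is again 16-byte aligned.
 */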
2434 
2435 /**
2436  *      ofld_xmit_direct - copy a WR into offload queue
2437  *      @q: the Tx offload queue
2438  *      @src: location of WR
2439  *      @len: WR length
2440  *
2441  *      Copy an immediate WR into an uncontended SGE offload queue.
2442  */
2443 static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
2444 			    unsigned int len)
2445 {
2446 	unsigned int ndesc;
2447 	int credits;
2448 	u64 *pos;
2449 
2450 	/* Use the lower limit as the cut-off */
2451 	if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
2452 		WARN_ON(1);
2453 		return NET_XMIT_DROP;
2454 	}
2455 
2456 	/* Don't return NET_XMIT_CN here as the current
2457 	 * implementation doesn't queue the request
2458 	 * using an skb when the following conditions are not met
2459 	 */
2460 	if (!spin_trylock(&q->sendq.lock))
2461 		return NET_XMIT_DROP;
2462 
2463 	if (q->full || !skb_queue_empty(&q->sendq) ||
2464 	    q->service_ofldq_running) {
2465 		spin_unlock(&q->sendq.lock);
2466 		return NET_XMIT_DROP;
2467 	}
2468 	ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
2469 	credits = txq_avail(&q->q) - ndesc;
2470 	pos = (u64 *)&q->q.desc[q->q.pidx];
2471 
2472 	/* ofldtxq_stop modifies WR header in-situ */
2473 	inline_tx_header(src, &q->q, pos, len);
2474 	if (unlikely(credits < TXQ_STOP_THRES))
2475 		ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
2476 	txq_advance(&q->q, ndesc);
2477 	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2478 
2479 	spin_unlock(&q->sendq.lock);
2480 	return NET_XMIT_SUCCESS;
2481 }
2482 
2483 int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
2484 		       const void *src, unsigned int len)
2485 {
2486 	struct sge_uld_txq_info *txq_info;
2487 	struct sge_uld_txq *txq;
2488 	struct adapter *adap;
2489 	int ret;
2490 
2491 	adap = netdev2adap(dev);
2492 
2493 	local_bh_disable();
2494 	txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2495 	if (unlikely(!txq_info)) {
2496 		WARN_ON(true);
2497 		local_bh_enable();
2498 		return NET_XMIT_DROP;
2499 	}
2500 	txq = &txq_info->uldtxq[idx];
2501 
2502 	ret = ofld_xmit_direct(txq, src, len);
2503 	local_bh_enable();
2504 	return net_xmit_eval(ret);
2505 }
2506 EXPORT_SYMBOL(cxgb4_immdata_send);
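
/* Editor's sketch (not part of the driver): a ULD with a fully formed
 * immediate-data Work Request can try the trylock-based fast path above and
 * fall back to the skb-based cxgb4_ofld_send() path if the queue is busy or
 * contended.  The function and WR buffer below are hypothetical.
 */
static int example_push_imm_wr(struct net_device *dev, unsigned int txq_idx,
			       const void *wr, unsigned int wr_len)
{
	int ret = cxgb4_immdata_send(dev, txq_idx, wr, wr_len);

	if (ret)		/* non-zero: the WR was not accepted */
		return -EBUSY;	/* caller should retry via an skb path */
	return 0;
}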
2507 
2508 /**
2509  *	t4_crypto_send - send crypto packet
2510  *	@adap: the adapter
2511  *	@skb: the packet
2512  *
2513  *	Sends crypto packet.  We use the packet queue_mapping to select the
2514  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
2515  *	should be sent as regular or control, bits 1-15 select the queue.
2516  */
2517 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
2518 {
2519 	int ret;
2520 
2521 	local_bh_disable();
2522 	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
2523 	local_bh_enable();
2524 	return ret;
2525 }
2526 
2527 /**
2528  *	cxgb4_crypto_send - send crypto packet
2529  *	@dev: the net device
2530  *	@skb: the packet
2531  *
2532  *	Sends crypto packet.  This is an exported version of @t4_crypto_send,
2533  *	intended for ULDs.
2534  */
2535 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
2536 {
2537 	return t4_crypto_send(netdev2adap(dev), skb);
2538 }
2539 EXPORT_SYMBOL(cxgb4_crypto_send);
2540 
2541 static inline void copy_frags(struct sk_buff *skb,
2542 			      const struct pkt_gl *gl, unsigned int offset)
2543 {
2544 	int i;
2545 
2546 	/* usually there's just one frag */
2547 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
2548 			     gl->frags[0].offset + offset,
2549 			     gl->frags[0].size - offset);
2550 	skb_shinfo(skb)->nr_frags = gl->nfrags;
2551 	for (i = 1; i < gl->nfrags; i++)
2552 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
2553 				     gl->frags[i].offset,
2554 				     gl->frags[i].size);
2555 
2556 	/* get a reference to the last page, we don't own it */
2557 	get_page(gl->frags[gl->nfrags - 1].page);
2558 }
2559 
2560 /**
2561  *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
2562  *	@gl: the gather list
2563  *	@skb_len: size of sk_buff main body if it carries fragments
2564  *	@pull_len: amount of data to move to the sk_buff's main body
2565  *
2566  *	Builds an sk_buff from the given packet gather list.  Returns the
2567  *	sk_buff or %NULL if sk_buff allocation failed.
2568  */
2569 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
2570 				   unsigned int skb_len, unsigned int pull_len)
2571 {
2572 	struct sk_buff *skb;
2573 
2574 	/*
2575 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
2576 	 * size, which is expected since buffers are at least PAGE_SIZEd.
2577 	 * In this case packets up to RX_COPY_THRES have only one fragment.
2578 	 */
2579 	if (gl->tot_len <= RX_COPY_THRES) {
2580 		skb = dev_alloc_skb(gl->tot_len);
2581 		if (unlikely(!skb))
2582 			goto out;
2583 		__skb_put(skb, gl->tot_len);
2584 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
2585 	} else {
2586 		skb = dev_alloc_skb(skb_len);
2587 		if (unlikely(!skb))
2588 			goto out;
2589 		__skb_put(skb, pull_len);
2590 		skb_copy_to_linear_data(skb, gl->va, pull_len);
2591 
2592 		copy_frags(skb, gl, pull_len);
2593 		skb->len = gl->tot_len;
2594 		skb->data_len = skb->len - pull_len;
2595 		skb->truesize += skb->data_len;
2596 	}
2597 out:	return skb;
2598 }
2599 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
2600 
2601 /**
2602  *	t4_pktgl_free - free a packet gather list
2603  *	@gl: the gather list
2604  *
2605  *	Releases the pages of a packet gather list.  We do not own the last
2606  *	page on the list and do not free it.
2607  */
2608 static void t4_pktgl_free(const struct pkt_gl *gl)
2609 {
2610 	int n;
2611 	const struct page_frag *p;
2612 
2613 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
2614 		put_page(p->page);
2615 }
2616 
2617 /*
2618  * Process an MPS trace packet.  Give it an unused protocol number so it won't
2619  * be delivered to anyone and send it to the stack for capture.
2620  */
2621 static noinline int handle_trace_pkt(struct adapter *adap,
2622 				     const struct pkt_gl *gl)
2623 {
2624 	struct sk_buff *skb;
2625 
2626 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
2627 	if (unlikely(!skb)) {
2628 		t4_pktgl_free(gl);
2629 		return 0;
2630 	}
2631 
2632 	if (is_t4(adap->params.chip))
2633 		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
2634 	else
2635 		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
2636 
2637 	skb_reset_mac_header(skb);
2638 	skb->protocol = htons(0xffff);
2639 	skb->dev = adap->port[0];
2640 	netif_receive_skb(skb);
2641 	return 0;
2642 }
2643 
2644 /**
2645  * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
2646  * @adap: the adapter
2647  * @hwtstamps: time stamp structure to update
2648  * @sgetstamp: 60-bit IQE timestamp
2649  *
2650  * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
2651  * convert it to ktime_t and store it in @hwtstamps
2652  **/
2653 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
2654 				     struct skb_shared_hwtstamps *hwtstamps,
2655 				     u64 sgetstamp)
2656 {
2657 	u64 ns;
2658 	u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
2659 
2660 	ns = div_u64(tmp, adap->params.vpd.cclk);
2661 
2662 	memset(hwtstamps, 0, sizeof(*hwtstamps));
2663 	hwtstamps->hwtstamp = ns_to_ktime(ns);
2664 }
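
/* Editor's note (not part of the driver): the conversion above computes
 * ns = round(sgetstamp * 10^6 / cclk), which treats adap->params.vpd.cclk
 * as the Core Clock in kHz.  For a hypothetical 250000 kHz core clock each
 * tick is 4 ns, so an sgetstamp of 1000 ticks becomes 4000 ns.
 */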
2665 
2666 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
2667 		   const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
2668 {
2669 	struct adapter *adapter = rxq->rspq.adap;
2670 	struct sge *s = &adapter->sge;
2671 	struct port_info *pi;
2672 	int ret;
2673 	struct sk_buff *skb;
2674 
2675 	skb = napi_get_frags(&rxq->rspq.napi);
2676 	if (unlikely(!skb)) {
2677 		t4_pktgl_free(gl);
2678 		rxq->stats.rx_drops++;
2679 		return;
2680 	}
2681 
2682 	copy_frags(skb, gl, s->pktshift);
2683 	if (tnl_hdr_len)
2684 		skb->csum_level = 1;
2685 	skb->len = gl->tot_len - s->pktshift;
2686 	skb->data_len = skb->len;
2687 	skb->truesize += skb->data_len;
2688 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2689 	skb_record_rx_queue(skb, rxq->rspq.idx);
2690 	pi = netdev_priv(skb->dev);
2691 	if (pi->rxtstamp)
2692 		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
2693 					 gl->sgetstamp);
2694 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
2695 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2696 			     PKT_HASH_TYPE_L3);
2697 
2698 	if (unlikely(pkt->vlan_ex)) {
2699 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2700 		rxq->stats.vlan_ex++;
2701 	}
2702 	ret = napi_gro_frags(&rxq->rspq.napi);
2703 	if (ret == GRO_HELD)
2704 		rxq->stats.lro_pkts++;
2705 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
2706 		rxq->stats.lro_merged++;
2707 	rxq->stats.pkts++;
2708 	rxq->stats.rx_cso++;
2709 }
2710 
2711 enum {
2712 	RX_NON_PTP_PKT = 0,
2713 	RX_PTP_PKT_SUC = 1,
2714 	RX_PTP_PKT_ERR = 2
2715 };
2716 
2717 /**
2718  *     t4_systim_to_hwstamp - read hardware time stamp
2719  *     @adap: the adapter
2720  *     @skb: the packet
2721  *
2722  *     Read the Time Stamp from the MPS packet and insert it into the skb,
2723  *     which is then forwarded to the PTP application
2724  */
2725 static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
2726 					 struct sk_buff *skb)
2727 {
2728 	struct skb_shared_hwtstamps *hwtstamps;
2729 	struct cpl_rx_mps_pkt *cpl = NULL;
2730 	unsigned char *data;
2731 	int offset;
2732 
2733 	cpl = (struct cpl_rx_mps_pkt *)skb->data;
2734 	if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
2735 	     X_CPL_RX_MPS_PKT_TYPE_PTP))
2736 		return RX_PTP_PKT_ERR;
2737 
2738 	data = skb->data + sizeof(*cpl);
2739 	skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
2740 	offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
2741 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
2742 		return RX_PTP_PKT_ERR;
2743 
2744 	hwtstamps = skb_hwtstamps(skb);
2745 	memset(hwtstamps, 0, sizeof(*hwtstamps));
2746 	hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
2747 
2748 	return RX_PTP_PKT_SUC;
2749 }
2750 
2751 /**
2752  *     t4_rx_hststamp - Recv PTP Event Message
2753  *     @adap: the adapter
2754  *     @rsp: the response queue descriptor holding the RX_PKT message
2755  *     @skb: the packet
2756  *
2757  *     If PTP is enabled and this is an MPS packet, read the HW timestamp
2758  */
2759 static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
2760 			  struct sge_eth_rxq *rxq, struct sk_buff *skb)
2761 {
2762 	int ret;
2763 
2764 	if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
2765 		     !is_t4(adapter->params.chip))) {
2766 		ret = t4_systim_to_hwstamp(adapter, skb);
2767 		if (ret == RX_PTP_PKT_ERR) {
2768 			kfree_skb(skb);
2769 			rxq->stats.rx_drops++;
2770 		}
2771 		return ret;
2772 	}
2773 	return RX_NON_PTP_PKT;
2774 }
2775 
2776 /**
2777  *      t4_tx_hststamp - Loopback PTP Transmit Event Message
2778  *      @adap: the adapter
2779  *      @skb: the packet
2780  *      @dev: the ingress net device
2781  *
2782  *      Read hardware timestamp for the loopback PTP Tx event message
2783  */
2784 static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
2785 			  struct net_device *dev)
2786 {
2787 	struct port_info *pi = netdev_priv(dev);
2788 
2789 	if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
2790 		cxgb4_ptp_read_hwstamp(adapter, pi);
2791 		kfree_skb(skb);
2792 		return 0;
2793 	}
2794 	return 1;
2795 }
2796 
2797 /**
2798  *	t4_ethrx_handler - process an ingress ethernet packet
2799  *	@q: the response queue that received the packet
2800  *	@rsp: the response queue descriptor holding the RX_PKT message
2801  *	@si: the gather list of packet fragments
2802  *
2803  *	Process an ingress ethernet packet and deliver it to the stack.
2804  */
2805 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
2806 		     const struct pkt_gl *si)
2807 {
2808 	bool csum_ok;
2809 	struct sk_buff *skb;
2810 	const struct cpl_rx_pkt *pkt;
2811 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2812 	struct adapter *adapter = q->adap;
2813 	struct sge *s = &q->adap->sge;
2814 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
2815 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
2816 	u16 err_vec, tnl_hdr_len = 0;
2817 	struct port_info *pi;
2818 	int ret = 0;
2819 
2820 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
2821 		return handle_trace_pkt(q->adap, si);
2822 
2823 	pkt = (const struct cpl_rx_pkt *)rsp;
2824 	/* Compressed error vector is enabled for T6 only */
2825 	if (q->adap->params.tp.rx_pkt_encap) {
2826 		err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
2827 		tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
2828 	} else {
2829 		err_vec = be16_to_cpu(pkt->err_vec);
2830 	}
2831 
2832 	csum_ok = pkt->csum_calc && !err_vec &&
2833 		  (q->netdev->features & NETIF_F_RXCSUM);
2834 	if (((pkt->l2info & htonl(RXF_TCP_F)) ||
2835 	     tnl_hdr_len) &&
2836 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
2837 		do_gro(rxq, si, pkt, tnl_hdr_len);
2838 		return 0;
2839 	}
2840 
2841 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
2842 	if (unlikely(!skb)) {
2843 		t4_pktgl_free(si);
2844 		rxq->stats.rx_drops++;
2845 		return 0;
2846 	}
2847 	pi = netdev_priv(q->netdev);
2848 
2849 	/* Handle PTP Event Rx packet */
2850 	if (unlikely(pi->ptp_enable)) {
2851 		ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
2852 		if (ret == RX_PTP_PKT_ERR)
2853 			return 0;
2854 	}
2855 	if (likely(!ret))
2856 		__skb_pull(skb, s->pktshift); /* remove ethernet header pad */
2857 
2858 	/* Handle the PTP Event Tx Loopback packet */
2859 	if (unlikely(pi->ptp_enable && !ret &&
2860 		     (pkt->l2info & htonl(RXF_UDP_F)) &&
2861 		     cxgb4_ptp_is_ptp_rx(skb))) {
2862 		if (!t4_tx_hststamp(adapter, skb, q->netdev))
2863 			return 0;
2864 	}
2865 
2866 	skb->protocol = eth_type_trans(skb, q->netdev);
2867 	skb_record_rx_queue(skb, q->idx);
2868 	if (skb->dev->features & NETIF_F_RXHASH)
2869 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2870 			     PKT_HASH_TYPE_L3);
2871 
2872 	rxq->stats.pkts++;
2873 
2874 	if (pi->rxtstamp)
2875 		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
2876 					 si->sgetstamp);
2877 	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
2878 		if (!pkt->ip_frag) {
2879 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2880 			rxq->stats.rx_cso++;
2881 		} else if (pkt->l2info & htonl(RXF_IP_F)) {
2882 			__sum16 c = (__force __sum16)pkt->csum;
2883 			skb->csum = csum_unfold(c);
2884 
2885 			if (tnl_hdr_len) {
2886 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2887 				skb->csum_level = 1;
2888 			} else {
2889 				skb->ip_summed = CHECKSUM_COMPLETE;
2890 			}
2891 			rxq->stats.rx_cso++;
2892 		}
2893 	} else {
2894 		skb_checksum_none_assert(skb);
2895 #ifdef CONFIG_CHELSIO_T4_FCOE
2896 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
2897 			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
2898 
2899 		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
2900 			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
2901 			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
2902 				if (q->adap->params.tp.rx_pkt_encap)
2903 					csum_ok = err_vec &
2904 						  T6_COMPR_RXERR_SUM_F;
2905 				else
2906 					csum_ok = err_vec & RXERR_CSUM_F;
2907 				if (!csum_ok)
2908 					skb->ip_summed = CHECKSUM_UNNECESSARY;
2909 			}
2910 		}
2911 
2912 #undef CPL_RX_PKT_FLAGS
2913 #endif /* CONFIG_CHELSIO_T4_FCOE */
2914 	}
2915 
2916 	if (unlikely(pkt->vlan_ex)) {
2917 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2918 		rxq->stats.vlan_ex++;
2919 	}
2920 	skb_mark_napi_id(skb, &q->napi);
2921 	netif_receive_skb(skb);
2922 	return 0;
2923 }
2924 
2925 /**
2926  *	restore_rx_bufs - put back a packet's Rx buffers
2927  *	@si: the packet gather list
2928  *	@q: the SGE free list
2929  *	@frags: number of FL buffers to restore
2930  *
2931  *	Puts back on an FL the Rx buffers associated with @si.  The buffers
2932  *	have already been unmapped and are left unmapped; we mark them so to
2933  *	prevent further unmapping attempts.
2934  *
2935  *	This function undoes a series of @unmap_rx_buf calls when we find out
2936  *	that the current packet can't be processed right away after all and we
2937  *	need to come back to it later.  This is a very rare event and there's
2938  *	no effort to make this particularly efficient.
2939  */
2940 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
2941 			    int frags)
2942 {
2943 	struct rx_sw_desc *d;
2944 
2945 	while (frags--) {
2946 		if (q->cidx == 0)
2947 			q->cidx = q->size - 1;
2948 		else
2949 			q->cidx--;
2950 		d = &q->sdesc[q->cidx];
2951 		d->page = si->frags[frags].page;
2952 		d->dma_addr |= RX_UNMAPPED_BUF;
2953 		q->avail++;
2954 	}
2955 }
2956 
2957 /**
2958  *	is_new_response - check if a response is newly written
2959  *	@r: the response descriptor
2960  *	@q: the response queue
2961  *
2962  *	Returns true if a response descriptor contains a yet unprocessed
2963  *	response.
2964  */
2965 static inline bool is_new_response(const struct rsp_ctrl *r,
2966 				   const struct sge_rspq *q)
2967 {
2968 	return (r->type_gen >> RSPD_GEN_S) == q->gen;
2969 }
2970 
2971 /**
2972  *	rspq_next - advance to the next entry in a response queue
2973  *	@q: the queue
2974  *
2975  *	Updates the state of a response queue to advance it to the next entry.
2976  */
2977 static inline void rspq_next(struct sge_rspq *q)
2978 {
2979 	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
2980 	if (unlikely(++q->cidx == q->size)) {
2981 		q->cidx = 0;
2982 		q->gen ^= 1;
2983 		q->cur_desc = q->desc;
2984 	}
2985 }
2986 
2987 /**
2988  *	process_responses - process responses from an SGE response queue
2989  *	@q: the ingress queue to process
2990  *	@budget: how many responses can be processed in this round
2991  *
2992  *	Process responses from an SGE response queue up to the supplied budget.
2993  *	Responses include received packets as well as control messages from FW
2994  *	or HW.
2995  *
2996  *	Additionally choose the interrupt holdoff time for the next interrupt
2997  *	on this queue.  If the system is under memory shortage use a fairly
2998  *	long delay to help recovery.
2999  */
3000 static int process_responses(struct sge_rspq *q, int budget)
3001 {
3002 	int ret, rsp_type;
3003 	int budget_left = budget;
3004 	const struct rsp_ctrl *rc;
3005 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3006 	struct adapter *adapter = q->adap;
3007 	struct sge *s = &adapter->sge;
3008 
3009 	while (likely(budget_left)) {
3010 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3011 		if (!is_new_response(rc, q)) {
3012 			if (q->flush_handler)
3013 				q->flush_handler(q);
3014 			break;
3015 		}
3016 
3017 		dma_rmb();
3018 		rsp_type = RSPD_TYPE_G(rc->type_gen);
3019 		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
3020 			struct page_frag *fp;
3021 			struct pkt_gl si;
3022 			const struct rx_sw_desc *rsd;
3023 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
3024 
3025 			if (len & RSPD_NEWBUF_F) {
3026 				if (likely(q->offset > 0)) {
3027 					free_rx_bufs(q->adap, &rxq->fl, 1);
3028 					q->offset = 0;
3029 				}
3030 				len = RSPD_LEN_G(len);
3031 			}
3032 			si.tot_len = len;
3033 
3034 			/* gather packet fragments */
3035 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
3036 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
3037 				bufsz = get_buf_size(adapter, rsd);
3038 				fp->page = rsd->page;
3039 				fp->offset = q->offset;
3040 				fp->size = min(bufsz, len);
3041 				len -= fp->size;
3042 				if (!len)
3043 					break;
3044 				unmap_rx_buf(q->adap, &rxq->fl);
3045 			}
3046 
3047 			si.sgetstamp = SGE_TIMESTAMP_G(
3048 					be64_to_cpu(rc->last_flit));
3049 			/*
3050 			 * Last buffer remains mapped so explicitly make it
3051 			 * coherent for CPU access.
3052 			 */
3053 			dma_sync_single_for_cpu(q->adap->pdev_dev,
3054 						get_buf_addr(rsd),
3055 						fp->size, DMA_FROM_DEVICE);
3056 
3057 			si.va = page_address(si.frags[0].page) +
3058 				si.frags[0].offset;
3059 			prefetch(si.va);
3060 
3061 			si.nfrags = frags + 1;
3062 			ret = q->handler(q, q->cur_desc, &si);
3063 			if (likely(ret == 0))
3064 				q->offset += ALIGN(fp->size, s->fl_align);
3065 			else
3066 				restore_rx_bufs(&si, &rxq->fl, frags);
3067 		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
3068 			ret = q->handler(q, q->cur_desc, NULL);
3069 		} else {
3070 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
3071 		}
3072 
3073 		if (unlikely(ret)) {
3074 			/* couldn't process descriptor, back off for recovery */
3075 			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
3076 			break;
3077 		}
3078 
3079 		rspq_next(q);
3080 		budget_left--;
3081 	}
3082 
3083 	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
3084 		__refill_fl(q->adap, &rxq->fl);
3085 	return budget - budget_left;
3086 }
3087 
3088 /**
3089  *	napi_rx_handler - the NAPI handler for Rx processing
3090  *	@napi: the napi instance
3091  *	@budget: how many packets we can process in this round
3092  *
3093  *	Handler for new data events when using NAPI.  This does not need any
3094  *	locking or protection from interrupts as data interrupts are off at
3095  *	this point and other adapter interrupts do not interfere (the latter
3096  *	is not a concern at all with MSI-X as non-data interrupts then have
3097  *	a separate handler).
3098  */
3099 static int napi_rx_handler(struct napi_struct *napi, int budget)
3100 {
3101 	unsigned int params;
3102 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
3103 	int work_done;
3104 	u32 val;
3105 
3106 	work_done = process_responses(q, budget);
3107 	if (likely(work_done < budget)) {
3108 		int timer_index;
3109 
3110 		napi_complete_done(napi, work_done);
3111 		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
3112 
3113 		if (q->adaptive_rx) {
3114 			if (work_done > max(timer_pkt_quota[timer_index],
3115 					    MIN_NAPI_WORK))
3116 				timer_index = (timer_index + 1);
3117 			else
3118 				timer_index = timer_index - 1;
3119 
3120 			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
3121 			q->next_intr_params =
3122 					QINTR_TIMER_IDX_V(timer_index) |
3123 					QINTR_CNT_EN_V(0);
3124 			params = q->next_intr_params;
3125 		} else {
3126 			params = q->next_intr_params;
3127 			q->next_intr_params = q->intr_params;
3128 		}
3129 	} else
3130 		params = QINTR_TIMER_IDX_V(7);
3131 
3132 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
3133 
3134 	/* If we don't have access to the new User GTS (T5+), use the old
3135 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3136 	 */
3137 	if (unlikely(q->bar2_addr == NULL)) {
3138 		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
3139 			     val | INGRESSQID_V((u32)q->cntxt_id));
3140 	} else {
3141 		writel(val | INGRESSQID_V(q->bar2_qid),
3142 		       q->bar2_addr + SGE_UDB_GTS);
3143 		wmb();
3144 	}
3145 	return work_done;
3146 }
3147 
3148 /*
3149  * The MSI-X interrupt handler for an SGE response queue.
3150  */
3151 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
3152 {
3153 	struct sge_rspq *q = cookie;
3154 
3155 	napi_schedule(&q->napi);
3156 	return IRQ_HANDLED;
3157 }
3158 
3159 /*
3160  * Process the indirect interrupt entries in the interrupt queue and kick off
3161  * NAPI for each queue that has generated an entry.
3162  */
3163 static unsigned int process_intrq(struct adapter *adap)
3164 {
3165 	unsigned int credits;
3166 	const struct rsp_ctrl *rc;
3167 	struct sge_rspq *q = &adap->sge.intrq;
3168 	u32 val;
3169 
3170 	spin_lock(&adap->sge.intrq_lock);
3171 	for (credits = 0; ; credits++) {
3172 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3173 		if (!is_new_response(rc, q))
3174 			break;
3175 
3176 		dma_rmb();
3177 		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
3178 			unsigned int qid = ntohl(rc->pldbuflen_qid);
3179 
3180 			qid -= adap->sge.ingr_start;
3181 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
3182 		}
3183 
3184 		rspq_next(q);
3185 	}
3186 
3187 	val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
3188 
3189 	/* If we don't have access to the new User GTS (T5+), use the old
3190 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3191 	 */
3192 	if (unlikely(q->bar2_addr == NULL)) {
3193 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
3194 			     val | INGRESSQID_V(q->cntxt_id));
3195 	} else {
3196 		writel(val | INGRESSQID_V(q->bar2_qid),
3197 		       q->bar2_addr + SGE_UDB_GTS);
3198 		wmb();
3199 	}
3200 	spin_unlock(&adap->sge.intrq_lock);
3201 	return credits;
3202 }
3203 
3204 /*
3205  * The MSI interrupt handler, which handles data events from SGE response queues
3206  * as well as error and other async events as they all use the same MSI vector.
3207  */
3208 static irqreturn_t t4_intr_msi(int irq, void *cookie)
3209 {
3210 	struct adapter *adap = cookie;
3211 
3212 	if (adap->flags & MASTER_PF)
3213 		t4_slow_intr_handler(adap);
3214 	process_intrq(adap);
3215 	return IRQ_HANDLED;
3216 }
3217 
3218 /*
3219  * Interrupt handler for legacy INTx interrupts.
3220  * Handles data events from SGE response queues as well as error and other
3221  * async events as they all use the same interrupt line.
3222  */
3223 static irqreturn_t t4_intr_intx(int irq, void *cookie)
3224 {
3225 	struct adapter *adap = cookie;
3226 
3227 	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
3228 	if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
3229 	    process_intrq(adap))
3230 		return IRQ_HANDLED;
3231 	return IRQ_NONE;             /* probably shared interrupt */
3232 }
3233 
3234 /**
3235  *	t4_intr_handler - select the top-level interrupt handler
3236  *	@adap: the adapter
3237  *
3238  *	Selects the top-level interrupt handler based on the type of interrupts
3239  *	(MSI-X, MSI, or INTx).
3240  */
3241 irq_handler_t t4_intr_handler(struct adapter *adap)
3242 {
3243 	if (adap->flags & USING_MSIX)
3244 		return t4_sge_intr_msix;
3245 	if (adap->flags & USING_MSI)
3246 		return t4_intr_msi;
3247 	return t4_intr_intx;
3248 }
3249 
3250 static void sge_rx_timer_cb(struct timer_list *t)
3251 {
3252 	unsigned long m;
3253 	unsigned int i;
3254 	struct adapter *adap = from_timer(adap, t, sge.rx_timer);
3255 	struct sge *s = &adap->sge;
3256 
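	/* Scan the bitmap of starving Free Lists.  Clear each bit and, if the
	 * FL is still below its starvation threshold, try to reschedule the
	 * owning queue's NAPI handler; if that fails, mark the FL as starving
	 * again so the next timer run retries.
	 */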
3257 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
3258 		for (m = s->starving_fl[i]; m; m &= m - 1) {
3259 			struct sge_eth_rxq *rxq;
3260 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
3261 			struct sge_fl *fl = s->egr_map[id];
3262 
3263 			clear_bit(id, s->starving_fl);
3264 			smp_mb__after_atomic();
3265 
3266 			if (fl_starving(adap, fl)) {
3267 				rxq = container_of(fl, struct sge_eth_rxq, fl);
3268 				if (napi_reschedule(&rxq->rspq.napi))
3269 					fl->starving++;
3270 				else
3271 					set_bit(id, s->starving_fl);
3272 			}
3273 		}
3274 	/* The remainder of the SGE RX Timer Callback routine is dedicated to
3275 	 * global Master PF activities like checking for chip ingress stalls,
3276 	 * etc.
3277 	 */
3278 	if (!(adap->flags & MASTER_PF))
3279 		goto done;
3280 
3281 	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
3282 
3283 done:
3284 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
3285 }
3286 
3287 static void sge_tx_timer_cb(struct timer_list *t)
3288 {
3289 	unsigned long m;
3290 	unsigned int i, budget;
3291 	struct adapter *adap = from_timer(adap, t, sge.tx_timer);
3292 	struct sge *s = &adap->sge;
3293 
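	/* Restart any offload Tx queues that were stalled by a DMA mapping
	 * error; their resume tasklets retry the pending sends.
	 */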
3294 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
3295 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
3296 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
3297 			struct sge_uld_txq *txq = s->egr_map[id];
3298 
3299 			clear_bit(id, s->txq_maperr);
3300 			tasklet_schedule(&txq->qresume_tsk);
3301 		}
3302 
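	/* On T5 and later, reclaim completed descriptors from the dedicated
	 * PTP Tx queue; it is protected by ptp_lock rather than a netdev Tx
	 * queue lock.
	 */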
3303 	if (!is_t4(adap->params.chip)) {
3304 		struct sge_eth_txq *q = &s->ptptxq;
3305 		int avail;
3306 
3307 		spin_lock(&adap->ptp_lock);
3308 		avail = reclaimable(&q->q);
3309 
3310 		if (avail) {
3311 			free_tx_desc(adap, &q->q, avail, false);
3312 			q->q.in_use -= avail;
3313 		}
3314 		spin_unlock(&adap->ptp_lock);
3315 	}
3316 
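	/* Reclaim completed Ethernet Tx descriptors round-robin, resuming
	 * where the previous run stopped and reclaiming at most
	 * MAX_TIMER_TX_RECLAIM descriptors in total; only queues that have
	 * not started a transmit for at least 10ms are touched.
	 */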
3317 	budget = MAX_TIMER_TX_RECLAIM;
3318 	i = s->ethtxq_rover;
3319 	do {
3320 		struct sge_eth_txq *q = &s->ethtxq[i];
3321 
3322 		if (q->q.in_use &&
3323 		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
3324 		    __netif_tx_trylock(q->txq)) {
3325 			int avail = reclaimable(&q->q);
3326 
3327 			if (avail) {
3328 				if (avail > budget)
3329 					avail = budget;
3330 
3331 				free_tx_desc(adap, &q->q, avail, true);
3332 				q->q.in_use -= avail;
3333 				budget -= avail;
3334 			}
3335 			__netif_tx_unlock(q->txq);
3336 		}
3337 
3338 		if (++i >= s->ethqsets)
3339 			i = 0;
3340 	} while (budget && i != s->ethtxq_rover);
3341 	s->ethtxq_rover = i;
3342 	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
3343 }
3344 
3345 /**
3346  *	bar2_address - return the BAR2 address for an SGE Queue's Registers
3347  *	@adapter: the adapter
3348  *	@qid: the SGE Queue ID
3349  *	@qtype: the SGE Queue Type (Egress or Ingress)
3350  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
3351  *
3352  *	Returns the BAR2 address for the SGE Queue Registers associated with
3353  *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
3354  *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
3355  *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
3356  *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
3357  */
3358 static void __iomem *bar2_address(struct adapter *adapter,
3359 				  unsigned int qid,
3360 				  enum t4_bar2_qtype qtype,
3361 				  unsigned int *pbar2_qid)
3362 {
3363 	u64 bar2_qoffset;
3364 	int ret;
3365 
3366 	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
3367 				&bar2_qoffset, pbar2_qid);
3368 	if (ret)
3369 		return NULL;
3370 
3371 	return adapter->bar2 + bar2_qoffset;
3372 }
3373 
3374 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
3375  * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
3376  */
3377 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
3378 		     struct net_device *dev, int intr_idx,
3379 		     struct sge_fl *fl, rspq_handler_t hnd,
3380 		     rspq_flush_handler_t flush_hnd, int cong)
3381 {
3382 	int ret, flsz = 0;
3383 	struct fw_iq_cmd c;
3384 	struct sge *s = &adap->sge;
3385 	struct port_info *pi = netdev_priv(dev);
3386 	int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
3387 
3388 	/* Size needs to be a multiple of 16, including the status entry. */
3389 	iq->size = roundup(iq->size, 16);
3390 
3391 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
3392 			      &iq->phys_addr, NULL, 0,
3393 			      dev_to_node(adap->pdev_dev));
3394 	if (!iq->desc)
3395 		return -ENOMEM;
3396 
3397 	memset(&c, 0, sizeof(c));
3398 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
3399 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3400 			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
3401 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
3402 				 FW_LEN16(c));
3403 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
3404 		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
3405 		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
3406 		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
3407 		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
3408 							-intr_idx - 1));
3409 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
3410 		FW_IQ_CMD_IQGTSMODE_F |
3411 		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
3412 		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
3413 	c.iqsize = htons(iq->size);
3414 	c.iqaddr = cpu_to_be64(iq->phys_addr);
3415 	if (cong >= 0)
3416 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
3417 				FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
3418 							:  FW_IQ_IQTYPE_OFLD));
3419 
3420 	if (fl) {
3421 		enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
3422 
3423 		/* Allocate the ring for the hardware free list (with space
3424 		 * for its status page) along with the associated software
3425 		 * descriptor ring.  The free list size needs to be a multiple
3426 		 * of the Egress Queue Unit and at least 2 Egress Units larger
3427 		 * than the SGE's Egress Congestion Threshold
3428 		 * (fl_starve_thres - 1).
3429 		 */
3430 		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
3431 			fl->size = s->fl_starve_thres - 1 + 2 * 8;
3432 		fl->size = roundup(fl->size, 8);
3433 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
3434 				      sizeof(struct rx_sw_desc), &fl->addr,
3435 				      &fl->sdesc, s->stat_len,
3436 				      dev_to_node(adap->pdev_dev));
3437 		if (!fl->desc)
3438 			goto fl_nomem;
3439 
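		/* Express the Free List length in hardware Egress Queue Units
		 * (8 pointers, i.e. 64 bytes, per unit) plus the units taken
		 * up by the status page.
		 */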
3440 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
3441 		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
3442 					     FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
3443 					     FW_IQ_CMD_FL0DATARO_V(relaxed) |
3444 					     FW_IQ_CMD_FL0PADEN_F);
3445 		if (cong >= 0)
3446 			c.iqns_to_fl0congen |=
3447 				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
3448 				      FW_IQ_CMD_FL0CONGCIF_F |
3449 				      FW_IQ_CMD_FL0CONGEN_F);
3450 		/* In T6, for egress queue type FL there is internal overhead
3451 		 * of 16B for header going into FLM module.  Hence the maximum
3452 		 * allowed burst size is 448 bytes.  For T4/T5, the hardware
3453 		 * doesn't coalesce fetch requests if more than 64 bytes of
3454 		 * Free List pointers are provided, so we use a 128-byte Fetch
3455 		 * Burst Minimum there (T6 implements coalescing so we can use
3456 		 * the smaller 64-byte value there).
3457 		 */
3458 		c.fl0dcaen_to_fl0cidxfthresh =
3459 			htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
3460 						   FETCHBURSTMIN_128B_X :
3461 						   FETCHBURSTMIN_64B_X) |
3462 			      FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
3463 						   FETCHBURSTMAX_512B_X :
3464 						   FETCHBURSTMAX_256B_X));
3465 		c.fl0size = htons(flsz);
3466 		c.fl0addr = cpu_to_be64(fl->addr);
3467 	}
3468 
3469 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3470 	if (ret)
3471 		goto err;
3472 
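	/* The firmware reply returns the context ids it assigned to the new
	 * Ingress Queue (and Free List, if any); record them and finish
	 * initializing the software state before publishing the queue in
	 * ingr_map.
	 */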
3473 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
3474 	iq->cur_desc = iq->desc;
3475 	iq->cidx = 0;
3476 	iq->gen = 1;
3477 	iq->next_intr_params = iq->intr_params;
3478 	iq->cntxt_id = ntohs(c.iqid);
3479 	iq->abs_id = ntohs(c.physiqid);
3480 	iq->bar2_addr = bar2_address(adap,
3481 				     iq->cntxt_id,
3482 				     T4_BAR2_QTYPE_INGRESS,
3483 				     &iq->bar2_qid);
3484 	iq->size--;                           /* subtract status entry */
3485 	iq->netdev = dev;
3486 	iq->handler = hnd;
3487 	iq->flush_handler = flush_hnd;
3488 
3489 	memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
3490 	skb_queue_head_init(&iq->lro_mgr.lroq);
3491 
3492 	/* set offset to -1 to distinguish ingress queues without FL */
3493 	iq->offset = fl ? 0 : -1;
3494 
3495 	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
3496 
3497 	if (fl) {
3498 		fl->cntxt_id = ntohs(c.fl0id);
3499 		fl->avail = fl->pend_cred = 0;
3500 		fl->pidx = fl->cidx = 0;
3501 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
3502 		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
3503 
3504 		/* Note, we must initialize the BAR2 Free List User Doorbell
3505 		 * information before refilling the Free List!
3506 		 */
3507 		fl->bar2_addr = bar2_address(adap,
3508 					     fl->cntxt_id,
3509 					     T4_BAR2_QTYPE_EGRESS,
3510 					     &fl->bar2_qid);
3511 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
3512 	}
3513 
3514 	/* For T5 and later we attempt to set up the Congestion Manager values
3515 	 * of the new RX Ethernet Queue.  This should really be handled by
3516 	 * firmware because it's more complex than any host driver wants to
3517 	 * get involved with, it differs per chip, and what we do here is
3518 	 * almost certainly wrong.  Firmware would be wrong as well, but it
3519 	 * would be a lot easier to fix in one place ...  For now we do
3520 	 * something very simple (and hopefully less wrong).
3521 	 */
3522 	if (!is_t4(adap->params.chip) && cong >= 0) {
3523 		u32 param, val, ch_map = 0;
3524 		int i;
3525 		u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
3526 
3527 		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
3528 			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
3529 			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
3530 		if (cong == 0) {
3531 			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
3532 		} else {
3533 			val =
3534 			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
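			/* Each channel selected in @cong occupies a field of
			 * 2^cng_ch_bits_log bits in the congestion channel
			 * map.
			 */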
3535 			for (i = 0; i < 4; i++) {
3536 				if (cong & (1 << i))
3537 					ch_map |= 1 << (i << cng_ch_bits_log);
3538 			}
3539 			val |= CONMCTXT_CNGCHMAP_V(ch_map);
3540 		}
3541 		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
3542 				    &param, &val);
3543 		if (ret)
3544 			dev_warn(adap->pdev_dev, "Failed to set Congestion"
3545 				 " Manager Context for Ingress Queue %d: %d\n",
3546 				 iq->cntxt_id, -ret);
3547 	}
3548 
3549 	return 0;
3550 
3551 fl_nomem:
3552 	ret = -ENOMEM;
3553 err:
3554 	if (iq->desc) {
3555 		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
3556 				  iq->desc, iq->phys_addr);
3557 		iq->desc = NULL;
3558 	}
3559 	if (fl && fl->desc) {
3560 		kfree(fl->sdesc);
3561 		fl->sdesc = NULL;
3562 		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
3563 				  fl->desc, fl->addr);
3564 		fl->desc = NULL;
3565 	}
3566 	return ret;
3567 }
3568 
3569 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
3570 {
3571 	q->cntxt_id = id;
3572 	q->bar2_addr = bar2_address(adap,
3573 				    q->cntxt_id,
3574 				    T4_BAR2_QTYPE_EGRESS,
3575 				    &q->bar2_qid);
3576 	q->in_use = 0;
3577 	q->cidx = q->pidx = 0;
3578 	q->stops = q->restarts = 0;
3579 	q->stat = (void *)&q->desc[q->size];
3580 	spin_lock_init(&q->db_lock);
3581 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
3582 }
3583 
3584 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
3585 			 struct net_device *dev, struct netdev_queue *netdevq,
3586 			 unsigned int iqid)
3587 {
3588 	int ret, nentries;
3589 	struct fw_eq_eth_cmd c;
3590 	struct sge *s = &adap->sge;
3591 	struct port_info *pi = netdev_priv(dev);
3592 
3593 	/* Add status entries */
3594 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3595 
3596 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3597 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
3598 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3599 			netdev_queue_numa_node_read(netdevq));
3600 	if (!txq->q.desc)
3601 		return -ENOMEM;
3602 
3603 	memset(&c, 0, sizeof(c));
3604 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
3605 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3606 			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
3607 			    FW_EQ_ETH_CMD_VFN_V(0));
3608 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
3609 				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
3610 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
3611 			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
3612 	c.fetchszm_to_iqid =
3613 		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3614 		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
3615 		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
3616 	c.dcaen_to_eqsize =
3617 		htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3618 		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3619 		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3620 		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
3621 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3622 
3623 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3624 	if (ret) {
3625 		kfree(txq->q.sdesc);
3626 		txq->q.sdesc = NULL;
3627 		dma_free_coherent(adap->pdev_dev,
3628 				  nentries * sizeof(struct tx_desc),
3629 				  txq->q.desc, txq->q.phys_addr);
3630 		txq->q.desc = NULL;
3631 		return ret;
3632 	}
3633 
3634 	txq->q.q_type = CXGB4_TXQ_ETH;
3635 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
3636 	txq->txq = netdevq;
3637 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
3638 	txq->mapping_err = 0;
3639 	return 0;
3640 }
3641 
3642 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
3643 			  struct net_device *dev, unsigned int iqid,
3644 			  unsigned int cmplqid)
3645 {
3646 	int ret, nentries;
3647 	struct fw_eq_ctrl_cmd c;
3648 	struct sge *s = &adap->sge;
3649 	struct port_info *pi = netdev_priv(dev);
3650 
3651 	/* Add status entries */
3652 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3653 
3654 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
3655 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
3656 				 NULL, 0, dev_to_node(adap->pdev_dev));
3657 	if (!txq->q.desc)
3658 		return -ENOMEM;
3659 
3660 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
3661 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3662 			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
3663 			    FW_EQ_CTRL_CMD_VFN_V(0));
3664 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
3665 				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
3666 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
3667 	c.physeqid_pkd = htonl(0);
3668 	c.fetchszm_to_iqid =
3669 		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3670 		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
3671 		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
3672 	c.dcaen_to_eqsize =
3673 		htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3674 		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3675 		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3676 		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
3677 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3678 
3679 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3680 	if (ret) {
3681 		dma_free_coherent(adap->pdev_dev,
3682 				  nentries * sizeof(struct tx_desc),
3683 				  txq->q.desc, txq->q.phys_addr);
3684 		txq->q.desc = NULL;
3685 		return ret;
3686 	}
3687 
3688 	txq->q.q_type = CXGB4_TXQ_CTRL;
3689 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
3690 	txq->adap = adap;
3691 	skb_queue_head_init(&txq->sendq);
3692 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
3693 	txq->full = 0;
3694 	return 0;
3695 }
3696 
3697 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
3698 			unsigned int cmplqid)
3699 {
3700 	u32 param, val;
3701 
3702 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
3703 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
3704 		 FW_PARAMS_PARAM_YZ_V(eqid));
3705 	val = cmplqid;
3706 	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
3707 }
3708 
3709 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
3710 			 struct net_device *dev, unsigned int iqid,
3711 			 unsigned int uld_type)
3712 {
3713 	int ret, nentries;
3714 	struct fw_eq_ofld_cmd c;
3715 	struct sge *s = &adap->sge;
3716 	struct port_info *pi = netdev_priv(dev);
3717 	int cmd = FW_EQ_OFLD_CMD;
3718 
3719 	/* Add status entries */
3720 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3721 
3722 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3723 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
3724 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3725 			NUMA_NO_NODE);
3726 	if (!txq->q.desc)
3727 		return -ENOMEM;
3728 
3729 	memset(&c, 0, sizeof(c));
3730 	if (unlikely(uld_type == CXGB4_TX_CRYPTO))
3731 		cmd = FW_EQ_CTRL_CMD;
3732 	c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
3733 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3734 			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
3735 			    FW_EQ_OFLD_CMD_VFN_V(0));
3736 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
3737 				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
3738 	c.fetchszm_to_iqid =
3739 		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3740 		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
3741 		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
3742 	c.dcaen_to_eqsize =
3743 		htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3744 		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3745 		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3746 		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
3747 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3748 
3749 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3750 	if (ret) {
3751 		kfree(txq->q.sdesc);
3752 		txq->q.sdesc = NULL;
3753 		dma_free_coherent(adap->pdev_dev,
3754 				  nentries * sizeof(struct tx_desc),
3755 				  txq->q.desc, txq->q.phys_addr);
3756 		txq->q.desc = NULL;
3757 		return ret;
3758 	}
3759 
3760 	txq->q.q_type = CXGB4_TXQ_ULD;
3761 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
3762 	txq->adap = adap;
3763 	skb_queue_head_init(&txq->sendq);
3764 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
3765 	txq->full = 0;
3766 	txq->mapping_err = 0;
3767 	return 0;
3768 }
3769 
3770 void free_txq(struct adapter *adap, struct sge_txq *q)
3771 {
3772 	struct sge *s = &adap->sge;
3773 
3774 	dma_free_coherent(adap->pdev_dev,
3775 			  q->size * sizeof(struct tx_desc) + s->stat_len,
3776 			  q->desc, q->phys_addr);
3777 	q->cntxt_id = 0;
3778 	q->sdesc = NULL;
3779 	q->desc = NULL;
3780 }
3781 
3782 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
3783 		  struct sge_fl *fl)
3784 {
3785 	struct sge *s = &adap->sge;
3786 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
3787 
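	/* Unhook the queue from the reverse ingress map and release its
	 * firmware context before freeing the host memory backing it.
	 */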
3788 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
3789 	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
3790 		   rq->cntxt_id, fl_id, 0xffff);
3791 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
3792 			  rq->desc, rq->phys_addr);
3793 	netif_napi_del(&rq->napi);
3794 	rq->netdev = NULL;
3795 	rq->cntxt_id = rq->abs_id = 0;
3796 	rq->desc = NULL;
3797 
3798 	if (fl) {
3799 		free_rx_bufs(adap, fl, fl->avail);
3800 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
3801 				  fl->desc, fl->addr);
3802 		kfree(fl->sdesc);
3803 		fl->sdesc = NULL;
3804 		fl->cntxt_id = 0;
3805 		fl->desc = NULL;
3806 	}
3807 }
3808 
3809 /**
3810  *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
3811  *      @adap: the adapter
3812  *      @n: number of queues
3813  *      @q: pointer to first queue
3814  *
3815  *      Release the resources of a consecutive block of offload Rx queues.
3816  */
3817 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
3818 {
3819 	for ( ; n; n--, q++)
3820 		if (q->rspq.desc)
3821 			free_rspq_fl(adap, &q->rspq,
3822 				     q->fl.size ? &q->fl : NULL);
3823 }
3824 
3825 /**
3826  *	t4_free_sge_resources - free SGE resources
3827  *	@adap: the adapter
3828  *
3829  *	Frees resources used by the SGE queue sets.
3830  */
3831 void t4_free_sge_resources(struct adapter *adap)
3832 {
3833 	int i;
3834 	struct sge_eth_rxq *eq;
3835 	struct sge_eth_txq *etq;
3836 
3837 	/* stop all Rx queues in order to start them draining */
3838 	for (i = 0; i < adap->sge.ethqsets; i++) {
3839 		eq = &adap->sge.ethrxq[i];
3840 		if (eq->rspq.desc)
3841 			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
3842 				   FW_IQ_TYPE_FL_INT_CAP,
3843 				   eq->rspq.cntxt_id,
3844 				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
3845 				   0xffff);
3846 	}
3847 
3848 	/* clean up Ethernet Tx/Rx queues */
3849 	for (i = 0; i < adap->sge.ethqsets; i++) {
3850 		eq = &adap->sge.ethrxq[i];
3851 		if (eq->rspq.desc)
3852 			free_rspq_fl(adap, &eq->rspq,
3853 				     eq->fl.size ? &eq->fl : NULL);
3854 
3855 		etq = &adap->sge.ethtxq[i];
3856 		if (etq->q.desc) {
3857 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3858 				       etq->q.cntxt_id);
3859 			__netif_tx_lock_bh(etq->txq);
3860 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3861 			__netif_tx_unlock_bh(etq->txq);
3862 			kfree(etq->q.sdesc);
3863 			free_txq(adap, &etq->q);
3864 		}
3865 	}
3866 
3867 	/* clean up control Tx queues */
3868 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
3869 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
3870 
3871 		if (cq->q.desc) {
3872 			tasklet_kill(&cq->qresume_tsk);
3873 			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
3874 					cq->q.cntxt_id);
3875 			__skb_queue_purge(&cq->sendq);
3876 			free_txq(adap, &cq->q);
3877 		}
3878 	}
3879 
3880 	if (adap->sge.fw_evtq.desc)
3881 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
3882 
3883 	if (adap->sge.intrq.desc)
3884 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
3885 
3886 	if (!is_t4(adap->params.chip)) {
3887 		etq = &adap->sge.ptptxq;
3888 		if (etq->q.desc) {
3889 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3890 				       etq->q.cntxt_id);
3891 			spin_lock_bh(&adap->ptp_lock);
3892 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3893 			spin_unlock_bh(&adap->ptp_lock);
3894 			kfree(etq->q.sdesc);
3895 			free_txq(adap, &etq->q);
3896 		}
3897 	}
3898 
3899 	/* clear the reverse egress queue map */
3900 	memset(adap->sge.egr_map, 0,
3901 	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
3902 }
3903 
3904 void t4_sge_start(struct adapter *adap)
3905 {
3906 	adap->sge.ethtxq_rover = 0;
3907 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
3908 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
3909 }
3910 
3911 /**
3912  *	t4_sge_stop - disable SGE operation
3913  *	@adap: the adapter
3914  *
3915  *	Stop tasklets and timers associated with the DMA engine.  Note that
3916  *	this is effective only if measures have been taken to disable any HW
3917  *	events that may restart them.
3918  */
3919 void t4_sge_stop(struct adapter *adap)
3920 {
3921 	int i;
3922 	struct sge *s = &adap->sge;
3923 
3924 	if (in_interrupt())  /* actions below require waiting */
3925 		return;
3926 
3927 	if (s->rx_timer.function)
3928 		del_timer_sync(&s->rx_timer);
3929 	if (s->tx_timer.function)
3930 		del_timer_sync(&s->tx_timer);
3931 
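	/* Kill the resume tasklets of any offload and crypto Tx queues so
	 * they cannot run again while the queues are being torn down.
	 */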
3932 	if (is_offload(adap)) {
3933 		struct sge_uld_txq_info *txq_info;
3934 
3935 		txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3936 		if (txq_info) {
3937 			struct sge_uld_txq *txq = txq_info->uldtxq;
3938 
3939 			for_each_ofldtxq(&adap->sge, i) {
3940 				if (txq->q.desc)
3941 					tasklet_kill(&txq->qresume_tsk);
3942 			}
3943 		}
3944 	}
3945 
3946 	if (is_pci_uld(adap)) {
3947 		struct sge_uld_txq_info *txq_info;
3948 
3949 		txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
3950 		if (txq_info) {
3951 			struct sge_uld_txq *txq = txq_info->uldtxq;
3952 
3953 			for_each_ofldtxq(&adap->sge, i) {
3954 				if (txq->q.desc)
3955 					tasklet_kill(&txq->qresume_tsk);
3956 			}
3957 		}
3958 	}
3959 
3960 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
3961 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
3962 
3963 		if (cq->q.desc)
3964 			tasklet_kill(&cq->qresume_tsk);
3965 	}
3966 }
3967 
3968 /**
3969  *	t4_sge_init_soft - grab core SGE values needed by SGE code
3970  *	@adap: the adapter
3971  *
3972  *	We need to grab the SGE operating parameters that we need to have
3973  *	in order to do our job and make sure we can live with them.
3974  */
3975 
3976 static int t4_sge_init_soft(struct adapter *adap)
3977 {
3978 	struct sge *s = &adap->sge;
3979 	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
3980 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
3981 	u32 ingress_rx_threshold;
3982 
3983 	/*
3984 	 * Verify that CPL messages are going to the Ingress Queue for
3985 	 * process_responses() and that only packet data is going to the
3986 	 * Free Lists.
3987 	 */
3988 	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
3989 	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
3990 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
3991 		return -EINVAL;
3992 	}
3993 
3994 	/*
3995 	 * Validate the Host Buffer Register Array indices that we want to
3996 	 * use ...
3997 	 *
3998 	 * XXX Note that we should really read through the Host Buffer Size
3999 	 * XXX register array and find the indices of the Buffer Sizes which
4000 	 * XXX meet our needs!
4001 	 */
4002 	#define READ_FL_BUF(x) \
4003 		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
4004 
4005 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
4006 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
4007 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
4008 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
4009 
4010 	/* We only bother using the Large Page logic if the Large Page Buffer
4011 	 * is larger than our Page Size Buffer.
4012 	 */
4013 	if (fl_large_pg <= fl_small_pg)
4014 		fl_large_pg = 0;
4015 
4016 	#undef READ_FL_BUF
4017 
4018 	/* The Page Size Buffer must be exactly equal to our Page Size and the
4019 	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
4020 	 */
4021 	if (fl_small_pg != PAGE_SIZE ||
4022 	    (fl_large_pg & (fl_large_pg-1)) != 0) {
4023 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
4024 			fl_small_pg, fl_large_pg);
4025 		return -EINVAL;
4026 	}
4027 	if (fl_large_pg)
4028 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
4029 
4030 	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
4031 	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
4032 		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
4033 			fl_small_mtu, fl_large_mtu);
4034 		return -EINVAL;
4035 	}
4036 
4037 	/*
4038 	 * Retrieve our RX interrupt holdoff timer values and counter
4039 	 * threshold values from the SGE parameters.
4040 	 */
4041 	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
4042 	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
4043 	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
4044 	s->timer_val[0] = core_ticks_to_us(adap,
4045 		TIMERVALUE0_G(timer_value_0_and_1));
4046 	s->timer_val[1] = core_ticks_to_us(adap,
4047 		TIMERVALUE1_G(timer_value_0_and_1));
4048 	s->timer_val[2] = core_ticks_to_us(adap,
4049 		TIMERVALUE2_G(timer_value_2_and_3));
4050 	s->timer_val[3] = core_ticks_to_us(adap,
4051 		TIMERVALUE3_G(timer_value_2_and_3));
4052 	s->timer_val[4] = core_ticks_to_us(adap,
4053 		TIMERVALUE4_G(timer_value_4_and_5));
4054 	s->timer_val[5] = core_ticks_to_us(adap,
4055 		TIMERVALUE5_G(timer_value_4_and_5));
4056 
4057 	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
4058 	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
4059 	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
4060 	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
4061 	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
4062 
4063 	return 0;
4064 }
4065 
4066 /**
4067  *     t4_sge_init - initialize SGE
4068  *     @adap: the adapter
4069  *
4070  *     Perform low-level SGE code initialization needed every time after a
4071  *     chip reset.
4072  */
4073 int t4_sge_init(struct adapter *adap)
4074 {
4075 	struct sge *s = &adap->sge;
4076 	u32 sge_control, sge_conm_ctrl;
4077 	int ret, egress_threshold;
4078 
4079 	/*
4080 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
4081 	 * t4_fixup_host_params().
4082 	 */
4083 	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
4084 	s->pktshift = PKTSHIFT_G(sge_control);
4085 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
4086 
4087 	s->fl_align = t4_fl_pkt_align(adap);
4088 	ret = t4_sge_init_soft(adap);
4089 	if (ret < 0)
4090 		return ret;
4091 
4092 	/*
4093 	 * A FL with <= fl_starve_thres buffers is starving and a periodic
4094 	 * timer will attempt to refill it.  This needs to be larger than the
4095 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
4096 	 * stuck waiting for new packets while the SGE is waiting for us to
4097 	 * give it more Free List entries.  (Note that the SGE's Egress
4098 	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
4099 	 * there was only a single field to control this.  For T5 there's the
4100 	 * original field which now only applies to Unpacked Mode Free List
4101 	 * buffers and a new field which only applies to Packed Mode Free List
4102 	 * buffers.
4103 	 */
4104 	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
4105 	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
4106 	case CHELSIO_T4:
4107 		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
4108 		break;
4109 	case CHELSIO_T5:
4110 		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
4111 		break;
4112 	case CHELSIO_T6:
4113 		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
4114 		break;
4115 	default:
4116 		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
4117 			CHELSIO_CHIP_VERSION(adap->params.chip));
4118 		return -EINVAL;
4119 	}
4120 	s->fl_starve_thres = 2*egress_threshold + 1;
4121 
4122 	t4_idma_monitor_init(adap, &s->idma_monitor);
4123 
4124 	/* Set up timers used for recurring callbacks to process RX and TX
4125 	 * administrative tasks.
4126 	 */
4127 	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
4128 	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
4129 
4130 	spin_lock_init(&s->intrq_lock);
4131 
4132 	return 0;
4133 }
4134