1 /*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/skbuff.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/ip.h>
41 #include <net/ipv6.h>
42 #include <net/tcp.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/prefetch.h>
45
46 #include "t4vf_common.h"
47 #include "t4vf_defs.h"
48
49 #include "../cxgb4/t4_regs.h"
50 #include "../cxgb4/t4_values.h"
51 #include "../cxgb4/t4fw_api.h"
52 #include "../cxgb4/t4_msg.h"
53
54 /*
55 * Constants ...
56 */
57 enum {
58 /*
59 * Egress Queue sizes, producer and consumer indices are all in units
60 * of Egress Context Units bytes. Note that as far as the hardware is
61 * concerned, the free list is an Egress Queue (the host produces free
62 * buffers which the hardware consumes) and free list entries are
63 * 64-bit PCI DMA addresses.
64 */
65 EQ_UNIT = SGE_EQ_IDXSIZE,
66 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
67 TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
68
69 /*
70 * Max number of TX descriptors we clean up at a time. Should be
71 * modest as freeing skbs isn't cheap and it happens while holding
72 * locks. We just need to free packets faster than they arrive; we
73 * eventually catch up and keep the amortized cost reasonable.
74 */
75 MAX_TX_RECLAIM = 16,
76
77 /*
78 * Max number of Rx buffers we replenish at a time. Again keep this
79 * modest, allocating buffers isn't cheap either.
80 */
81 MAX_RX_REFILL = 16,
82
83 /*
84 * Period of the Rx queue check timer. This timer is infrequent as it
85 * has something to do only when the system experiences severe memory
86 * shortage.
87 */
88 RX_QCHECK_PERIOD = (HZ / 2),
89
90 /*
91 * Period of the TX queue check timer and the maximum number of TX
92 * descriptors to be reclaimed by the TX timer.
93 */
94 TX_QCHECK_PERIOD = (HZ / 2),
95 MAX_TIMER_TX_RECLAIM = 100,
96
97 /*
98 * Suspend an Ethernet TX queue with fewer available descriptors than
99 * this. We always want to have room for a maximum sized packet:
100 * inline immediate data + MAX_SKB_FRAGS. This is the same as
101 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
102 * (see that function and its helpers for a description of the
103 * calculation).
104 */
105 ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
106 ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
107 ((ETHTXQ_MAX_FRAGS-1) & 1) +
108 2),
109 ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
110 sizeof(struct cpl_tx_pkt_lso_core) +
111 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
112 ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
113
114 ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
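/*
 * Illustrative arithmetic (a sketch, not used by the driver; assumes a
 * typical MAX_SKB_FRAGS of 17 and a 64-byte EQ_UNIT): ETHTXQ_MAX_FRAGS
 * is then 18 and ETHTXQ_MAX_SGL_LEN is (3 * 17) / 2 + (17 & 1) + 2 =
 * 25 + 1 + 2 = 28 flits. Adding ETHTXQ_MAX_HDR, dividing by
 * TXD_PER_EQ_UNIT (8 flits per descriptor) and keeping one spare
 * descriptor gives ETHTXQ_STOP_THRES.
 */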
115
116 /*
117 * Max TX descriptor space we allow for an Ethernet packet to be
118 * inlined into a WR. This is limited by the maximum value which
119 * we can specify for immediate data in the firmware Ethernet TX
120 * Work Request.
121 */
122 MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,
123
124 /*
125 * Max size of a WR sent through a control TX queue.
126 */
127 MAX_CTRL_WR_LEN = 256,
128
129 /*
130 * Maximum amount of data which we'll ever need to inline into a
131 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
132 */
133 MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
134 ? MAX_IMM_TX_PKT_LEN
135 : MAX_CTRL_WR_LEN),
136
137 /*
138 * For incoming packets less than RX_COPY_THRES, we copy the data into
139 * an skb rather than referencing the data. We allocate enough
140 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
141 * of the data (header).
142 */
143 RX_COPY_THRES = 256,
144 RX_PULL_LEN = 128,
145
146 /*
147 * Main body length for sk_buffs used for RX Ethernet packets with
148 * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
149 * pskb_may_pull() some room.
150 */
151 RX_SKB_LEN = 512,
152 };
153
154 /*
155 * Software state per TX descriptor.
156 */
157 struct tx_sw_desc {
158 struct sk_buff *skb; /* socket buffer of TX data source */
159 struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */
160 };
161
162 /*
163 * Software state per RX Free List descriptor. We keep track of the allocated
164 * FL page, its size, and its PCI DMA address (if the page is mapped). The FL
165 * page size and its PCI DMA mapped state are stored in the low bits of the
166 * PCI DMA address as per below.
167 */
168 struct rx_sw_desc {
169 struct page *page; /* Free List page buffer */
170 dma_addr_t dma_addr; /* PCI DMA address (if mapped) */
171 /* and flags (see below) */
172 };
173
174 /*
175 * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the
176 * SGE also uses the low 4 bits to determine the size of the buffer. It uses
177 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
178 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
179 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
180 * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is
181 * maintained in an inverse sense so the hardware never sees that bit high.
182 */
183 enum {
184 RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */
185 RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
186 };
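/*
 * Illustrative sketch (not part of the driver): a mapped buffer taken
 * from SGE_FL_BUFFER_SIZE[1] at bus address 0x12340000 is stored as
 * 0x12340000 | RX_LARGE_BUF. get_buf_addr() below masks the two low
 * bits back off, and is_buf_mapped() reports true because
 * RX_UNMAPPED_BUF is clear.
 */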
187
188 /**
189 * get_buf_addr - return DMA buffer address of software descriptor
190 * @sdesc: pointer to the software buffer descriptor
191 *
192 * Return the DMA buffer address of a software descriptor (stripping out
193 * our low-order flag bits).
194 */
195 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
196 {
197 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
198 }
199
200 /**
201 * is_buf_mapped - is buffer mapped for DMA?
202 * @sdesc: pointer to the software buffer descriptor
203 *
204 * Determine whether the buffer associated with a software descriptor is
205 * mapped for DMA or not.
206 */
207 static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
208 {
209 return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
210 }
211
212 /**
213 * need_skb_unmap - does the platform need unmapping of sk_buffs?
214 *
215 * Returns true if the platform needs sk_buff unmapping. The compiler
216 * optimizes away unnecessary code if this returns false.
217 */
218 static inline int need_skb_unmap(void)
219 {
220 #ifdef CONFIG_NEED_DMA_MAP_STATE
221 return 1;
222 #else
223 return 0;
224 #endif
225 }
226
227 /**
228 * txq_avail - return the number of available slots in a TX queue
229 * @tq: the TX queue
230 *
231 * Returns the number of available descriptors in a TX queue.
232 */
233 static inline unsigned int txq_avail(const struct sge_txq *tq)
234 {
235 return tq->size - 1 - tq->in_use;
236 }
237
238 /**
239 * fl_cap - return the capacity of a Free List
240 * @fl: the Free List
241 *
242 * Returns the capacity of a Free List. The capacity is less than the
243 * size because an Egress Queue Index Unit worth of descriptors needs to
244 * be left unpopulated, otherwise the Producer and Consumer indices PIDX
245 * and CIDX will match and the hardware will think the FL is empty.
246 */
247 static inline unsigned int fl_cap(const struct sge_fl *fl)
248 {
249 return fl->size - FL_PER_EQ_UNIT;
250 }
251
252 /**
253 * fl_starving - return whether a Free List is starving.
254 * @adapter: pointer to the adapter
255 * @fl: the Free List
256 *
257 * Tests specified Free List to see whether the number of buffers
258 * available to the hardware has fallen below our "starvation"
259 * threshold.
260 */
261 static inline bool fl_starving(const struct adapter *adapter,
262 const struct sge_fl *fl)
263 {
264 const struct sge *s = &adapter->sge;
265
266 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
267 }
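/*
 * Illustrative arithmetic (values are made up): with 112 buffers
 * available, 16 of them still pending a doorbell, and a starvation
 * threshold of 100, fl_starving() sees 112 - 16 = 96 <= 100 and the
 * Free List is flagged for the starvation timer to refill.
 */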
268
269 /**
270 * map_skb - map an skb for DMA to the device
271 * @dev: the egress net device
272 * @skb: the packet to map
273 * @addr: a pointer to the base of the DMA mapping array
274 *
275 * Map an skb for DMA to the device and return an array of DMA addresses.
276 */
277 static int map_skb(struct device *dev, const struct sk_buff *skb,
278 dma_addr_t *addr)
279 {
280 const skb_frag_t *fp, *end;
281 const struct skb_shared_info *si;
282
283 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
284 if (dma_mapping_error(dev, *addr))
285 goto out_err;
286
287 si = skb_shinfo(skb);
288 end = &si->frags[si->nr_frags];
289 for (fp = si->frags; fp < end; fp++) {
290 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
291 DMA_TO_DEVICE);
292 if (dma_mapping_error(dev, *addr))
293 goto unwind;
294 }
295 return 0;
296
297 unwind:
298 while (fp-- > si->frags)
299 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
300 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
301
302 out_err:
303 return -ENOMEM;
304 }
305
306 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
307 const struct ulptx_sgl *sgl, const struct sge_txq *tq)
308 {
309 const struct ulptx_sge_pair *p;
310 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
311
312 if (likely(skb_headlen(skb)))
313 dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
314 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
315 else {
316 dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
317 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
318 nfrags--;
319 }
320
321 /*
322 * the complexity below is because of the possibility of a wrap-around
323 * in the middle of an SGL
324 */
325 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
326 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
327 unmap:
328 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
329 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
330 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
331 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
332 p++;
333 } else if ((u8 *)p == (u8 *)tq->stat) {
334 p = (const struct ulptx_sge_pair *)tq->desc;
335 goto unmap;
336 } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
337 const __be64 *addr = (const __be64 *)tq->desc;
338
339 dma_unmap_page(dev, be64_to_cpu(addr[0]),
340 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
341 dma_unmap_page(dev, be64_to_cpu(addr[1]),
342 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
343 p = (const struct ulptx_sge_pair *)&addr[2];
344 } else {
345 const __be64 *addr = (const __be64 *)tq->desc;
346
347 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
348 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
349 dma_unmap_page(dev, be64_to_cpu(addr[0]),
350 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
351 p = (const struct ulptx_sge_pair *)&addr[1];
352 }
353 }
354 if (nfrags) {
355 __be64 addr;
356
357 if ((u8 *)p == (u8 *)tq->stat)
358 p = (const struct ulptx_sge_pair *)tq->desc;
359 addr = ((u8 *)p + 16 <= (u8 *)tq->stat
360 ? p->addr[0]
361 : *(const __be64 *)tq->desc);
362 dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
363 DMA_TO_DEVICE);
364 }
365 }
366
367 /**
368 * free_tx_desc - reclaims TX descriptors and their buffers
369 * @adapter: the adapter
370 * @tq: the TX queue to reclaim descriptors from
371 * @n: the number of descriptors to reclaim
372 * @unmap: whether the buffers should be unmapped for DMA
373 *
374 * Reclaims TX descriptors from an SGE TX queue and frees the associated
375 * TX buffers. Called with the TX queue lock held.
376 */
377 static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
378 unsigned int n, bool unmap)
379 {
380 struct tx_sw_desc *sdesc;
381 unsigned int cidx = tq->cidx;
382 struct device *dev = adapter->pdev_dev;
383
384 const int need_unmap = need_skb_unmap() && unmap;
385
386 sdesc = &tq->sdesc[cidx];
387 while (n--) {
388 /*
389 * If we kept a reference to the original TX skb, we need to
390 * unmap it from PCI DMA space (if required) and free it.
391 */
392 if (sdesc->skb) {
393 if (need_unmap)
394 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
395 dev_consume_skb_any(sdesc->skb);
396 sdesc->skb = NULL;
397 }
398
399 sdesc++;
400 if (++cidx == tq->size) {
401 cidx = 0;
402 sdesc = tq->sdesc;
403 }
404 }
405 tq->cidx = cidx;
406 }
407
408 /*
409 * Return the number of reclaimable descriptors in a TX queue.
410 */
411 static inline int reclaimable(const struct sge_txq *tq)
412 {
413 int hw_cidx = be16_to_cpu(tq->stat->cidx);
414 int reclaimable = hw_cidx - tq->cidx;
415 if (reclaimable < 0)
416 reclaimable += tq->size;
417 return reclaimable;
418 }
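/*
 * Worked example (illustrative only): if the hardware consumer index
 * read from the status page is 5 while our software cidx is 1020 on a
 * 1024-entry queue, the difference is -1015, and adding tq->size back
 * gives 9 reclaimable descriptors.
 */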
419
420 /**
421 * reclaim_completed_tx - reclaims completed TX descriptors
422 * @adapter: the adapter
423 * @tq: the TX queue to reclaim completed descriptors from
424 * @unmap: whether the buffers should be unmapped for DMA
425 *
426 * Reclaims TX descriptors that the SGE has indicated it has processed,
427 * and frees the associated buffers if possible. Called with the TX
428 * queue locked.
429 */
430 static inline void reclaim_completed_tx(struct adapter *adapter,
431 struct sge_txq *tq,
432 bool unmap)
433 {
434 int avail = reclaimable(tq);
435
436 if (avail) {
437 /*
438 * Limit the amount of clean up work we do at a time to keep
439 * the TX lock hold time O(1).
440 */
441 if (avail > MAX_TX_RECLAIM)
442 avail = MAX_TX_RECLAIM;
443
444 free_tx_desc(adapter, tq, avail, unmap);
445 tq->in_use -= avail;
446 }
447 }
448
449 /**
450 * get_buf_size - return the size of an RX Free List buffer.
451 * @adapter: pointer to the associated adapter
452 * @sdesc: pointer to the software buffer descriptor
453 */
454 static inline int get_buf_size(const struct adapter *adapter,
455 const struct rx_sw_desc *sdesc)
456 {
457 const struct sge *s = &adapter->sge;
458
459 return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
460 ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
461 }
462
463 /**
464 * free_rx_bufs - free RX buffers on an SGE Free List
465 * @adapter: the adapter
466 * @fl: the SGE Free List to free buffers from
467 * @n: how many buffers to free
468 *
469 * Release the next @n buffers on an SGE Free List RX queue. The
470 * buffers must be made inaccessible to hardware before calling this
471 * function.
472 */
473 static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
474 {
475 while (n--) {
476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
477
478 if (is_buf_mapped(sdesc))
479 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
480 get_buf_size(adapter, sdesc),
481 PCI_DMA_FROMDEVICE);
482 put_page(sdesc->page);
483 sdesc->page = NULL;
484 if (++fl->cidx == fl->size)
485 fl->cidx = 0;
486 fl->avail--;
487 }
488 }
489
490 /**
491 * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
492 * @adapter: the adapter
493 * @fl: the SGE Free List
494 *
495 * Unmap the current buffer on an SGE Free List RX queue. The
496 * buffer must be made inaccessible to HW before calling this function.
497 *
498 * This is similar to @free_rx_bufs above but does not free the buffer.
499 * Do note that the FL still loses any further access to the buffer.
500 * This is used predominantly to "transfer ownership" of an FL buffer
501 * to another entity (typically an skb's fragment list).
502 */
503 static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
504 {
505 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
506
507 if (is_buf_mapped(sdesc))
508 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
509 get_buf_size(adapter, sdesc),
510 PCI_DMA_FROMDEVICE);
511 sdesc->page = NULL;
512 if (++fl->cidx == fl->size)
513 fl->cidx = 0;
514 fl->avail--;
515 }
516
517 /**
518 * ring_fl_db - ring doorbell on free list
519 * @adapter: the adapter
520 * @fl: the Free List whose doorbell should be rung ...
521 *
522 * Tell the Scatter Gather Engine that there are new free list entries
523 * available.
524 */
525 static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
526 {
527 u32 val = adapter->params.arch.sge_fl_db;
528
529 /* The SGE keeps track of its Producer and Consumer Indices in terms
530 * of Egress Queue Units so we can only tell it about integral numbers
531 * of multiples of Free List Entries per Egress Queue Units ...
532 */
533 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
534 if (is_t4(adapter->params.chip))
535 val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
536 else
537 val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
538
539 /* Make sure all memory writes to the Free List queue are
540 * committed before we tell the hardware about them.
541 */
542 wmb();
543
544 /* If we don't have access to the new User Doorbell (T5+), use
545 * the old doorbell mechanism; otherwise use the new BAR2
546 * mechanism.
547 */
548 if (unlikely(fl->bar2_addr == NULL)) {
549 t4_write_reg(adapter,
550 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
551 QID_V(fl->cntxt_id) | val);
552 } else {
553 writel(val | QID_V(fl->bar2_qid),
554 fl->bar2_addr + SGE_UDB_KDOORBELL);
555
556 /* This Write memory Barrier will force the write to
557 * the User Doorbell area to be flushed.
558 */
559 wmb();
560 }
561 fl->pend_cred %= FL_PER_EQ_UNIT;
562 }
563 }
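/*
 * Illustrative arithmetic (assuming EQ_UNIT == 64, so FL_PER_EQ_UNIT
 * == 8): with fl->pend_cred == 19 we tell the SGE about 19 / 8 = 2
 * Egress Queue Units worth of new buffers and keep the remaining
 * 19 % 8 = 3 credits pending until more buffers are posted.
 */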
564
565 /**
566 * set_rx_sw_desc - initialize software RX buffer descriptor
567 * @sdesc: pointer to the software RX buffer descriptor
568 * @page: pointer to the page data structure backing the RX buffer
569 * @dma_addr: PCI DMA address (possibly with low-bit flags)
570 */
571 static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
572 dma_addr_t dma_addr)
573 {
574 sdesc->page = page;
575 sdesc->dma_addr = dma_addr;
576 }
577
578 /*
579 * Support for poisoning RX buffers ...
580 */
581 #define POISON_BUF_VAL -1
582
583 static inline void poison_buf(struct page *page, size_t sz)
584 {
585 #if POISON_BUF_VAL >= 0
586 memset(page_address(page), POISON_BUF_VAL, sz);
587 #endif
588 }
589
590 /**
591 * refill_fl - refill an SGE RX buffer ring
592 * @adapter: the adapter
593 * @fl: the Free List ring to refill
594 * @n: the number of new buffers to allocate
595 * @gfp: the gfp flags for the allocations
596 *
597 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
598 * allocated with the supplied gfp flags. The caller must assure that
599 * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
600 * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number
601 * of buffers allocated. If afterwards the queue is found critically low,
602 * mark it as starving in the bitmap of starving FLs.
603 */
604 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
605 int n, gfp_t gfp)
606 {
607 struct sge *s = &adapter->sge;
608 struct page *page;
609 dma_addr_t dma_addr;
610 unsigned int cred = fl->avail;
611 __be64 *d = &fl->desc[fl->pidx];
612 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
613
614 /*
615 * Sanity: ensure that the result of adding n Free List buffers
616 * won't result in wrapping the SGE's Producer Index around to
617 * its Consumer Index thereby indicating an empty Free List ...
618 */
619 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
620
621 gfp |= __GFP_NOWARN;
622
623 /*
624 * If we support large pages, prefer large buffers and fail over to
625 * small pages if we can't allocate large pages to satisfy the refill.
626 * If we don't support large pages, drop directly into the small page
627 * allocation code.
628 */
629 if (s->fl_pg_order == 0)
630 goto alloc_small_pages;
631
632 while (n) {
633 page = __dev_alloc_pages(gfp, s->fl_pg_order);
634 if (unlikely(!page)) {
635 /*
636 * We've failed in our attempt to allocate a "large
637 * page". Fail over to the "small page" allocation
638 * below.
639 */
640 fl->large_alloc_failed++;
641 break;
642 }
643 poison_buf(page, PAGE_SIZE << s->fl_pg_order);
644
645 dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
646 PAGE_SIZE << s->fl_pg_order,
647 PCI_DMA_FROMDEVICE);
648 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
649 /*
650 * We've run out of DMA mapping space. Free up the
651 * buffer and return with what we've managed to put
652 * into the free list. We don't want to fail over to
653 * the small page allocation below in this case
654 * because DMA mapping resources are typically
655 * critical resources once they become scarce.
656 */
657 __free_pages(page, s->fl_pg_order);
658 goto out;
659 }
660 dma_addr |= RX_LARGE_BUF;
661 *d++ = cpu_to_be64(dma_addr);
662
663 set_rx_sw_desc(sdesc, page, dma_addr);
664 sdesc++;
665
666 fl->avail++;
667 if (++fl->pidx == fl->size) {
668 fl->pidx = 0;
669 sdesc = fl->sdesc;
670 d = fl->desc;
671 }
672 n--;
673 }
674
675 alloc_small_pages:
676 while (n--) {
677 page = __dev_alloc_page(gfp);
678 if (unlikely(!page)) {
679 fl->alloc_failed++;
680 break;
681 }
682 poison_buf(page, PAGE_SIZE);
683
684 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
685 PCI_DMA_FROMDEVICE);
686 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
687 put_page(page);
688 break;
689 }
690 *d++ = cpu_to_be64(dma_addr);
691
692 set_rx_sw_desc(sdesc, page, dma_addr);
693 sdesc++;
694
695 fl->avail++;
696 if (++fl->pidx == fl->size) {
697 fl->pidx = 0;
698 sdesc = fl->sdesc;
699 d = fl->desc;
700 }
701 }
702
703 out:
704 /*
705 * Update our accounting state to incorporate the new Free List
706 * buffers, tell the hardware about them and return the number of
707 * buffers which we were able to allocate.
708 */
709 cred = fl->avail - cred;
710 fl->pend_cred += cred;
711 ring_fl_db(adapter, fl);
712
713 if (unlikely(fl_starving(adapter, fl))) {
714 smp_wmb();
715 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
716 }
717
718 return cred;
719 }
720
721 /*
722 * Refill a Free List to its capacity or the Maximum Refill Increment,
723 * whichever is smaller ...
724 */
725 static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
726 {
727 refill_fl(adapter, fl,
728 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
729 GFP_ATOMIC);
730 }
731
732 /**
733 * alloc_ring - allocate resources for an SGE descriptor ring
734 * @dev: the PCI device's core device
735 * @nelem: the number of descriptors
736 * @hwsize: the size of each hardware descriptor
737 * @swsize: the size of each software descriptor
738 * @busaddrp: the physical PCI bus address of the allocated ring
739 * @swringp: return address pointer for software ring
740 * @stat_size: extra space in hardware ring for status information
741 *
742 * Allocates resources for an SGE descriptor ring, such as TX queues,
743 * free buffer lists, response queues, etc. Each SGE ring requires
744 * space for its hardware descriptors plus, optionally, space for software
745 * state associated with each hardware entry (the metadata). The function
746 * returns three values: the virtual address for the hardware ring (the
747 * return value of the function), the PCI bus address of the hardware
748 * ring (in *busaddrp), and the address of the software ring (in swringp).
749 * Both the hardware and software rings are returned zeroed out.
750 */
751 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
752 size_t swsize, dma_addr_t *busaddrp, void *swringp,
753 size_t stat_size)
754 {
755 /*
756 * Allocate the hardware ring and PCI DMA bus address space for said.
757 */
758 size_t hwlen = nelem * hwsize + stat_size;
759 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
760
761 if (!hwring)
762 return NULL;
763
764 /*
765 * If the caller wants a software ring, allocate it and return a
766 * pointer to it in *swringp.
767 */
768 BUG_ON((swsize != 0) != (swringp != NULL));
769 if (swsize) {
770 void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
771
772 if (!swring) {
773 dma_free_coherent(dev, hwlen, hwring, *busaddrp);
774 return NULL;
775 }
776 *(void **)swringp = swring;
777 }
778
779 return hwring;
780 }
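/*
 * Hypothetical usage sketch (field and variable names here are
 * illustrative, not taken from this driver): a Free List ring could be
 * set up as
 *
 *	fl->desc = alloc_ring(adapter->pdev_dev, fl->size, sizeof(__be64),
 *			      sizeof(struct rx_sw_desc), &fl->addr,
 *			      &fl->sdesc, stat_len);
 *
 * where stat_len reserves the trailing status-page space and
 * &fl->sdesc receives the zeroed software ring.
 */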
781
782 /**
783 * sgl_len - calculates the size of an SGL of the given capacity
784 * @n: the number of SGL entries
785 *
786 * Calculates the number of flits (8-byte units) needed for a Direct
787 * Scatter/Gather List that can hold the given number of entries.
788 */
789 static inline unsigned int sgl_len(unsigned int n)
790 {
791 /*
792 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
793 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
794 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
795 * repeated sequences of { Length[i], Length[i+1], Address[i],
796 * Address[i+1] } (this ensures that all addresses are on 64-bit
797 * boundaries). If N is even, then Length[N+1] should be set to 0 and
798 * Address[N+1] is omitted.
799 *
800 * The following calculation incorporates all of the above. It's
801 * somewhat hard to follow but, briefly: the "+2" accounts for the
802 * first two flits which include the DSGL header, Length0 and
803 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
804 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
805 * finally the "+((n-1)&1)" adds the one remaining flit needed if
806 * (n-1) is odd ...
807 */
808 n--;
809 return (3 * n) / 2 + (n & 1) + 2;
810 }
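/*
 * Worked example (illustrative only): for n == 5 SGL entries the
 * calculation uses n - 1 == 4, giving (3 * 4) / 2 + (4 & 1) + 2 =
 * 6 + 0 + 2 = 8 flits: two flits for the header, Length0 and Address0,
 * plus three flits for each of the two remaining {Length, Address}
 * pairs.
 */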
811
812 /**
813 * flits_to_desc - returns the num of TX descriptors for the given flits
814 * @flits: the number of flits
815 *
816 * Returns the number of TX descriptors needed for the supplied number
817 * of flits.
818 */
819 static inline unsigned int flits_to_desc(unsigned int flits)
820 {
821 BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
822 return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
823 }
824
825 /**
826 * is_eth_imm - can an Ethernet packet be sent as immediate data?
827 * @skb: the packet
828 *
829 * Returns whether an Ethernet packet is small enough to fit completely as
830 * immediate data.
831 */
832 static inline int is_eth_imm(const struct sk_buff *skb)
833 {
834 /*
835 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
836 * which does not accommodate immediate data. We could dike out all
837 * of the support code for immediate data but that would tie our hands
838 * too much if we ever want to enhance the firmware. It would also
839 * create more differences between the PF and VF Drivers.
840 */
841 return false;
842 }
843
844 /**
845 * calc_tx_flits - calculate the number of flits for a packet TX WR
846 * @skb: the packet
847 *
848 * Returns the number of flits needed for a TX Work Request for the
849 * given Ethernet packet, including the needed WR and CPL headers.
850 */
851 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
852 {
853 unsigned int flits;
854
855 /*
856 * If the skb is small enough, we can pump it out as a work request
857 * with only immediate data. In that case we just have to have the
858 * TX Packet header plus the skb data in the Work Request.
859 */
860 if (is_eth_imm(skb))
861 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
862 sizeof(__be64));
863
864 /*
865 * Otherwise, we're going to have to construct a Scatter gather list
866 * of the skb body and fragments. We also include the flits necessary
867 * for the TX Packet Work Request and CPL. We always have a firmware
868 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
869 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
870 * message or, if we're doing a Large Send Offload, an LSO CPL message
871 * with an embedded TX Packet Write CPL message.
872 */
873 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
874 if (skb_shinfo(skb)->gso_size)
875 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
876 sizeof(struct cpl_tx_pkt_lso_core) +
877 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
878 else
879 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
880 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
881 return flits;
882 }
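/*
 * Worked example (a sketch; the 32-byte WR and 16-byte CPL sizes are
 * assumptions about the firmware structures): a non-GSO packet with a
 * linear header plus two page fragments needs sgl_len(3) == 5 flits
 * for the SGL and (32 + 16) / 8 == 6 flits for the WR and CPL headers,
 * i.e. 11 flits, which flits_to_desc() rounds up to two 64-byte TX
 * descriptors when TXD_PER_EQ_UNIT == 8.
 */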
883
884 /**
885 * write_sgl - populate a Scatter/Gather List for a packet
886 * @skb: the packet
887 * @tq: the TX queue we are writing into
888 * @sgl: starting location for writing the SGL
889 * @end: points right after the end of the SGL
890 * @start: start offset into skb main-body data to include in the SGL
891 * @addr: the list of DMA bus addresses for the SGL elements
892 *
893 * Generates a Scatter/Gather List for the buffers that make up a packet.
894 * The caller must provide adequate space for the SGL that will be written.
895 * The SGL includes all of the packet's page fragments and the data in its
896 * main body except for the first @start bytes. @sgl must be 16-byte
897 * aligned and within a TX descriptor with available space. @end points
898 * right after the end of the SGL but does not account for any potential
899 * wrap around, i.e., @end > @tq->stat.
900 */
901 static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
902 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
903 const dma_addr_t *addr)
904 {
905 unsigned int i, len;
906 struct ulptx_sge_pair *to;
907 const struct skb_shared_info *si = skb_shinfo(skb);
908 unsigned int nfrags = si->nr_frags;
909 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
910
911 len = skb_headlen(skb) - start;
912 if (likely(len)) {
913 sgl->len0 = htonl(len);
914 sgl->addr0 = cpu_to_be64(addr[0] + start);
915 nfrags++;
916 } else {
917 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
918 sgl->addr0 = cpu_to_be64(addr[1]);
919 }
920
921 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
922 ULPTX_NSGE_V(nfrags));
923 if (likely(--nfrags == 0))
924 return;
925 /*
926 * Most of the complexity below deals with the possibility we hit the
927 * end of the queue in the middle of writing the SGL. For this case
928 * only we create the SGL in a temporary buffer and then copy it.
929 */
930 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
931
932 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
933 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
934 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
935 to->addr[0] = cpu_to_be64(addr[i]);
936 to->addr[1] = cpu_to_be64(addr[++i]);
937 }
938 if (nfrags) {
939 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
940 to->len[1] = cpu_to_be32(0);
941 to->addr[0] = cpu_to_be64(addr[i + 1]);
942 }
943 if (unlikely((u8 *)end > (u8 *)tq->stat)) {
944 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
945
946 if (likely(part0))
947 memcpy(sgl->sge, buf, part0);
948 part1 = (u8 *)end - (u8 *)tq->stat;
949 memcpy(tq->desc, (u8 *)buf + part0, part1);
950 end = (void *)tq->desc + part1;
951 }
952 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
953 *end = 0;
954 }
955
956 /**
957 * ring_tx_db - check and potentially ring a TX queue's doorbell
958 * @adapter: the adapter
959 * @tq: the TX queue
960 * @n: number of new descriptors to give to HW
961 *
962 * Ring the doorbell for a TX queue.
963 */
964 static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
965 int n)
966 {
967 /* Make sure that all writes to the TX Descriptors are committed
968 * before we tell the hardware about them.
969 */
970 wmb();
971
972 /* If we don't have access to the new User Doorbell (T5+), use the old
973 * doorbell mechanism; otherwise use the new BAR2 mechanism.
974 */
975 if (unlikely(tq->bar2_addr == NULL)) {
976 u32 val = PIDX_V(n);
977
978 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
979 QID_V(tq->cntxt_id) | val);
980 } else {
981 u32 val = PIDX_T5_V(n);
982
983 /* T4 and later chips share the same PIDX field offset within
984 * the doorbell, but T5 and later shrank the field in order to
985 * gain a bit for Doorbell Priority. The field was absurdly
986 * large in the first place (14 bits) so we just use the T5
987 * and later limits and warn if a Queue ID is too large.
988 */
989 WARN_ON(val & DBPRIO_F);
990
991 /* If we're only writing a single Egress Unit and the BAR2
992 * Queue ID is 0, we can use the Write Combining Doorbell
993 * Gather Buffer; otherwise we use the simple doorbell.
994 */
995 if (n == 1 && tq->bar2_qid == 0) {
996 unsigned int index = (tq->pidx
997 ? (tq->pidx - 1)
998 : (tq->size - 1));
999 __be64 *src = (__be64 *)&tq->desc[index];
1000 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1001 SGE_UDB_WCDOORBELL);
1002 unsigned int count = EQ_UNIT / sizeof(__be64);
1003
1004 /* Copy the TX Descriptor in a tight loop in order to
1005 * try to get it to the adapter in a single Write
1006 * Combined transfer on the PCI-E Bus. If the Write
1007 * Combine fails (say because of an interrupt, etc.)
1008 * the hardware will simply take the last write as a
1009 * simple doorbell write with a PIDX Increment of 1
1010 * and will fetch the TX Descriptor from memory via
1011 * DMA.
1012 */
1013 while (count) {
1014 /* the (__force u64) is because the compiler
1015 * doesn't understand the endian swizzling
1016 * going on
1017 */
1018 writeq((__force u64)*src, dst);
1019 src++;
1020 dst++;
1021 count--;
1022 }
1023 } else
1024 writel(val | QID_V(tq->bar2_qid),
1025 tq->bar2_addr + SGE_UDB_KDOORBELL);
1026
1027 /* This Write Memory Barrier will force the write to the User
1028 * Doorbell area to be flushed. This is needed to prevent
1029 * writes on different CPUs for the same queue from hitting
1030 * the adapter out of order. This is required when some Work
1031 * Requests take the Write Combine Gather Buffer path (user
1032 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
1033 * take the traditional path where we simply increment the
1034 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1035 * hardware DMA read the actual Work Request.
1036 */
1037 wmb();
1038 }
1039 }
1040
1041 /**
1042 * inline_tx_skb - inline a packet's data into TX descriptors
1043 * @skb: the packet
1044 * @tq: the TX queue where the packet will be inlined
1045 * @pos: starting position in the TX queue to inline the packet
1046 *
1047 * Inline a packet's contents directly into TX descriptors, starting at
1048 * the given position within the TX DMA ring.
1049 * Most of the complexity of this operation is dealing with wrap arounds
1050 * in the middle of the packet we want to inline.
1051 */
1052 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
1053 void *pos)
1054 {
1055 u64 *p;
1056 int left = (void *)tq->stat - pos;
1057
1058 if (likely(skb->len <= left)) {
1059 if (likely(!skb->data_len))
1060 skb_copy_from_linear_data(skb, pos, skb->len);
1061 else
1062 skb_copy_bits(skb, 0, pos, skb->len);
1063 pos += skb->len;
1064 } else {
1065 skb_copy_bits(skb, 0, pos, left);
1066 skb_copy_bits(skb, left, tq->desc, skb->len - left);
1067 pos = (void *)tq->desc + (skb->len - left);
1068 }
1069
1070 /* 0-pad to multiple of 16 */
1071 p = PTR_ALIGN(pos, 8);
1072 if ((uintptr_t)p & 8)
1073 *p = 0;
1074 }
1075
1076 /*
1077 * Figure out what HW csum a packet wants and return the appropriate control
1078 * bits.
1079 */
1080 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1081 {
1082 int csum_type;
1083 const struct iphdr *iph = ip_hdr(skb);
1084
1085 if (iph->version == 4) {
1086 if (iph->protocol == IPPROTO_TCP)
1087 csum_type = TX_CSUM_TCPIP;
1088 else if (iph->protocol == IPPROTO_UDP)
1089 csum_type = TX_CSUM_UDPIP;
1090 else {
1091 nocsum:
1092 /*
1093 * unknown protocol, disable HW csum
1094 * and hope a bad packet is detected
1095 */
1096 return TXPKT_L4CSUM_DIS_F;
1097 }
1098 } else {
1099 /*
1100 * this doesn't work with extension headers
1101 */
1102 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1103
1104 if (ip6h->nexthdr == IPPROTO_TCP)
1105 csum_type = TX_CSUM_TCPIP6;
1106 else if (ip6h->nexthdr == IPPROTO_UDP)
1107 csum_type = TX_CSUM_UDPIP6;
1108 else
1109 goto nocsum;
1110 }
1111
1112 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1113 u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
1114 int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1115
1116 if (chip <= CHELSIO_T5)
1117 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1118 else
1119 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1120 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1121 } else {
1122 int start = skb_transport_offset(skb);
1123
1124 return TXPKT_CSUM_TYPE_V(csum_type) |
1125 TXPKT_CSUM_START_V(start) |
1126 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1127 }
1128 }
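/*
 * Illustrative sketch (not part of the driver): for a CHECKSUM_PARTIAL
 * TCP/IPv4 packet with a standard 14-byte Ethernet header and a
 * 20-byte IP header on a T4/T5 chip, this returns
 * TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN_V(20) |
 * TXPKT_ETHHDR_LEN_V(0), i.e. full TCP/IP checksum offload with no
 * Ethernet header bytes beyond ETH_HLEN.
 */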
1129
1130 /*
1131 * Stop an Ethernet TX queue and record that state change.
1132 */
1133 static void txq_stop(struct sge_eth_txq *txq)
1134 {
1135 netif_tx_stop_queue(txq->txq);
1136 txq->q.stops++;
1137 }
1138
1139 /*
1140 * Advance our software state for a TX queue by adding n in use descriptors.
1141 */
1142 static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1143 {
1144 tq->in_use += n;
1145 tq->pidx += n;
1146 if (tq->pidx >= tq->size)
1147 tq->pidx -= tq->size;
1148 }
1149
1150 /**
1151 * t4vf_eth_xmit - add a packet to an Ethernet TX queue
1152 * @skb: the packet
1153 * @dev: the egress net device
1154 *
1155 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
1156 */
1157 netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1158 {
1159 u32 wr_mid;
1160 u64 cntrl, *end;
1161 int qidx, credits, max_pkt_len;
1162 unsigned int flits, ndesc;
1163 struct adapter *adapter;
1164 struct sge_eth_txq *txq;
1165 const struct port_info *pi;
1166 struct fw_eth_tx_pkt_vm_wr *wr;
1167 struct cpl_tx_pkt_core *cpl;
1168 const struct skb_shared_info *ssi;
1169 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1170 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
1171 sizeof(wr->ethmacsrc) +
1172 sizeof(wr->ethtype) +
1173 sizeof(wr->vlantci));
1174
1175 /*
1176 * The chip minimum packet length is 10 octets but the firmware
1177 * command that we are using requires that we copy the Ethernet header
1178 * (including the VLAN tag) into the header so we reject anything
1179 * smaller than that ...
1180 */
1181 if (unlikely(skb->len < fw_hdr_copy_len))
1182 goto out_free;
1183
1184 /* Discard the packet if the length is greater than mtu */
1185 max_pkt_len = ETH_HLEN + dev->mtu;
1186 if (skb_vlan_tagged(skb))
1187 max_pkt_len += VLAN_HLEN;
1188 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1189 goto out_free;
1190
1191 /*
1192 * Figure out which TX Queue we're going to use.
1193 */
1194 pi = netdev_priv(dev);
1195 adapter = pi->adapter;
1196 qidx = skb_get_queue_mapping(skb);
1197 BUG_ON(qidx >= pi->nqsets);
1198 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1199
1200 if (pi->vlan_id && !skb_vlan_tag_present(skb))
1201 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1202 pi->vlan_id);
1203
1204 /*
1205 * Take this opportunity to reclaim any TX Descriptors whose DMA
1206 * transfers have completed.
1207 */
1208 reclaim_completed_tx(adapter, &txq->q, true);
1209
1210 /*
1211 * Calculate the number of flits and TX Descriptors we're going to
1212 * need along with how many TX Descriptors will be left over after
1213 * we inject our Work Request.
1214 */
1215 flits = calc_tx_flits(skb);
1216 ndesc = flits_to_desc(flits);
1217 credits = txq_avail(&txq->q) - ndesc;
1218
1219 if (unlikely(credits < 0)) {
1220 /*
1221 * Not enough room for this packet's Work Request. Stop the
1222 * TX Queue and return a "busy" condition. The queue will get
1223 * started later on when the firmware informs us that space
1224 * has opened up.
1225 */
1226 txq_stop(txq);
1227 dev_err(adapter->pdev_dev,
1228 "%s: TX ring %u full while queue awake!\n",
1229 dev->name, qidx);
1230 return NETDEV_TX_BUSY;
1231 }
1232
1233 if (!is_eth_imm(skb) &&
1234 unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1235 /*
1236 * We need to map the skb into PCI DMA space (because it can't
1237 * be in-lined directly into the Work Request) and the mapping
1238 * operation failed. Record the error and drop the packet.
1239 */
1240 txq->mapping_err++;
1241 goto out_free;
1242 }
1243
1244 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1245 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1246 /*
1247 * After we're done injecting the Work Request for this
1248 * packet, we'll be below our "stop threshold" so stop the TX
1249 * Queue now and schedule a request for an SGE Egress Queue
1250 * Update message. The queue will get started later on when
1251 * the firmware processes this Work Request and sends us an
1252 * Egress Queue Status Update message indicating that space
1253 * has opened up.
1254 */
1255 txq_stop(txq);
1256 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1257 }
1258
1259 /*
1260 * Start filling in our Work Request. Note that we do _not_ handle
1261 * the WR Header wrapping around the TX Descriptor Ring. If our
1262 * maximum header size ever exceeds one TX Descriptor, we'll need to
1263 * do something else here.
1264 */
1265 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1266 wr = (void *)&txq->q.desc[txq->q.pidx];
1267 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1268 wr->r3[0] = cpu_to_be32(0);
1269 wr->r3[1] = cpu_to_be32(0);
1270 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1271 end = (u64 *)wr + flits;
1272
1273 /*
1274 * If this is a Large Send Offload packet we'll put in an LSO CPL
1275 * message with an encapsulated TX Packet CPL message. Otherwise we
1276 * just use a TX Packet CPL message.
1277 */
1278 ssi = skb_shinfo(skb);
1279 if (ssi->gso_size) {
1280 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1281 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1282 int l3hdr_len = skb_network_header_len(skb);
1283 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1284
1285 wr->op_immdlen =
1286 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1287 FW_WR_IMMDLEN_V(sizeof(*lso) +
1288 sizeof(*cpl)));
1289 /*
1290 * Fill in the LSO CPL message.
1291 */
1292 lso->lso_ctrl =
1293 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1294 LSO_FIRST_SLICE_F |
1295 LSO_LAST_SLICE_F |
1296 LSO_IPV6_V(v6) |
1297 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1298 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1299 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1300 lso->ipid_ofst = cpu_to_be16(0);
1301 lso->mss = cpu_to_be16(ssi->gso_size);
1302 lso->seqno_offset = cpu_to_be32(0);
1303 if (is_t4(adapter->params.chip))
1304 lso->len = cpu_to_be32(skb->len);
1305 else
1306 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1307
1308 /*
1309 * Set up TX Packet CPL pointer, control word and perform
1310 * accounting.
1311 */
1312 cpl = (void *)(lso + 1);
1313
1314 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1315 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1316 else
1317 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1318
1319 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1320 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1321 TXPKT_IPHDR_LEN_V(l3hdr_len);
1322 txq->tso++;
1323 txq->tx_cso += ssi->gso_segs;
1324 } else {
1325 int len;
1326
1327 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1328 wr->op_immdlen =
1329 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1330 FW_WR_IMMDLEN_V(len));
1331
1332 /*
1333 * Set up TX Packet CPL pointer, control word and perform
1334 * accounting.
1335 */
1336 cpl = (void *)(wr + 1);
1337 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1338 cntrl = hwcsum(adapter->params.chip, skb) |
1339 TXPKT_IPCSUM_DIS_F;
1340 txq->tx_cso++;
1341 } else
1342 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1343 }
1344
1345 /*
1346 * If there's a VLAN tag present, add that to the list of things to
1347 * do in this Work Request.
1348 */
1349 if (skb_vlan_tag_present(skb)) {
1350 txq->vlan_ins++;
1351 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1352 }
1353
1354 /*
1355 * Fill in the TX Packet CPL message header.
1356 */
1357 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1358 TXPKT_INTF_V(pi->port_id) |
1359 TXPKT_PF_V(0));
1360 cpl->pack = cpu_to_be16(0);
1361 cpl->len = cpu_to_be16(skb->len);
1362 cpl->ctrl1 = cpu_to_be64(cntrl);
1363
1364 #ifdef T4_TRACE
1365 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1366 "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1367 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1368 #endif
1369
1370 /*
1371 * Fill in the body of the TX Packet CPL message with either in-lined
1372 * data or a Scatter/Gather List.
1373 */
1374 if (is_eth_imm(skb)) {
1375 /*
1376 * In-line the packet's data and free the skb since we don't
1377 * need it any longer.
1378 */
1379 inline_tx_skb(skb, &txq->q, cpl + 1);
1380 dev_consume_skb_any(skb);
1381 } else {
1382 /*
1383 * Write the skb's Scatter/Gather list into the TX Packet CPL
1384 * message and retain a pointer to the skb so we can free it
1385 * later when its DMA completes. (We store the skb pointer
1386 * in the Software Descriptor corresponding to the last TX
1387 * Descriptor used by the Work Request.)
1388 *
1389 * The retained skb will be freed when the corresponding TX
1390 * Descriptors are reclaimed after their DMAs complete.
1391 * However, this could take quite a while since, in general,
1392 * the hardware is set up to be lazy about sending DMA
1393 * completion notifications to us and we mostly perform TX
1394 * reclaims in the transmit routine.
1395 *
1396 * This is good for performance but means that we rely on new
1397 * TX packets arriving to run the destructors of completed
1398 * packets, which open up space in their sockets' send queues.
1399 * Sometimes we do not get such new packets causing TX to
1400 * stall. A single UDP transmitter is a good example of this
1401 * situation. We have a clean up timer that periodically
1402 * reclaims completed packets but it doesn't run often enough
1403 * (nor do we want it to) to prevent lengthy stalls. A
1404 * solution to this problem is to run the destructor early,
1405 * after the packet is queued but before it's DMAd. A con is
1406 * that we lie to socket memory accounting, but the amount of
1407 * extra memory is reasonable (limited by the number of TX
1408 * descriptors), the packets do actually get freed quickly by
1409 * new packets almost always, and for protocols like TCP that
1410 * wait for acks to really free up the data the extra memory
1411 * is even less. On the positive side we run the destructors
1412 * on the sending CPU rather than on a potentially different
1413 * completing CPU, usually a good thing.
1414 *
1415 * Run the destructor before telling the DMA engine about the
1416 * packet to make sure it doesn't complete and get freed
1417 * prematurely.
1418 */
1419 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1420 struct sge_txq *tq = &txq->q;
1421 int last_desc;
1422
1423 /*
1424 * If the Work Request header was an exact multiple of our TX
1425 * Descriptor length, then it's possible that the starting SGL
1426 * pointer lines up exactly with the end of our TX Descriptor
1427 * ring. If that's the case, wrap around to the beginning
1428 * here ...
1429 */
1430 if (unlikely((void *)sgl == (void *)tq->stat)) {
1431 sgl = (void *)tq->desc;
1432 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1433 }
1434
1435 write_sgl(skb, tq, sgl, end, 0, addr);
1436 skb_orphan(skb);
1437
1438 last_desc = tq->pidx + ndesc - 1;
1439 if (last_desc >= tq->size)
1440 last_desc -= tq->size;
1441 tq->sdesc[last_desc].skb = skb;
1442 tq->sdesc[last_desc].sgl = sgl;
1443 }
1444
1445 /*
1446 * Advance our internal TX Queue state, tell the hardware about
1447 * the new TX descriptors and return success.
1448 */
1449 txq_advance(&txq->q, ndesc);
1450 netif_trans_update(dev);
1451 ring_tx_db(adapter, &txq->q, ndesc);
1452 return NETDEV_TX_OK;
1453
1454 out_free:
1455 /*
1456 * An error of some sort happened. Free the TX skb and tell the
1457 * OS that we've "dealt" with the packet ...
1458 */
1459 dev_kfree_skb_any(skb);
1460 return NETDEV_TX_OK;
1461 }
1462
1463 /**
1464 * copy_frags - copy fragments from gather list into skb_shared_info
1465 * @skb: destination skb
1466 * @gl: source internal packet gather list
1467 * @offset: packet start offset in first page
1468 *
1469 * Copy an internal packet gather list into a Linux skb_shared_info
1470 * structure.
1471 */
1472 static inline void copy_frags(struct sk_buff *skb,
1473 const struct pkt_gl *gl,
1474 unsigned int offset)
1475 {
1476 int i;
1477
1478 /* usually there's just one frag */
1479 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1480 gl->frags[0].offset + offset,
1481 gl->frags[0].size - offset);
1482 skb_shinfo(skb)->nr_frags = gl->nfrags;
1483 for (i = 1; i < gl->nfrags; i++)
1484 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1485 gl->frags[i].offset,
1486 gl->frags[i].size);
1487
1488 /* get a reference to the last page, we don't own it */
1489 get_page(gl->frags[gl->nfrags - 1].page);
1490 }
1491
1492 /**
1493 * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1494 * @gl: the gather list
1495 * @skb_len: size of sk_buff main body if it carries fragments
1496 * @pull_len: amount of data to move to the sk_buff's main body
1497 *
1498 * Builds an sk_buff from the given packet gather list. Returns the
1499 * sk_buff or %NULL if sk_buff allocation failed.
1500 */
1501 static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1502 unsigned int skb_len,
1503 unsigned int pull_len)
1504 {
1505 struct sk_buff *skb;
1506
1507 /*
1508 * If the ingress packet is small enough, allocate an skb large enough
1509 * for all of the data and copy it inline. Otherwise, allocate an skb
1510 * with enough room to pull in the header and reference the rest of
1511 * the data via the skb fragment list.
1512 *
1513 * Below we rely on RX_COPY_THRES being less than the smallest Rx
1514 * buffer size, which is expected since buffers are at least
1515 * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
1516 * fragment.
1517 */
1518 if (gl->tot_len <= RX_COPY_THRES) {
1519 /* small packets have only one fragment */
1520 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1521 if (unlikely(!skb))
1522 goto out;
1523 __skb_put(skb, gl->tot_len);
1524 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1525 } else {
1526 skb = alloc_skb(skb_len, GFP_ATOMIC);
1527 if (unlikely(!skb))
1528 goto out;
1529 __skb_put(skb, pull_len);
1530 skb_copy_to_linear_data(skb, gl->va, pull_len);
1531
1532 copy_frags(skb, gl, pull_len);
1533 skb->len = gl->tot_len;
1534 skb->data_len = skb->len - pull_len;
1535 skb->truesize += skb->data_len;
1536 }
1537
1538 out:
1539 return skb;
1540 }
1541
1542 /**
1543 * t4vf_pktgl_free - free a packet gather list
1544 * @gl: the gather list
1545 *
1546 * Releases the pages of a packet gather list. We do not own the last
1547 * page on the list and do not free it.
1548 */
1549 static void t4vf_pktgl_free(const struct pkt_gl *gl)
1550 {
1551 int frag;
1552
1553 frag = gl->nfrags - 1;
1554 while (frag--)
1555 put_page(gl->frags[frag].page);
1556 }
1557
1558 /**
1559 * do_gro - perform Generic Receive Offload ingress packet processing
1560 * @rxq: ingress RX Ethernet Queue
1561 * @gl: gather list for ingress packet
1562 * @pkt: CPL header for last packet fragment
1563 *
1564 * Perform Generic Receive Offload (GRO) ingress packet processing.
1565 * We use the standard Linux GRO interfaces for this.
1566 */
1567 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1568 const struct cpl_rx_pkt *pkt)
1569 {
1570 struct adapter *adapter = rxq->rspq.adapter;
1571 struct sge *s = &adapter->sge;
1572 struct port_info *pi;
1573 int ret;
1574 struct sk_buff *skb;
1575
1576 skb = napi_get_frags(&rxq->rspq.napi);
1577 if (unlikely(!skb)) {
1578 t4vf_pktgl_free(gl);
1579 rxq->stats.rx_drops++;
1580 return;
1581 }
1582
1583 copy_frags(skb, gl, s->pktshift);
1584 skb->len = gl->tot_len - s->pktshift;
1585 skb->data_len = skb->len;
1586 skb->truesize += skb->data_len;
1587 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588 skb_record_rx_queue(skb, rxq->rspq.idx);
1589 pi = netdev_priv(skb->dev);
1590
1591 if (pkt->vlan_ex && !pi->vlan_id) {
1592 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1593 be16_to_cpu(pkt->vlan));
1594 rxq->stats.vlan_ex++;
1595 }
1596 ret = napi_gro_frags(&rxq->rspq.napi);
1597
1598 if (ret == GRO_HELD)
1599 rxq->stats.lro_pkts++;
1600 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1601 rxq->stats.lro_merged++;
1602 rxq->stats.pkts++;
1603 rxq->stats.rx_cso++;
1604 }
1605
1606 /**
1607 * t4vf_ethrx_handler - process an ingress ethernet packet
1608 * @rspq: the response queue that received the packet
1609 * @rsp: the response queue descriptor holding the RX_PKT message
1610 * @gl: the gather list of packet fragments
1611 *
1612 * Process an ingress ethernet packet and deliver it to the stack.
1613 */
1614 int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1615 const struct pkt_gl *gl)
1616 {
1617 struct sk_buff *skb;
1618 const struct cpl_rx_pkt *pkt = (void *)rsp;
1619 bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1620 (rspq->netdev->features & NETIF_F_RXCSUM);
1621 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1622 struct adapter *adapter = rspq->adapter;
1623 struct sge *s = &adapter->sge;
1624 struct port_info *pi;
1625
1626 /*
1627 * If this is a good TCP packet and we have Generic Receive Offload
1628 * enabled, handle the packet in the GRO path.
1629 */
1630 if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1631 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1632 !pkt->ip_frag) {
1633 do_gro(rxq, gl, pkt);
1634 return 0;
1635 }
1636
1637 /*
1638 * Convert the Packet Gather List into an skb.
1639 */
1640 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1641 if (unlikely(!skb)) {
1642 t4vf_pktgl_free(gl);
1643 rxq->stats.rx_drops++;
1644 return 0;
1645 }
1646 __skb_pull(skb, s->pktshift);
1647 skb->protocol = eth_type_trans(skb, rspq->netdev);
1648 skb_record_rx_queue(skb, rspq->idx);
1649 pi = netdev_priv(skb->dev);
1650 rxq->stats.pkts++;
1651
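	/* A fully offloaded TCP/UDP checksum lets us report
	 * CHECKSUM_UNNECESSARY; for IP fragments we can only pass the raw
	 * hardware checksum up as CHECKSUM_COMPLETE and let the stack
	 * finish verification.
	 */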
1652 if (csum_ok && !pkt->err_vec &&
1653 (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1654 if (!pkt->ip_frag) {
1655 skb->ip_summed = CHECKSUM_UNNECESSARY;
1656 rxq->stats.rx_cso++;
1657 } else if (pkt->l2info & htonl(RXF_IP_F)) {
1658 __sum16 c = (__force __sum16)pkt->csum;
1659 skb->csum = csum_unfold(c);
1660 skb->ip_summed = CHECKSUM_COMPLETE;
1661 rxq->stats.rx_cso++;
1662 }
1663 } else
1664 skb_checksum_none_assert(skb);
1665
1666 if (pkt->vlan_ex && !pi->vlan_id) {
1667 rxq->stats.vlan_ex++;
1668 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1669 be16_to_cpu(pkt->vlan));
1670 }
1671
1672 netif_receive_skb(skb);
1673
1674 return 0;
1675 }
1676
1677 /**
1678 * is_new_response - check if a response is newly written
1679 * @rc: the response control descriptor
1680 * @rspq: the response queue
1681 *
1682 * Returns true if a response descriptor contains a yet unprocessed
1683 * response.
1684 */
1685 static inline bool is_new_response(const struct rsp_ctrl *rc,
1686 const struct sge_rspq *rspq)
1687 {
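	/* The SGE writes each response with the queue's current generation
	 * bit; rspq->gen is toggled every time the consumer index wraps, so
	 * a matching generation identifies a descriptor written since we
	 * last visited this slot.
	 */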
1688 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1689 }
1690
1691 /**
1692 * restore_rx_bufs - put back a packet's RX buffers
1693 * @gl: the packet gather list
1694 * @fl: the SGE Free List
1695 * @frags: how many fragments in @gl
1696 *
1697 * Called when we find out that the current packet, @gl, can't be
1698 * processed right away for some reason. This is a very rare event and
1699 * there's no effort to make this suspension/resumption process
1700 * particularly efficient.
1701 *
1702 * We implement the suspension by putting all of the RX buffers associated
1703 * with the current packet back on the original Free List. The buffers
1704 * have already been unmapped and are left unmapped; we mark them as
1705 * unmapped in order to prevent further unmapping attempts. (Effectively
1706 * this function undoes the series of @unmap_rx_buf calls which were done
1707 * to create the current packet's gather list.) This leaves us ready to
1708 * restart processing of the packet the next time we start processing the
1709 * RX Queue ...
1710 */
1711 static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1712 int frags)
1713 {
1714 struct rx_sw_desc *sdesc;
1715
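	/* Walk the Free List consumer index backwards, handing each page
	 * back to its software descriptor, flagging the buffer as already
	 * unmapped and re-crediting the Free List's available count.
	 */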
1716 while (frags--) {
1717 if (fl->cidx == 0)
1718 fl->cidx = fl->size - 1;
1719 else
1720 fl->cidx--;
1721 sdesc = &fl->sdesc[fl->cidx];
1722 sdesc->page = gl->frags[frags].page;
1723 sdesc->dma_addr |= RX_UNMAPPED_BUF;
1724 fl->avail++;
1725 }
1726 }
1727
1728 /**
1729 * rspq_next - advance to the next entry in a response queue
1730 * @rspq: the queue
1731 *
1732 * Updates the state of a response queue to advance it to the next entry.
1733 */
1734 static inline void rspq_next(struct sge_rspq *rspq)
1735 {
1736 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1737 if (unlikely(++rspq->cidx == rspq->size)) {
1738 rspq->cidx = 0;
1739 rspq->gen ^= 1;
1740 rspq->cur_desc = rspq->desc;
1741 }
1742 }
1743
1744 /**
1745 * process_responses - process responses from an SGE response queue
1746 * @rspq: the ingress response queue to process
1747 * @budget: how many responses can be processed in this round
1748 *
1749 * Process responses from a Scatter Gather Engine response queue up to
1750 * the supplied budget. Responses include received packets as well as
1751 * control messages from firmware or hardware.
1752 *
1753 * Additionally choose the interrupt holdoff time for the next interrupt
1754 * on this queue. If the system is under memory shortage use a fairly
1755 * long delay to help recovery.
1756 */
1757 static int process_responses(struct sge_rspq *rspq, int budget)
1758 {
1759 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1760 struct adapter *adapter = rspq->adapter;
1761 struct sge *s = &adapter->sge;
1762 int budget_left = budget;
1763
1764 while (likely(budget_left)) {
1765 int ret, rsp_type;
1766 const struct rsp_ctrl *rc;
1767
1768 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1769 if (!is_new_response(rc, rspq))
1770 break;
1771
1772 /*
1773 * Figure out what kind of response we've received from the
1774 * SGE.
1775 */
1776 dma_rmb();
1777 rsp_type = RSPD_TYPE_G(rc->type_gen);
1778 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1779 struct page_frag *fp;
1780 struct pkt_gl gl;
1781 const struct rx_sw_desc *sdesc;
1782 u32 bufsz, frag;
1783 u32 len = be32_to_cpu(rc->pldbuflen_qid);
1784
1785 /*
1786 * If we get a "new buffer" message from the SGE we
1787 * need to move on to the next Free List buffer.
1788 */
1789 if (len & RSPD_NEWBUF_F) {
1790 /*
1791 * We get one "new buffer" message when we
1792 * first start up a queue so we need to ignore
1793 * it when our offset into the buffer is 0.
1794 */
1795 if (likely(rspq->offset > 0)) {
1796 free_rx_bufs(rspq->adapter, &rxq->fl,
1797 1);
1798 rspq->offset = 0;
1799 }
1800 len = RSPD_LEN_G(len);
1801 }
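			/* With packing enabled several packets can share one
			 * Free List buffer; rspq->offset tracks how far into
			 * the current buffer we've consumed and is reset only
			 * when the SGE reports a new buffer.
			 */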
1802 gl.tot_len = len;
1803
1804 /*
1805 * Gather packet fragments.
1806 */
1807 for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1808 BUG_ON(frag >= MAX_SKB_FRAGS);
1809 BUG_ON(rxq->fl.avail == 0);
1810 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1811 bufsz = get_buf_size(adapter, sdesc);
1812 fp->page = sdesc->page;
1813 fp->offset = rspq->offset;
1814 fp->size = min(bufsz, len);
1815 len -= fp->size;
1816 if (!len)
1817 break;
1818 unmap_rx_buf(rspq->adapter, &rxq->fl);
1819 }
1820 gl.nfrags = frag+1;
1821
1822 /*
1823 * Last buffer remains mapped so explicitly make it
1824 * coherent for CPU access and start preloading first
1825 * cache line ...
1826 */
1827 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1828 get_buf_addr(sdesc),
1829 fp->size, DMA_FROM_DEVICE);
1830 gl.va = (page_address(gl.frags[0].page) +
1831 gl.frags[0].offset);
1832 prefetch(gl.va);
1833
1834 /*
1835 * Hand the new ingress packet to the handler for
1836 * this Response Queue.
1837 */
1838 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1839 if (likely(ret == 0))
1840 rspq->offset += ALIGN(fp->size, s->fl_align);
1841 else
1842 restore_rx_bufs(&gl, &rxq->fl, frag);
1843 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1844 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1845 } else {
1846 WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1847 ret = 0;
1848 }
1849
1850 if (unlikely(ret)) {
1851 /*
1852 * Couldn't process descriptor, back off for recovery.
1853 * We use the SGE's last timer which has the longest
1854 * interrupt coalescing value ...
1855 */
1856 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1857 rspq->next_intr_params =
1858 QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1859 break;
1860 }
1861
1862 rspq_next(rspq);
1863 budget_left--;
1864 }
1865
1866 /*
1867 * If this is a Response Queue with an associated Free List and
1868 * at least two Egress Queue units available in the Free List
1869 * for new buffer pointers, refill the Free List.
1870 */
1871 if (rspq->offset >= 0 &&
1872 fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1873 __refill_fl(rspq->adapter, &rxq->fl);
1874 return budget - budget_left;
1875 }
1876
1877 /**
1878 * napi_rx_handler - the NAPI handler for RX processing
1879 * @napi: the napi instance
1880 * @budget: how many packets we can process in this round
1881 *
1882 * Handler for new data events when using NAPI. This does not need any
1883 * locking or protection from interrupts as data interrupts are off at
1884 * this point and other adapter interrupts do not interfere (the latter
1885 * is not a concern at all with MSI-X as non-data interrupts then have
1886 * a separate handler).
1887 */
1888 static int napi_rx_handler(struct napi_struct *napi, int budget)
1889 {
1890 unsigned int intr_params;
1891 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1892 int work_done = process_responses(rspq, budget);
1893 u32 val;
1894
1895 if (likely(work_done < budget)) {
1896 napi_complete_done(napi, work_done);
1897 intr_params = rspq->next_intr_params;
1898 rspq->next_intr_params = rspq->intr_params;
1899 } else
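		/* Budget exhausted: NAPI will poll us again shortly, so just
		 * update the consumer index without re-arming the interrupt.
		 */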
1900 intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1901
1902 if (unlikely(work_done == 0))
1903 rspq->unhandled_irqs++;
1904
1905 val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1906 /* If we don't have access to the new User GTS (T5+), use the old
1907 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1908 */
1909 if (unlikely(!rspq->bar2_addr)) {
1910 t4_write_reg(rspq->adapter,
1911 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1912 val | INGRESSQID_V((u32)rspq->cntxt_id));
1913 } else {
1914 writel(val | INGRESSQID_V(rspq->bar2_qid),
1915 rspq->bar2_addr + SGE_UDB_GTS);
1916 wmb();
1917 }
1918 return work_done;
1919 }
1920
1921 /*
1922 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1923 * (i.e., response queue serviced by NAPI polling).
1924 */
1925 irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1926 {
1927 struct sge_rspq *rspq = cookie;
1928
1929 napi_schedule(&rspq->napi);
1930 return IRQ_HANDLED;
1931 }
1932
1933 /*
1934 * Process the indirect interrupt entries in the interrupt queue and kick off
1935 * NAPI for each queue that has generated an entry.
1936 */
1937 static unsigned int process_intrq(struct adapter *adapter)
1938 {
1939 struct sge *s = &adapter->sge;
1940 struct sge_rspq *intrq = &s->intrq;
1941 unsigned int work_done;
1942 u32 val;
1943
1944 spin_lock(&adapter->sge.intrq_lock);
1945 for (work_done = 0; ; work_done++) {
1946 const struct rsp_ctrl *rc;
1947 unsigned int qid, iq_idx;
1948 struct sge_rspq *rspq;
1949
1950 /*
1951 * Grab the next response from the interrupt queue and bail
1952 * out if it's not a new response.
1953 */
1954 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1955 if (!is_new_response(rc, intrq))
1956 break;
1957
1958 /*
1959 * If the response isn't a forwarded interrupt message issue an
1960 * error and go on to the next response message. This should
1961 * never happen ...
1962 */
1963 dma_rmb();
1964 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1965 dev_err(adapter->pdev_dev,
1966 "Unexpected INTRQ response type %d\n",
1967 RSPD_TYPE_G(rc->type_gen));
1968 continue;
1969 }
1970
1971 /*
1972 * Extract the Queue ID from the interrupt message and perform
1973 * sanity checking to make sure it really refers to one of our
1974 * Ingress Queues which is active and matches the queue's ID.
1975 * None of these error conditions should ever happen so we may
1976 * want to make them fatal and/or conditional under
1977 * DEBUG.
1978 */
1979 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1980 iq_idx = IQ_IDX(s, qid);
1981 if (unlikely(iq_idx >= MAX_INGQ)) {
1982 dev_err(adapter->pdev_dev,
1983 "Ingress QID %d out of range\n", qid);
1984 continue;
1985 }
1986 rspq = s->ingr_map[iq_idx];
1987 if (unlikely(rspq == NULL)) {
1988 dev_err(adapter->pdev_dev,
1989 "Ingress QID %d RSPQ=NULL\n", qid);
1990 continue;
1991 }
1992 if (unlikely(rspq->abs_id != qid)) {
1993 dev_err(adapter->pdev_dev,
1994 "Ingress QID %d refers to RSPQ %d\n",
1995 qid, rspq->abs_id);
1996 continue;
1997 }
1998
1999 /*
2000 * Schedule NAPI processing on the indicated Response Queue
2001 * and move on to the next entry in the Forwarded Interrupt
2002 * Queue.
2003 */
2004 napi_schedule(&rspq->napi);
2005 rspq_next(intrq);
2006 }
2007
2008 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
2009 /* If we don't have access to the new User GTS (T5+), use the old
2010 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2011 */
2012 if (unlikely(!intrq->bar2_addr)) {
2013 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2014 val | INGRESSQID_V(intrq->cntxt_id));
2015 } else {
2016 writel(val | INGRESSQID_V(intrq->bar2_qid),
2017 intrq->bar2_addr + SGE_UDB_GTS);
2018 wmb();
2019 }
2020
2021 spin_unlock(&adapter->sge.intrq_lock);
2022
2023 return work_done;
2024 }
2025
2026 /*
2027 * The MSI interrupt handler handles data events from SGE response queues as
2028 * well as error and other async events as they all use the same MSI vector.
2029 */
2030 static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2031 {
2032 struct adapter *adapter = cookie;
2033
2034 process_intrq(adapter);
2035 return IRQ_HANDLED;
2036 }
2037
2038 /**
2039 * t4vf_intr_handler - select the top-level interrupt handler
2040 * @adapter: the adapter
2041 *
2042 * Selects the top-level interrupt handler based on the type of interrupts
2043 * (MSI-X or MSI).
2044 */
2045 irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2046 {
2047 BUG_ON((adapter->flags &
2048 (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2049 if (adapter->flags & CXGB4VF_USING_MSIX)
2050 return t4vf_sge_intr_msix;
2051 else
2052 return t4vf_intr_msi;
2053 }
2054
2055 /**
2056 * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
2057 * @t: Rx timer
2058 *
2059 * Runs periodically from a timer to perform maintenance of SGE RX queues.
2060 *
2061 * a) Replenishes RX queues that have run out due to memory shortage.
2062 * Normally new RX buffers are added when existing ones are consumed but
2063 * when out of memory a queue can become empty. We schedule NAPI to do
2064 * the actual refill.
2065 */
2066 static void sge_rx_timer_cb(struct timer_list *t)
2067 {
2068 struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
2069 struct sge *s = &adapter->sge;
2070 unsigned int i;
2071
2072 /*
2073 * Scan the "Starving Free Lists" flag array looking for any Free
2074 * Lists in need of more free buffers. If we find one and it's not
2075 * being actively polled, then bump its "starving" counter and attempt
2076 * to refill it. If we're successful in adding enough buffers to push
2077 * the Free List over the starving threshold, then we can clear its
2078 * "starving" status.
2079 */
2080 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2081 unsigned long m;
2082
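		/* Clearing the lowest set bit on each iteration (m &= m - 1)
		 * visits every Free List marked starving in this word exactly
		 * once.
		 */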
2083 for (m = s->starving_fl[i]; m; m &= m - 1) {
2084 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2085 struct sge_fl *fl = s->egr_map[id];
2086
2087 clear_bit(id, s->starving_fl);
2088 smp_mb__after_atomic();
2089
2090 /*
2091 * Since we are accessing fl without a lock there's a
2092 * small probability of a false positive where we
2093 * schedule napi but the FL is no longer starving.
2094 * No biggie.
2095 */
2096 if (fl_starving(adapter, fl)) {
2097 struct sge_eth_rxq *rxq;
2098
2099 rxq = container_of(fl, struct sge_eth_rxq, fl);
2100 if (napi_reschedule(&rxq->rspq.napi))
2101 fl->starving++;
2102 else
2103 set_bit(id, s->starving_fl);
2104 }
2105 }
2106 }
2107
2108 /*
2109 * Reschedule the next scan for starving Free Lists ...
2110 */
2111 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2112 }
2113
2114 /**
2115 * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
2116 * @t: Tx timer
2117 *
2118 * Runs periodically from a timer to perform maintenance of SGE TX queues.
2119 *
2120 * b) Reclaims completed Tx packets for the Ethernet queues. Normally
2121 * packets are cleaned up by new Tx packets; this timer cleans up packets
2122 * when no new packets are being submitted. This is essential for pktgen,
2123 * at least.
2124 */
2125 static void sge_tx_timer_cb(struct timer_list *t)
2126 {
2127 struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
2128 struct sge *s = &adapter->sge;
2129 unsigned int i, budget;
2130
2131 budget = MAX_TIMER_TX_RECLAIM;
2132 i = s->ethtxq_rover;
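	/* Walk the Ethernet TX queues round-robin, starting where the
	 * previous timer run left off, so reclaim work is spread across
	 * queues over time.
	 */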
2133 do {
2134 struct sge_eth_txq *txq = &s->ethtxq[i];
2135
2136 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2137 int avail = reclaimable(&txq->q);
2138
2139 if (avail > budget)
2140 avail = budget;
2141
2142 free_tx_desc(adapter, &txq->q, avail, true);
2143 txq->q.in_use -= avail;
2144 __netif_tx_unlock(txq->txq);
2145
2146 budget -= avail;
2147 if (!budget)
2148 break;
2149 }
2150
2151 i++;
2152 if (i >= s->ethqsets)
2153 i = 0;
2154 } while (i != s->ethtxq_rover);
2155 s->ethtxq_rover = i;
2156
2157 /*
2158 * If we found too many reclaimable packets schedule a timer in the
2159 * near future to continue where we left off. Otherwise the next timer
2160 * will be at its normal interval.
2161 */
2162 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2163 }
2164
2165 /**
2166 * bar2_address - return the BAR2 address for an SGE Queue's Registers
2167 * @adapter: the adapter
2168 * @qid: the SGE Queue ID
2169 * @qtype: the SGE Queue Type (Egress or Ingress)
2170 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2171 *
2172 * Returns the BAR2 address for the SGE Queue Registers associated with
2173 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
2174 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2175 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2176 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
2177 */
2178 static void __iomem *bar2_address(struct adapter *adapter,
2179 unsigned int qid,
2180 enum t4_bar2_qtype qtype,
2181 unsigned int *pbar2_qid)
2182 {
2183 u64 bar2_qoffset;
2184 int ret;
2185
2186 ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2187 &bar2_qoffset, pbar2_qid);
2188 if (ret)
2189 return NULL;
2190
2191 return adapter->bar2 + bar2_qoffset;
2192 }
2193
2194 /**
2195 * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
2196 * @adapter: the adapter
2197 * @rspq: pointer to the new rxq's Response Queue to be filled in
2198 * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
2199 * @dev: the network device associated with the new rspq
2200 * @intr_dest: MSI-X vector index (overridden in MSI mode)
2201 * @fl: pointer to the new rxq's Free List to be filled in
2202 * @hnd: the interrupt handler to invoke for the rspq
2203 */
2204 int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2205 bool iqasynch, struct net_device *dev,
2206 int intr_dest,
2207 struct sge_fl *fl, rspq_handler_t hnd)
2208 {
2209 struct sge *s = &adapter->sge;
2210 struct port_info *pi = netdev_priv(dev);
2211 struct fw_iq_cmd cmd, rpl;
2212 int ret, iqandst, flsz = 0;
2213 int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING);
2214
2215 /*
2216 * If we're using MSI interrupts and we're not initializing the
2217 * Forwarded Interrupt Queue itself, then set up this queue for
2218 * indirect interrupts to the Forwarded Interrupt Queue. Obviously
2219 * the Forwarded Interrupt Queue must be set up before any other
2220 * ingress queue ...
2221 */
2222 if ((adapter->flags & CXGB4VF_USING_MSI) &&
2223 rspq != &adapter->sge.intrq) {
2224 iqandst = SGE_INTRDST_IQ;
2225 intr_dest = adapter->sge.intrq.abs_id;
2226 } else
2227 iqandst = SGE_INTRDST_PCI;
2228
2229 /*
2230 * Allocate the hardware ring for the Response Queue. The size needs
2231 * to be a multiple of 16 which includes the mandatory status entry
2232 * (regardless of whether the Status Page capabilities are enabled or
2233 * not).
2234 */
2235 rspq->size = roundup(rspq->size, 16);
2236 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2237 0, &rspq->phys_addr, NULL, 0);
2238 if (!rspq->desc)
2239 return -ENOMEM;
2240
2241 /*
2242 * Fill in the Ingress Queue Command. Note: Ideally this code would
2243 * be in t4vf_hw.c but there are so many parameters and dependencies
2244 * on our Linux SGE state that we would end up having to pass tons of
2245 * parameters. We'll have to think about how this might be migrated
2246 * into OS-independent common code ...
2247 */
2248 memset(&cmd, 0, sizeof(cmd));
2249 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2250 FW_CMD_REQUEST_F |
2251 FW_CMD_WRITE_F |
2252 FW_CMD_EXEC_F);
2253 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2254 FW_IQ_CMD_IQSTART_F |
2255 FW_LEN16(cmd));
2256 cmd.type_to_iqandstindex =
2257 cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2258 FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2259 FW_IQ_CMD_VIID_V(pi->viid) |
2260 FW_IQ_CMD_IQANDST_V(iqandst) |
2261 FW_IQ_CMD_IQANUS_V(1) |
2262 FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2263 FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2264 cmd.iqdroprss_to_iqesize =
2265 cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2266 FW_IQ_CMD_IQGTSMODE_F |
2267 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2268 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2269 cmd.iqsize = cpu_to_be16(rspq->size);
2270 cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2271
2272 if (fl) {
2273 unsigned int chip_ver =
2274 CHELSIO_CHIP_VERSION(adapter->params.chip);
2275 /*
2276 * Allocate the ring for the hardware free list (with space
2277 * for its status page) along with the associated software
2278 * descriptor ring. The free list size needs to be a multiple
2279 * of the Egress Queue Unit and at least 2 Egress Units larger
2280 * than the SGE's Egress Congestion Threshold
2281 * (fl_starve_thres - 1).
2282 */
2283 if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
2284 fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
2285 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2286 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2287 sizeof(__be64), sizeof(struct rx_sw_desc),
2288 &fl->addr, &fl->sdesc, s->stat_len);
2289 if (!fl->desc) {
2290 ret = -ENOMEM;
2291 goto err;
2292 }
2293
2294 /*
2295 * Calculate the size of the hardware free list ring plus
2296 * Status Page (which the SGE will place after the end of the
2297 * free list ring) in Egress Queue Units.
2298 */
2299 flsz = (fl->size / FL_PER_EQ_UNIT +
2300 s->stat_len / EQ_UNIT);
2301
2302 /*
2303 * Fill in all the relevant firmware Ingress Queue Command
2304 * fields for the free list.
2305 */
2306 cmd.iqns_to_fl0congen =
2307 cpu_to_be32(
2308 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2309 FW_IQ_CMD_FL0PACKEN_F |
2310 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2311 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2312 FW_IQ_CMD_FL0PADEN_F);
2313
2314 /* In T6, for egress queue type FL there is internal overhead
2315 * of 16B for header going into FLM module. Hence the maximum
2316 * allowed burst size is 448 bytes. For T4/T5, the hardware
2317 * doesn't coalesce fetch requests if more than 64 bytes of
2318 * Free List pointers are provided, so we use a 128-byte Fetch
2319 * Burst Minimum there (T6 implements coalescing so we can use
2320 * the smaller 64-byte value there).
2321 */
2322 cmd.fl0dcaen_to_fl0cidxfthresh =
2323 cpu_to_be16(
2324 FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5
2325 ? FETCHBURSTMIN_128B_X
2326 : FETCHBURSTMIN_64B_T6_X) |
2327 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
2328 FETCHBURSTMAX_512B_X :
2329 FETCHBURSTMAX_256B_X));
2330 cmd.fl0size = cpu_to_be16(flsz);
2331 cmd.fl0addr = cpu_to_be64(fl->addr);
2332 }
2333
2334 /*
2335 * Issue the firmware Ingress Queue Command and extract the results if
2336 * it completes successfully.
2337 */
2338 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2339 if (ret)
2340 goto err;
2341
2342 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2343 rspq->cur_desc = rspq->desc;
2344 rspq->cidx = 0;
2345 rspq->gen = 1;
2346 rspq->next_intr_params = rspq->intr_params;
2347 rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2348 rspq->bar2_addr = bar2_address(adapter,
2349 rspq->cntxt_id,
2350 T4_BAR2_QTYPE_INGRESS,
2351 &rspq->bar2_qid);
2352 rspq->abs_id = be16_to_cpu(rpl.physiqid);
2353 rspq->size--; /* subtract status entry */
2354 rspq->adapter = adapter;
2355 rspq->netdev = dev;
2356 rspq->handler = hnd;
2357
2358 /* set offset to -1 to distinguish ingress queues without FL */
2359 rspq->offset = fl ? 0 : -1;
2360
2361 if (fl) {
2362 fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2363 fl->avail = 0;
2364 fl->pend_cred = 0;
2365 fl->pidx = 0;
2366 fl->cidx = 0;
2367 fl->alloc_failed = 0;
2368 fl->large_alloc_failed = 0;
2369 fl->starving = 0;
2370
2371 /* Note, we must initialize the BAR2 Free List User Doorbell
2372 * information before refilling the Free List!
2373 */
2374 fl->bar2_addr = bar2_address(adapter,
2375 fl->cntxt_id,
2376 T4_BAR2_QTYPE_EGRESS,
2377 &fl->bar2_qid);
2378
2379 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2380 }
2381
2382 return 0;
2383
2384 err:
2385 /*
2386 * An error occurred. Clean up our partial allocation state and
2387 * return the error.
2388 */
2389 if (rspq->desc) {
2390 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2391 rspq->desc, rspq->phys_addr);
2392 rspq->desc = NULL;
2393 }
2394 if (fl && fl->desc) {
2395 kfree(fl->sdesc);
2396 fl->sdesc = NULL;
2397 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2398 fl->desc, fl->addr);
2399 fl->desc = NULL;
2400 }
2401 return ret;
2402 }
2403
2404 /**
2405 * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
2406 * @adapter: the adapter
2407 * @txq: pointer to the new txq to be filled in
2408 * @dev: the network device
2409 * @devq: the network TX queue associated with the new txq
2410 * @iqid: the relative ingress queue ID to which events relating to
2411 * the new txq should be directed
2412 */
2413 int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2414 struct net_device *dev, struct netdev_queue *devq,
2415 unsigned int iqid)
2416 {
2417 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
2418 struct port_info *pi = netdev_priv(dev);
2419 struct fw_eq_eth_cmd cmd, rpl;
2420 struct sge *s = &adapter->sge;
2421 int ret, nentries;
2422
2423 /*
2424 * Calculate the size of the hardware TX Queue (including the Status
2425 * Page on the end of the TX Queue) in units of TX Descriptors.
2426 */
2427 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2428
2429 /*
2430 * Allocate the hardware ring for the TX ring (with space for its
2431 * status page) along with the associated software descriptor ring.
2432 */
2433 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2434 sizeof(struct tx_desc),
2435 sizeof(struct tx_sw_desc),
2436 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2437 if (!txq->q.desc)
2438 return -ENOMEM;
2439
2440 /*
2441 * Fill in the Egress Queue Command. Note: As with the direct use of
2442 * the firmware Ingress Queue Command above in our RXQ allocation
2443 * routine, ideally, this code would be in t4vf_hw.c. Again, we'll
2444 * have to see if there's some reasonable way to parameterize it
2445 * into the common code ...
2446 */
2447 memset(&cmd, 0, sizeof(cmd));
2448 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2449 FW_CMD_REQUEST_F |
2450 FW_CMD_WRITE_F |
2451 FW_CMD_EXEC_F);
2452 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2453 FW_EQ_ETH_CMD_EQSTART_F |
2454 FW_LEN16(cmd));
2455 cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2456 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2457 cmd.fetchszm_to_iqid =
2458 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2459 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2460 FW_EQ_ETH_CMD_IQID_V(iqid));
2461 cmd.dcaen_to_eqsize =
2462 cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
2463 ? FETCHBURSTMIN_64B_X
2464 : FETCHBURSTMIN_64B_T6_X) |
2465 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2466 FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2467 CIDXFLUSHTHRESH_32_X) |
2468 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2469 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2470
2471 /*
2472 * Issue the firmware Egress Queue Command and extract the results if
2473 * it completes successfully.
2474 */
2475 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2476 if (ret) {
2477 /*
2478 * The firmware Egress Queue Command failed for some reason.
2479 * Free up our partial allocation state and return the error.
2480 */
2481 kfree(txq->q.sdesc);
2482 txq->q.sdesc = NULL;
2483 dma_free_coherent(adapter->pdev_dev,
2484 nentries * sizeof(struct tx_desc),
2485 txq->q.desc, txq->q.phys_addr);
2486 txq->q.desc = NULL;
2487 return ret;
2488 }
2489
2490 txq->q.in_use = 0;
2491 txq->q.cidx = 0;
2492 txq->q.pidx = 0;
2493 txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2494 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2495 txq->q.bar2_addr = bar2_address(adapter,
2496 txq->q.cntxt_id,
2497 T4_BAR2_QTYPE_EGRESS,
2498 &txq->q.bar2_qid);
2499 txq->q.abs_id =
2500 FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2501 txq->txq = devq;
2502 txq->tso = 0;
2503 txq->tx_cso = 0;
2504 txq->vlan_ins = 0;
2505 txq->q.stops = 0;
2506 txq->q.restarts = 0;
2507 txq->mapping_err = 0;
2508 return 0;
2509 }
2510
2511 /*
2512 * Free the DMA map resources associated with a TX queue.
2513 */
2514 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2515 {
2516 struct sge *s = &adapter->sge;
2517
2518 dma_free_coherent(adapter->pdev_dev,
2519 tq->size * sizeof(*tq->desc) + s->stat_len,
2520 tq->desc, tq->phys_addr);
2521 tq->cntxt_id = 0;
2522 tq->sdesc = NULL;
2523 tq->desc = NULL;
2524 }
2525
2526 /*
2527 * Free the resources associated with a response queue (possibly including a
2528 * free list).
2529 */
2530 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2531 struct sge_fl *fl)
2532 {
2533 struct sge *s = &adapter->sge;
2534 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2535
2536 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2537 rspq->cntxt_id, flid, 0xffff);
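	/* rspq->size was decremented at allocation time to hide the status
	 * entry, so add it back here to free the full hardware ring.
	 */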
2538 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2539 rspq->desc, rspq->phys_addr);
2540 netif_napi_del(&rspq->napi);
2541 rspq->netdev = NULL;
2542 rspq->cntxt_id = 0;
2543 rspq->abs_id = 0;
2544 rspq->desc = NULL;
2545
2546 if (fl) {
2547 free_rx_bufs(adapter, fl, fl->avail);
2548 dma_free_coherent(adapter->pdev_dev,
2549 fl->size * sizeof(*fl->desc) + s->stat_len,
2550 fl->desc, fl->addr);
2551 kfree(fl->sdesc);
2552 fl->sdesc = NULL;
2553 fl->cntxt_id = 0;
2554 fl->desc = NULL;
2555 }
2556 }
2557
2558 /**
2559 * t4vf_free_sge_resources - free SGE resources
2560 * @adapter: the adapter
2561 *
2562 * Frees resources used by the SGE queue sets.
2563 */
2564 void t4vf_free_sge_resources(struct adapter *adapter)
2565 {
2566 struct sge *s = &adapter->sge;
2567 struct sge_eth_rxq *rxq = s->ethrxq;
2568 struct sge_eth_txq *txq = s->ethtxq;
2569 struct sge_rspq *evtq = &s->fw_evtq;
2570 struct sge_rspq *intrq = &s->intrq;
2571 int qs;
2572
2573 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2574 if (rxq->rspq.desc)
2575 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2576 if (txq->q.desc) {
2577 t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2578 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2579 kfree(txq->q.sdesc);
2580 free_txq(adapter, &txq->q);
2581 }
2582 }
2583 if (evtq->desc)
2584 free_rspq_fl(adapter, evtq, NULL);
2585 if (intrq->desc)
2586 free_rspq_fl(adapter, intrq, NULL);
2587 }
2588
2589 /**
2590 * t4vf_sge_start - enable SGE operation
2591 * @adapter: the adapter
2592 *
2593 * Start tasklets and timers associated with the DMA engine.
2594 */
2595 void t4vf_sge_start(struct adapter *adapter)
2596 {
2597 adapter->sge.ethtxq_rover = 0;
2598 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2599 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2600 }
2601
2602 /**
2603 * t4vf_sge_stop - disable SGE operation
2604 * @adapter: the adapter
2605 *
2606 * Stop tasklets and timers associated with the DMA engine. Note that
2607 * this is effective only if measures have been taken to disable any HW
2608 * events that may restart them.
2609 */
2610 void t4vf_sge_stop(struct adapter *adapter)
2611 {
2612 struct sge *s = &adapter->sge;
2613
2614 if (s->rx_timer.function)
2615 del_timer_sync(&s->rx_timer);
2616 if (s->tx_timer.function)
2617 del_timer_sync(&s->tx_timer);
2618 }
2619
2620 /**
2621 * t4vf_sge_init - initialize SGE
2622 * @adapter: the adapter
2623 *
2624 * Performs SGE initialization needed every time after a chip reset.
2625 * We do not initialize any of the queue sets here, instead the driver
2626 * top-level must request those individually. We also do not enable DMA
2627 * here, that should be done after the queues have been set up.
2628 */
2629 int t4vf_sge_init(struct adapter *adapter)
2630 {
2631 struct sge_params *sge_params = &adapter->params.sge;
2632 u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
2633 u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
2634 struct sge *s = &adapter->sge;
2635
2636 /*
2637 * Start by vetting the basic SGE parameters which have been set up by
2638 * the Physical Function Driver. Ideally we should be able to deal
2639 * with _any_ configuration. Practice is different ...
2640 */
2641
2642 /* We only bother using the Large Page logic if the Large Page Buffer
2643 * is larger than our Page Size Buffer.
2644 */
2645 if (fl_large_pg <= fl_small_pg)
2646 fl_large_pg = 0;
2647
2648 /* The Page Size Buffer must be exactly equal to our Page Size and the
2649 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2650 */
2651 if (fl_small_pg != PAGE_SIZE ||
2652 (fl_large_pg & (fl_large_pg - 1)) != 0) {
2653 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2654 fl_small_pg, fl_large_pg);
2655 return -EINVAL;
2656 }
2657 if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2658 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2659 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2660 return -EINVAL;
2661 }
2662
2663 /*
2664 * Now translate the adapter parameters into our internal forms.
2665 */
2666 if (fl_large_pg)
2667 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2668 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2669 ? 128 : 64);
2670 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2671 s->fl_align = t4vf_fl_pkt_align(adapter);
2672
2673 /* A FL with <= fl_starve_thres buffers is starving and a periodic
2674 * timer will attempt to refill it. This needs to be larger than the
2675 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2676 * stuck waiting for new packets while the SGE is waiting for us to
2677 * give it more Free List entries. (Note that the SGE's Egress
2678 * Congestion Threshold is in units of 2 Free List pointers.)
2679 */
2680 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
2681 case CHELSIO_T4:
2682 s->fl_starve_thres =
2683 EGRTHRESHOLD_G(sge_params->sge_congestion_control);
2684 break;
2685 case CHELSIO_T5:
2686 s->fl_starve_thres =
2687 EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2688 break;
2689 case CHELSIO_T6:
2690 default:
2691 s->fl_starve_thres =
2692 T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2693 break;
2694 }
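	/* The hardware threshold is expressed in units of 2 Free List
	 * pointers; convert it to buffers and add one so a Free List sitting
	 * exactly at the threshold still counts as starving.
	 */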
2695 s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
2696
2697 /*
2698 * Set up tasklet timers.
2699 */
2700 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
2701 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
2702
2703 /*
2704 * Initialize Forwarded Interrupt Queue lock.
2705 */
2706 spin_lock_init(&s->intrq_lock);
2707
2708 return 0;
2709 }
2710