1 /* bnx2x_cmn.c: QLogic Everest network driver.
2 *
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
5 * All rights reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
17 *
18 */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/ip.h>
26 #include <linux/crash_dump.h>
27 #include <net/tcp.h>
28 #include <net/ipv6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
34 #include "bnx2x_sp.h"
35
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 {
43 int i;
44
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
49 napi_hash_add(&bnx2x_fp(bp, i, napi));
50 }
51 }
52
53 static void bnx2x_add_all_napi(struct bnx2x *bp)
54 {
55 int i;
56
57 /* Add NAPI objects */
58 for_each_eth_queue(bp, i) {
59 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
60 bnx2x_poll, NAPI_POLL_WEIGHT);
61 napi_hash_add(&bnx2x_fp(bp, i, napi));
62 }
63 }
64
65 static int bnx2x_calc_num_queues(struct bnx2x *bp)
66 {
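	/* A non-zero bnx2x_num_queues (the num_queues module parameter)
	 * overrides the kernel's default RSS queue count.
	 */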
67 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
68
69 /* Reduce memory usage in kdump environment by using only one queue */
70 if (is_kdump_kernel())
71 nq = 1;
72
73 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
74 return nq;
75 }
76
77 /**
78 * bnx2x_move_fp - move content of the fastpath structure.
79 *
80 * @bp: driver handle
81 * @from: source FP index
82 * @to: destination FP index
83 *
84  * Makes sure the contents of the bp->fp[to].napi are kept
85 * intact. This is done by first copying the napi struct from
86 * the target to the source, and then mem copying the entire
87 * source onto the target. Update txdata pointers and related
88 * content.
89 */
90 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
91 {
92 struct bnx2x_fastpath *from_fp = &bp->fp[from];
93 struct bnx2x_fastpath *to_fp = &bp->fp[to];
94 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
95 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
96 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
97 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
98 int old_max_eth_txqs, new_max_eth_txqs;
99 int old_txdata_index = 0, new_txdata_index = 0;
100 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
101
102 	/* Copy the NAPI object as it has already been initialized */
103 from_fp->napi = to_fp->napi;
104
105 /* Move bnx2x_fastpath contents */
106 memcpy(to_fp, from_fp, sizeof(*to_fp));
107 to_fp->index = to;
108
109 /* Retain the tpa_info of the original `to' version as we don't want
110 * 2 FPs to contain the same tpa_info pointer.
111 */
112 to_fp->tpa_info = old_tpa_info;
113
114 /* move sp_objs contents as well, as their indices match fp ones */
115 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
116
117 /* move fp_stats contents as well, as their indices match fp ones */
118 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
119
120 /* Update txdata pointers in fp and move txdata content accordingly:
121 * Each fp consumes 'max_cos' txdata structures, so the index should be
122 * decremented by max_cos x delta.
123 */
124
125 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
126 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
127 (bp)->max_cos;
128 if (from == FCOE_IDX(bp)) {
129 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
131 }
132
133 memcpy(&bp->bnx2x_txq[new_txdata_index],
134 &bp->bnx2x_txq[old_txdata_index],
135 sizeof(struct bnx2x_fp_txdata));
136 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
137 }
138
139 /**
140 * bnx2x_fill_fw_str - Fill buffer with FW version string.
141 *
142 * @bp: driver handle
143 * @buf: character buffer to fill with the fw name
144 * @buf_len: length of the above buffer
145 *
146 */
147 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
148 {
149 if (IS_PF(bp)) {
150 u8 phy_fw_ver[PHY_FW_VER_LEN];
151
152 phy_fw_ver[0] = '\0';
153 bnx2x_get_ext_phy_fw_version(&bp->link_params,
154 phy_fw_ver, PHY_FW_VER_LEN);
155 strlcpy(buf, bp->fw_ver, buf_len);
156 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
157 "bc %d.%d.%d%s%s",
158 (bp->common.bc_ver & 0xff0000) >> 16,
159 (bp->common.bc_ver & 0xff00) >> 8,
160 (bp->common.bc_ver & 0xff),
161 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
162 } else {
163 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
164 }
165 }
166
167 /**
168 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
169 *
170 * @bp: driver handle
171 * @delta: number of eth queues which were not allocated
172 */
173 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
174 {
175 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
176
177 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
178 	 * backward along the array could cause memory to be overwritten
179 */
180 for (cos = 1; cos < bp->max_cos; cos++) {
181 for (i = 0; i < old_eth_num - delta; i++) {
182 struct bnx2x_fastpath *fp = &bp->fp[i];
183 int new_idx = cos * (old_eth_num - delta) + i;
184
185 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
186 sizeof(struct bnx2x_fp_txdata));
187 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
188 }
189 }
190 }
191
192 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
193
194 /* free skb in the packet ring at pos idx
195 * return idx of last bd freed
196 */
197 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
198 u16 idx, unsigned int *pkts_compl,
199 unsigned int *bytes_compl)
200 {
201 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
202 struct eth_tx_start_bd *tx_start_bd;
203 struct eth_tx_bd *tx_data_bd;
204 struct sk_buff *skb = tx_buf->skb;
205 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
206 int nbd;
207 u16 split_bd_len = 0;
208
209 /* prefetch skb end pointer to speedup dev_kfree_skb() */
210 prefetch(&skb->end);
211
212 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
213 txdata->txq_index, idx, tx_buf, skb);
214
215 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
216
217 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
218 #ifdef BNX2X_STOP_ON_ERROR
219 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
220 BNX2X_ERR("BAD nbd!\n");
221 bnx2x_panic();
222 }
223 #endif
224 new_cons = nbd + tx_buf->first_bd;
225
226 /* Get the next bd */
227 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
228
229 /* Skip a parse bd... */
230 --nbd;
231 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
232
233 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
234 /* Skip second parse bd... */
235 --nbd;
236 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
237 }
238
239 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
240 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
241 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
242 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
243 --nbd;
244 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
245 }
246
247 /* unmap first bd */
248 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
249 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
250 DMA_TO_DEVICE);
251
252 /* now free frags */
253 while (nbd > 0) {
254
255 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
256 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
257 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
258 if (--nbd)
259 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
260 }
261
262 /* release skb */
263 WARN_ON(!skb);
264 if (likely(skb)) {
265 (*pkts_compl)++;
266 (*bytes_compl) += skb->len;
267 dev_kfree_skb_any(skb);
268 }
269
270 tx_buf->first_bd = 0;
271 tx_buf->skb = NULL;
272
273 return new_cons;
274 }
275
276 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
277 {
278 struct netdev_queue *txq;
279 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
280 unsigned int pkts_compl = 0, bytes_compl = 0;
281
282 #ifdef BNX2X_STOP_ON_ERROR
283 if (unlikely(bp->panic))
284 return -1;
285 #endif
286
287 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
288 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
289 sw_cons = txdata->tx_pkt_cons;
290
291 /* Ensure subsequent loads occur after hw_cons */
292 smp_rmb();
293
294 while (sw_cons != hw_cons) {
295 u16 pkt_cons;
296
297 pkt_cons = TX_BD(sw_cons);
298
299 DP(NETIF_MSG_TX_DONE,
300 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
301 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
302
303 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
304 &pkts_compl, &bytes_compl);
305
306 sw_cons++;
307 }
308
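	/* Report the completed packets/bytes to BQL (byte queue limits) */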
309 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
310
311 txdata->tx_pkt_cons = sw_cons;
312 txdata->tx_bd_cons = bd_cons;
313
314 /* Need to make the tx_bd_cons update visible to start_xmit()
315 * before checking for netif_tx_queue_stopped(). Without the
316 * memory barrier, there is a small possibility that
317 * start_xmit() will miss it and cause the queue to be stopped
318 * forever.
319 * On the other hand we need an rmb() here to ensure the proper
320 * ordering of bit testing in the following
321 * netif_tx_queue_stopped(txq) call.
322 */
323 smp_mb();
324
325 if (unlikely(netif_tx_queue_stopped(txq))) {
326 /* Taking tx_lock() is needed to prevent re-enabling the queue
327 		 * while it's empty. This could have happened if rx_action() gets
328 * suspended in bnx2x_tx_int() after the condition before
329 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
330 *
331 * stops the queue->sees fresh tx_bd_cons->releases the queue->
332 * sends some packets consuming the whole queue again->
333 * stops the queue
334 */
335
336 __netif_tx_lock(txq, smp_processor_id());
337
338 if ((netif_tx_queue_stopped(txq)) &&
339 (bp->state == BNX2X_STATE_OPEN) &&
340 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
341 netif_tx_wake_queue(txq);
342
343 __netif_tx_unlock(txq);
344 }
345 return 0;
346 }
347
348 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
349 u16 idx)
350 {
351 u16 last_max = fp->last_max_sge;
352
353 if (SUB_S16(idx, last_max) > 0)
354 fp->last_max_sge = idx;
355 }
356
357 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
358 u16 sge_len,
359 struct eth_end_agg_rx_cqe *cqe)
360 {
361 struct bnx2x *bp = fp->bp;
362 u16 last_max, last_elem, first_elem;
363 u16 delta = 0;
364 u16 i;
365
366 if (!sge_len)
367 return;
368
369 /* First mark all used pages */
370 for (i = 0; i < sge_len; i++)
371 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
372 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
373
374 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
375 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376
377 /* Here we assume that the last SGE index is the biggest */
378 prefetch((void *)(fp->sge_mask));
379 bnx2x_update_last_max_sge(fp,
380 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
381
382 last_max = RX_SGE(fp->last_max_sge);
383 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
384 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
385
386 /* If ring is not full */
387 if (last_elem + 1 != first_elem)
388 last_elem++;
389
390 /* Now update the prod */
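	/* Advance over fully consumed 64-bit mask elements: an all-zero
	 * element means every SGE it covers has completed, so refill it
	 * (set back to all ones) and move the producer forward by
	 * BIT_VEC64_ELEM_SZ entries.
	 */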
391 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
392 if (likely(fp->sge_mask[i]))
393 break;
394
395 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
396 delta += BIT_VEC64_ELEM_SZ;
397 }
398
399 if (delta > 0) {
400 fp->rx_sge_prod += delta;
401 /* clear page-end entries */
402 bnx2x_clear_sge_mask_next_elems(fp);
403 }
404
405 DP(NETIF_MSG_RX_STATUS,
406 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
407 fp->last_max_sge, fp->rx_sge_prod);
408 }
409
410 /* Get Toeplitz hash value in the skb using the value from the
411 * CQE (calculated by HW).
412 */
413 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
414 const struct eth_fast_path_rx_cqe *cqe,
415 enum pkt_hash_types *rxhash_type)
416 {
417 /* Get Toeplitz hash from CQE */
418 if ((bp->dev->features & NETIF_F_RXHASH) &&
419 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
420 enum eth_rss_hash_type htype;
421
422 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
423 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
424 (htype == TCP_IPV6_HASH_TYPE)) ?
425 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
426
427 return le32_to_cpu(cqe->rss_hash_result);
428 }
429 *rxhash_type = PKT_HASH_TYPE_NONE;
430 return 0;
431 }
432
433 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
434 u16 cons, u16 prod,
435 struct eth_fast_path_rx_cqe *cqe)
436 {
437 struct bnx2x *bp = fp->bp;
438 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
439 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
440 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
441 dma_addr_t mapping;
442 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
443 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
444
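	/* The bin's first_buf holds a spare empty buffer: map it and hand it
	 * to the producer slot, then park the consumer's partially filled
	 * buffer in the bin until the TPA_STOP CQE arrives.
	 */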
445 /* print error if current state != stop */
446 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
447 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
448
449 /* Try to map an empty data buffer from the aggregation info */
450 mapping = dma_map_single(&bp->pdev->dev,
451 first_buf->data + NET_SKB_PAD,
452 fp->rx_buf_size, DMA_FROM_DEVICE);
453 /*
454 * ...if it fails - move the skb from the consumer to the producer
455 * and set the current aggregation state as ERROR to drop it
456 * when TPA_STOP arrives.
457 */
458
459 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
460 /* Move the BD from the consumer to the producer */
461 bnx2x_reuse_rx_data(fp, cons, prod);
462 tpa_info->tpa_state = BNX2X_TPA_ERROR;
463 return;
464 }
465
466 /* move empty data from pool to prod */
467 prod_rx_buf->data = first_buf->data;
468 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
469 /* point prod_bd to new data */
470 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
471 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
472
473 /* move partial skb from cons to pool (don't unmap yet) */
474 *first_buf = *cons_rx_buf;
475
476 /* mark bin state as START */
477 tpa_info->parsing_flags =
478 le16_to_cpu(cqe->pars_flags.flags);
479 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
480 tpa_info->tpa_state = BNX2X_TPA_START;
481 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
482 tpa_info->placement_offset = cqe->placement_offset;
483 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
484 if (fp->mode == TPA_MODE_GRO) {
485 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
486 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
487 tpa_info->gro_size = gro_size;
488 }
489
490 #ifdef BNX2X_STOP_ON_ERROR
491 fp->tpa_queue_used |= (1 << queue);
492 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
493 fp->tpa_queue_used);
494 #endif
495 }
496
497 /* Timestamp option length allowed for TPA aggregation:
498 *
499 * nop nop kind length echo val
500 */
501 #define TPA_TSTAMP_OPT_LEN 12
502 /**
503 * bnx2x_set_gro_params - compute GRO values
504 *
505 * @skb: packet skb
506 * @parsing_flags: parsing flags from the START CQE
507 * @len_on_bd: total length of the first packet for the
508 * aggregation.
509 * @pkt_len: length of all segments
510 *
511  * Approximates the MSS for this aggregation using its
512  * first packet, and computes the number of aggregated
513  * segments and the gso_type.
514 */
515 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
516 u16 len_on_bd, unsigned int pkt_len,
517 u16 num_of_coalesced_segs)
518 {
519 /* TPA aggregation won't have either IP options or TCP options
520 * other than timestamp or IPv6 extension headers.
521 */
522 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
523
524 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
525 PRS_FLAG_OVERETH_IPV6) {
526 hdrs_len += sizeof(struct ipv6hdr);
527 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
528 } else {
529 hdrs_len += sizeof(struct iphdr);
530 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
531 }
532
533 	/* Check if there was a TCP timestamp; if there is, it will
534 	 * always be 12 bytes long: nop nop kind length echo val.
535 	 *
536 	 * Otherwise the FW would close the aggregation.
537 */
538 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
539 hdrs_len += TPA_TSTAMP_OPT_LEN;
540
541 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
542
543 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
544 * to skb_shinfo(skb)->gso_segs
545 */
546 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
547 }
548
549 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
550 u16 index, gfp_t gfp_mask)
551 {
552 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
553 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
554 struct bnx2x_alloc_pool *pool = &fp->page_pool;
555 dma_addr_t mapping;
556
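	/* The pool carves SGE_PAGE_SIZE chunks out of one higher-order page;
	 * allocate a fresh page once the current one cannot supply another
	 * full chunk.
	 */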
557 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
558
559 /* put page reference used by the memory pool, since we
560 * won't be using this page as the mempool anymore.
561 */
562 if (pool->page)
563 put_page(pool->page);
564
565 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
566 if (unlikely(!pool->page)) {
567 BNX2X_ERR("Can't alloc sge\n");
568 return -ENOMEM;
569 }
570
571 pool->offset = 0;
572 }
573
574 mapping = dma_map_page(&bp->pdev->dev, pool->page,
575 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
576 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
577 BNX2X_ERR("Can't map sge\n");
578 return -ENOMEM;
579 }
580
581 get_page(pool->page);
582 sw_buf->page = pool->page;
583 sw_buf->offset = pool->offset;
584
585 dma_unmap_addr_set(sw_buf, mapping, mapping);
586
587 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
588 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
589
590 pool->offset += SGE_PAGE_SIZE;
591
592 return 0;
593 }
594
595 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
596 struct bnx2x_agg_info *tpa_info,
597 u16 pages,
598 struct sk_buff *skb,
599 struct eth_end_agg_rx_cqe *cqe,
600 u16 cqe_idx)
601 {
602 struct sw_rx_page *rx_pg, old_rx_pg;
603 u32 i, frag_len, frag_size;
604 int err, j, frag_id = 0;
605 u16 len_on_bd = tpa_info->len_on_bd;
606 u16 full_page = 0, gro_size = 0;
607
608 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
609
610 if (fp->mode == TPA_MODE_GRO) {
611 gro_size = tpa_info->gro_size;
612 full_page = tpa_info->full_page;
613 }
614
615 /* This is needed in order to enable forwarding support */
616 if (frag_size)
617 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
618 le16_to_cpu(cqe->pkt_len),
619 le16_to_cpu(cqe->num_of_coalesced_segs));
620
621 #ifdef BNX2X_STOP_ON_ERROR
622 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
623 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
624 pages, cqe_idx);
625 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
626 bnx2x_panic();
627 return -EINVAL;
628 }
629 #endif
630
631 /* Run through the SGL and compose the fragmented skb */
632 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
633 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
634
635 /* FW gives the indices of the SGE as if the ring is an array
636 (meaning that "next" element will consume 2 indices) */
637 if (fp->mode == TPA_MODE_GRO)
638 frag_len = min_t(u32, frag_size, (u32)full_page);
639 else /* LRO */
640 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
641
642 rx_pg = &fp->rx_page_ring[sge_idx];
643 old_rx_pg = *rx_pg;
644
645 /* If we fail to allocate a substitute page, we simply stop
646 where we are and drop the whole packet */
647 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
648 if (unlikely(err)) {
649 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
650 return err;
651 }
652
653 dma_unmap_page(&bp->pdev->dev,
654 dma_unmap_addr(&old_rx_pg, mapping),
655 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
656 /* Add one frag and update the appropriate fields in the skb */
657 if (fp->mode == TPA_MODE_LRO)
658 skb_fill_page_desc(skb, j, old_rx_pg.page,
659 old_rx_pg.offset, frag_len);
660 else { /* GRO */
661 int rem;
662 int offset = 0;
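			/* Split the SGE data into gro_size-sized frags; every
			 * additional frag that references the same page needs
			 * its own page reference.
			 */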
663 for (rem = frag_len; rem > 0; rem -= gro_size) {
664 int len = rem > gro_size ? gro_size : rem;
665 skb_fill_page_desc(skb, frag_id++,
666 old_rx_pg.page,
667 old_rx_pg.offset + offset,
668 len);
669 if (offset)
670 get_page(old_rx_pg.page);
671 offset += len;
672 }
673 }
674
675 skb->data_len += frag_len;
676 skb->truesize += SGE_PAGES;
677 skb->len += frag_len;
678
679 frag_size -= frag_len;
680 }
681
682 return 0;
683 }
684
685 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
686 {
687 if (fp->rx_frag_size)
688 skb_free_frag(data);
689 else
690 kfree(data);
691 }
692
693 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
694 {
695 if (fp->rx_frag_size) {
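		/* Blocking (GFP_KERNEL) requests take the full-page path,
		 * since netdev_alloc_frag() is meant for atomic context.
		 */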
696 /* GFP_KERNEL allocations are used only during initialization */
697 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
698 return (void *)__get_free_page(gfp_mask);
699
700 return netdev_alloc_frag(fp->rx_frag_size);
701 }
702
703 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
704 }
705
706 #ifdef CONFIG_INET
707 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
708 {
709 const struct iphdr *iph = ip_hdr(skb);
710 struct tcphdr *th;
711
712 skb_set_transport_header(skb, sizeof(struct iphdr));
713 th = tcp_hdr(skb);
714
715 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
716 iph->saddr, iph->daddr, 0);
717 }
718
719 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
720 {
721 struct ipv6hdr *iph = ipv6_hdr(skb);
722 struct tcphdr *th;
723
724 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
725 th = tcp_hdr(skb);
726
727 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
728 &iph->saddr, &iph->daddr, 0);
729 }
730
731 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
732 void (*gro_func)(struct bnx2x*, struct sk_buff*))
733 {
734 skb_set_network_header(skb, 0);
735 gro_func(bp, skb);
736 tcp_gro_complete(skb);
737 }
738 #endif
739
740 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
741 struct sk_buff *skb)
742 {
743 #ifdef CONFIG_INET
744 if (skb_shinfo(skb)->gso_size) {
745 switch (be16_to_cpu(skb->protocol)) {
746 case ETH_P_IP:
747 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
748 break;
749 case ETH_P_IPV6:
750 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
751 break;
752 default:
753 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
754 be16_to_cpu(skb->protocol));
755 }
756 }
757 #endif
758 skb_record_rx_queue(skb, fp->rx_queue);
759 napi_gro_receive(&fp->napi, skb);
760 }
761
762 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
763 struct bnx2x_agg_info *tpa_info,
764 u16 pages,
765 struct eth_end_agg_rx_cqe *cqe,
766 u16 cqe_idx)
767 {
768 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
769 u8 pad = tpa_info->placement_offset;
770 u16 len = tpa_info->len_on_bd;
771 struct sk_buff *skb = NULL;
772 u8 *new_data, *data = rx_buf->data;
773 u8 old_tpa_state = tpa_info->tpa_state;
774
775 tpa_info->tpa_state = BNX2X_TPA_STOP;
776
777 	/* If there was an error during the handling of the TPA_START -
778 * drop this aggregation.
779 */
780 if (old_tpa_state == BNX2X_TPA_ERROR)
781 goto drop;
782
783 /* Try to allocate the new data */
784 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
785 /* Unmap skb in the pool anyway, as we are going to change
786 pool entry status to BNX2X_TPA_STOP even if new skb allocation
787 fails. */
788 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
789 fp->rx_buf_size, DMA_FROM_DEVICE);
790 if (likely(new_data))
791 skb = build_skb(data, fp->rx_frag_size);
792
793 if (likely(skb)) {
794 #ifdef BNX2X_STOP_ON_ERROR
795 if (pad + len > fp->rx_buf_size) {
796 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
797 pad, len, fp->rx_buf_size);
798 bnx2x_panic();
799 return;
800 }
801 #endif
802
803 skb_reserve(skb, pad + NET_SKB_PAD);
804 skb_put(skb, len);
805 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
806
807 skb->protocol = eth_type_trans(skb, bp->dev);
808 skb->ip_summed = CHECKSUM_UNNECESSARY;
809
810 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
811 skb, cqe, cqe_idx)) {
812 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
813 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
814 bnx2x_gro_receive(bp, fp, skb);
815 } else {
816 DP(NETIF_MSG_RX_STATUS,
817 "Failed to allocate new pages - dropping packet!\n");
818 dev_kfree_skb_any(skb);
819 }
820
821 /* put new data in bin */
822 rx_buf->data = new_data;
823
824 return;
825 }
826 if (new_data)
827 bnx2x_frag_free(fp, new_data);
828 drop:
829 /* drop the packet and keep the buffer in the bin */
830 DP(NETIF_MSG_RX_STATUS,
831 "Failed to allocate or map a new skb - dropping packet!\n");
832 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
833 }
834
835 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
836 u16 index, gfp_t gfp_mask)
837 {
838 u8 *data;
839 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
840 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
841 dma_addr_t mapping;
842
843 data = bnx2x_frag_alloc(fp, gfp_mask);
844 if (unlikely(data == NULL))
845 return -ENOMEM;
846
847 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
848 fp->rx_buf_size,
849 DMA_FROM_DEVICE);
850 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
851 bnx2x_frag_free(fp, data);
852 BNX2X_ERR("Can't map rx data\n");
853 return -ENOMEM;
854 }
855
856 rx_buf->data = data;
857 dma_unmap_addr_set(rx_buf, mapping, mapping);
858
859 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
860 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
861
862 return 0;
863 }
864
865 static
866 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
867 struct bnx2x_fastpath *fp,
868 struct bnx2x_eth_q_stats *qstats)
869 {
870 /* Do nothing if no L4 csum validation was done.
871 * We do not check whether IP csum was validated. For IPv4 we assume
872 * that if the card got as far as validating the L4 csum, it also
873 * validated the IP csum. IPv6 has no IP csum.
874 */
875 if (cqe->fast_path_cqe.status_flags &
876 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
877 return;
878
879 /* If L4 validation was done, check if an error was found. */
880
881 if (cqe->fast_path_cqe.type_error_flags &
882 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
883 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
884 qstats->hw_csum_err++;
885 else
886 skb->ip_summed = CHECKSUM_UNNECESSARY;
887 }
888
889 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
890 {
891 struct bnx2x *bp = fp->bp;
892 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
893 u16 sw_comp_cons, sw_comp_prod;
894 int rx_pkt = 0;
895 union eth_rx_cqe *cqe;
896 struct eth_fast_path_rx_cqe *cqe_fp;
897
898 #ifdef BNX2X_STOP_ON_ERROR
899 if (unlikely(bp->panic))
900 return 0;
901 #endif
902 if (budget <= 0)
903 return rx_pkt;
904
905 bd_cons = fp->rx_bd_cons;
906 bd_prod = fp->rx_bd_prod;
907 bd_prod_fw = bd_prod;
908 sw_comp_cons = fp->rx_comp_cons;
909 sw_comp_prod = fp->rx_comp_prod;
910
911 comp_ring_cons = RCQ_BD(sw_comp_cons);
912 cqe = &fp->rx_comp_ring[comp_ring_cons];
913 cqe_fp = &cqe->fast_path_cqe;
914
915 DP(NETIF_MSG_RX_STATUS,
916 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
917
918 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
919 struct sw_rx_bd *rx_buf = NULL;
920 struct sk_buff *skb;
921 u8 cqe_fp_flags;
922 enum eth_rx_cqe_type cqe_fp_type;
923 u16 len, pad, queue;
924 u8 *data;
925 u32 rxhash;
926 enum pkt_hash_types rxhash_type;
927
928 #ifdef BNX2X_STOP_ON_ERROR
929 if (unlikely(bp->panic))
930 return 0;
931 #endif
932
933 bd_prod = RX_BD(bd_prod);
934 bd_cons = RX_BD(bd_cons);
935
936 /* A rmb() is required to ensure that the CQE is not read
937 * before it is written by the adapter DMA. PCI ordering
938 * rules will make sure the other fields are written before
939 * the marker at the end of struct eth_fast_path_rx_cqe
940 * but without rmb() a weakly ordered processor can process
941 * stale data. Without the barrier TPA state-machine might
942 * enter inconsistent state and kernel stack might be
943 * provided with incorrect packet description - these lead
944 		 * to various kernel crashes.
945 */
946 rmb();
947
948 cqe_fp_flags = cqe_fp->type_error_flags;
949 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
950
951 DP(NETIF_MSG_RX_STATUS,
952 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
953 CQE_TYPE(cqe_fp_flags),
954 cqe_fp_flags, cqe_fp->status_flags,
955 le32_to_cpu(cqe_fp->rss_hash_result),
956 le16_to_cpu(cqe_fp->vlan_tag),
957 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
958
959 /* is this a slowpath msg? */
960 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
961 bnx2x_sp_event(fp, cqe);
962 goto next_cqe;
963 }
964
965 rx_buf = &fp->rx_buf_ring[bd_cons];
966 data = rx_buf->data;
967
968 if (!CQE_TYPE_FAST(cqe_fp_type)) {
969 struct bnx2x_agg_info *tpa_info;
970 u16 frag_size, pages;
971 #ifdef BNX2X_STOP_ON_ERROR
972 /* sanity check */
973 if (fp->mode == TPA_MODE_DISABLED &&
974 (CQE_TYPE_START(cqe_fp_type) ||
975 CQE_TYPE_STOP(cqe_fp_type)))
976 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
977 CQE_TYPE(cqe_fp_type));
978 #endif
979
980 if (CQE_TYPE_START(cqe_fp_type)) {
981 u16 queue = cqe_fp->queue_index;
982 DP(NETIF_MSG_RX_STATUS,
983 "calling tpa_start on queue %d\n",
984 queue);
985
986 bnx2x_tpa_start(fp, queue,
987 bd_cons, bd_prod,
988 cqe_fp);
989
990 goto next_rx;
991 }
992 queue = cqe->end_agg_cqe.queue_index;
993 tpa_info = &fp->tpa_info[queue];
994 DP(NETIF_MSG_RX_STATUS,
995 "calling tpa_stop on queue %d\n",
996 queue);
997
998 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
999 tpa_info->len_on_bd;
1000
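			/* Number of SGE pages holding the aggregated payload:
			 * GRO aggregations are built from full_page-sized
			 * chunks, LRO aggregations from whole SGE pages.
			 */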
1001 if (fp->mode == TPA_MODE_GRO)
1002 pages = (frag_size + tpa_info->full_page - 1) /
1003 tpa_info->full_page;
1004 else
1005 pages = SGE_PAGE_ALIGN(frag_size) >>
1006 SGE_PAGE_SHIFT;
1007
1008 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1009 &cqe->end_agg_cqe, comp_ring_cons);
1010 #ifdef BNX2X_STOP_ON_ERROR
1011 if (bp->panic)
1012 return 0;
1013 #endif
1014
1015 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1016 goto next_cqe;
1017 }
1018 /* non TPA */
1019 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1020 pad = cqe_fp->placement_offset;
1021 dma_sync_single_for_cpu(&bp->pdev->dev,
1022 dma_unmap_addr(rx_buf, mapping),
1023 pad + RX_COPY_THRESH,
1024 DMA_FROM_DEVICE);
1025 pad += NET_SKB_PAD;
1026 prefetch(data + pad); /* speedup eth_type_trans() */
1027 /* is this an error packet? */
1028 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1029 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1030 "ERROR flags %x rx packet %u\n",
1031 cqe_fp_flags, sw_comp_cons);
1032 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1033 goto reuse_rx;
1034 }
1035
1036 /* Since we don't have a jumbo ring
1037 * copy small packets if mtu > 1500
1038 */
1039 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1040 (len <= RX_COPY_THRESH)) {
1041 skb = napi_alloc_skb(&fp->napi, len);
1042 if (skb == NULL) {
1043 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1044 "ERROR packet dropped because of alloc failure\n");
1045 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1046 goto reuse_rx;
1047 }
1048 memcpy(skb->data, data + pad, len);
1049 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1050 } else {
1051 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1052 GFP_ATOMIC) == 0)) {
1053 dma_unmap_single(&bp->pdev->dev,
1054 dma_unmap_addr(rx_buf, mapping),
1055 fp->rx_buf_size,
1056 DMA_FROM_DEVICE);
1057 skb = build_skb(data, fp->rx_frag_size);
1058 if (unlikely(!skb)) {
1059 bnx2x_frag_free(fp, data);
1060 bnx2x_fp_qstats(bp, fp)->
1061 rx_skb_alloc_failed++;
1062 goto next_rx;
1063 }
1064 skb_reserve(skb, pad);
1065 } else {
1066 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1067 "ERROR packet dropped because of alloc failure\n");
1068 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1069 reuse_rx:
1070 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1071 goto next_rx;
1072 }
1073 }
1074
1075 skb_put(skb, len);
1076 skb->protocol = eth_type_trans(skb, bp->dev);
1077
1078 		/* Set Toeplitz hash for a non-LRO skb */
1079 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1080 skb_set_hash(skb, rxhash, rxhash_type);
1081
1082 skb_checksum_none_assert(skb);
1083
1084 if (bp->dev->features & NETIF_F_RXCSUM)
1085 bnx2x_csum_validate(skb, cqe, fp,
1086 bnx2x_fp_qstats(bp, fp));
1087
1088 skb_record_rx_queue(skb, fp->rx_queue);
1089
1090 /* Check if this packet was timestamped */
1091 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1092 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1093 bnx2x_set_rx_ts(bp, skb);
1094
1095 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1096 PARSING_FLAGS_VLAN)
1097 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1098 le16_to_cpu(cqe_fp->vlan_tag));
1099
1100 skb_mark_napi_id(skb, &fp->napi);
1101
1102 if (bnx2x_fp_ll_polling(fp))
1103 netif_receive_skb(skb);
1104 else
1105 napi_gro_receive(&fp->napi, skb);
1106 next_rx:
1107 rx_buf->data = NULL;
1108
1109 bd_cons = NEXT_RX_IDX(bd_cons);
1110 bd_prod = NEXT_RX_IDX(bd_prod);
1111 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1112 rx_pkt++;
1113 next_cqe:
1114 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1115 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1116
1117 /* mark CQE as free */
1118 BNX2X_SEED_CQE(cqe_fp);
1119
1120 if (rx_pkt == budget)
1121 break;
1122
1123 comp_ring_cons = RCQ_BD(sw_comp_cons);
1124 cqe = &fp->rx_comp_ring[comp_ring_cons];
1125 cqe_fp = &cqe->fast_path_cqe;
1126 } /* while */
1127
1128 fp->rx_bd_cons = bd_cons;
1129 fp->rx_bd_prod = bd_prod_fw;
1130 fp->rx_comp_cons = sw_comp_cons;
1131 fp->rx_comp_prod = sw_comp_prod;
1132
1133 /* Update producers */
1134 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1135 fp->rx_sge_prod);
1136
1137 fp->rx_pkt += rx_pkt;
1138 fp->rx_calls++;
1139
1140 return rx_pkt;
1141 }
1142
1143 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1144 {
1145 struct bnx2x_fastpath *fp = fp_cookie;
1146 struct bnx2x *bp = fp->bp;
1147 u8 cos;
1148
1149 DP(NETIF_MSG_INTR,
1150 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1151 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1152
1153 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1154
1155 #ifdef BNX2X_STOP_ON_ERROR
1156 if (unlikely(bp->panic))
1157 return IRQ_HANDLED;
1158 #endif
1159
1160 /* Handle Rx and Tx according to MSI-X vector */
1161 for_each_cos_in_tx_queue(fp, cos)
1162 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1163
1164 prefetch(&fp->sb_running_index[SM_RX_ID]);
1165 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1166
1167 return IRQ_HANDLED;
1168 }
1169
1170 /* HW Lock for shared dual port PHYs */
1171 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1172 {
1173 mutex_lock(&bp->port.phy_mutex);
1174
1175 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1176 }
1177
1178 void bnx2x_release_phy_lock(struct bnx2x *bp)
1179 {
1180 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1181
1182 mutex_unlock(&bp->port.phy_mutex);
1183 }
1184
1185 /* calculates MF speed according to current linespeed and MF configuration */
1186 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1187 {
1188 u16 line_speed = bp->link_vars.line_speed;
1189 if (IS_MF(bp)) {
1190 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1191 bp->mf_config[BP_VN(bp)]);
1192
1193 /* Calculate the current MAX line speed limit for the MF
1194 * devices
1195 */
1196 if (IS_MF_PERCENT_BW(bp))
1197 line_speed = (line_speed * maxCfg) / 100;
1198 else { /* SD mode */
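			/* in SD mode maxCfg is given in units of 100 Mbps */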
1199 u16 vn_max_rate = maxCfg * 100;
1200
1201 if (vn_max_rate < line_speed)
1202 line_speed = vn_max_rate;
1203 }
1204 }
1205
1206 return line_speed;
1207 }
1208
1209 /**
1210 * bnx2x_fill_report_data - fill link report data to report
1211 *
1212 * @bp: driver handle
1213 * @data: link state to update
1214 *
1215  * It uses non-atomic bit operations because it is called under the mutex.
1216 */
1217 static void bnx2x_fill_report_data(struct bnx2x *bp,
1218 struct bnx2x_link_report_data *data)
1219 {
1220 memset(data, 0, sizeof(*data));
1221
1222 if (IS_PF(bp)) {
1223 /* Fill the report data: effective line speed */
1224 data->line_speed = bnx2x_get_mf_speed(bp);
1225
1226 /* Link is down */
1227 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1228 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1229 &data->link_report_flags);
1230
1231 if (!BNX2X_NUM_ETH_QUEUES(bp))
1232 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1233 &data->link_report_flags);
1234
1235 /* Full DUPLEX */
1236 if (bp->link_vars.duplex == DUPLEX_FULL)
1237 __set_bit(BNX2X_LINK_REPORT_FD,
1238 &data->link_report_flags);
1239
1240 /* Rx Flow Control is ON */
1241 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1242 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1243 &data->link_report_flags);
1244
1245 /* Tx Flow Control is ON */
1246 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1247 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1248 &data->link_report_flags);
1249 } else { /* VF */
1250 *data = bp->vf_link_vars;
1251 }
1252 }
1253
1254 /**
1255 * bnx2x_link_report - report link status to OS.
1256 *
1257 * @bp: driver handle
1258 *
1259 * Calls the __bnx2x_link_report() under the same locking scheme
1260  * Calls __bnx2x_link_report() under the same locking scheme
1261  * as the link/PHY state managing code to ensure consistent link
1262 */
1263
1264 void bnx2x_link_report(struct bnx2x *bp)
1265 {
1266 bnx2x_acquire_phy_lock(bp);
1267 __bnx2x_link_report(bp);
1268 bnx2x_release_phy_lock(bp);
1269 }
1270
1271 /**
1272 * __bnx2x_link_report - report link status to OS.
1273 *
1274 * @bp: driver handle
1275 *
1276  * Non-atomic implementation.
1277 * Should be called under the phy_lock.
1278 */
1279 void __bnx2x_link_report(struct bnx2x *bp)
1280 {
1281 struct bnx2x_link_report_data cur_data;
1282
1283 if (bp->force_link_down) {
1284 bp->link_vars.link_up = 0;
1285 return;
1286 }
1287
1288 /* reread mf_cfg */
1289 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1290 bnx2x_read_mf_cfg(bp);
1291
1292 /* Read the current link report info */
1293 bnx2x_fill_report_data(bp, &cur_data);
1294
1295 /* Don't report link down or exactly the same link status twice */
1296 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1297 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1298 &bp->last_reported_link.link_report_flags) &&
1299 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1300 &cur_data.link_report_flags)))
1301 return;
1302
1303 bp->link_cnt++;
1304
1305 	/* We are going to report new link parameters now -
1306 * remember the current data for the next time.
1307 */
1308 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1309
1310 /* propagate status to VFs */
1311 if (IS_PF(bp))
1312 bnx2x_iov_link_update(bp);
1313
1314 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1315 &cur_data.link_report_flags)) {
1316 netif_carrier_off(bp->dev);
1317 netdev_err(bp->dev, "NIC Link is Down\n");
1318 return;
1319 } else {
1320 const char *duplex;
1321 const char *flow;
1322
1323 netif_carrier_on(bp->dev);
1324
1325 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1326 &cur_data.link_report_flags))
1327 duplex = "full";
1328 else
1329 duplex = "half";
1330
1331 		/* Handle the FC at the end so that only these flags could
1332 		 * possibly be set. This way we may easily check if there is no
1333 		 * FC enabled.
1334 */
1335 if (cur_data.link_report_flags) {
1336 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1337 &cur_data.link_report_flags)) {
1338 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1339 &cur_data.link_report_flags))
1340 flow = "ON - receive & transmit";
1341 else
1342 flow = "ON - receive";
1343 } else {
1344 flow = "ON - transmit";
1345 }
1346 } else {
1347 flow = "none";
1348 }
1349 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1350 cur_data.line_speed, duplex, flow);
1351 }
1352 }
1353
1354 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1355 {
1356 int i;
1357
1358 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1359 struct eth_rx_sge *sge;
1360
1361 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1362 sge->addr_hi =
1363 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1364 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1365
1366 sge->addr_lo =
1367 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1368 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1369 }
1370 }
1371
1372 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1373 struct bnx2x_fastpath *fp, int last)
1374 {
1375 int i;
1376
1377 for (i = 0; i < last; i++) {
1378 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1379 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1380 u8 *data = first_buf->data;
1381
1382 if (data == NULL) {
1383 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1384 continue;
1385 }
1386 if (tpa_info->tpa_state == BNX2X_TPA_START)
1387 dma_unmap_single(&bp->pdev->dev,
1388 dma_unmap_addr(first_buf, mapping),
1389 fp->rx_buf_size, DMA_FROM_DEVICE);
1390 bnx2x_frag_free(fp, data);
1391 first_buf->data = NULL;
1392 }
1393 }
1394
1395 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1396 {
1397 int j;
1398
1399 for_each_rx_queue_cnic(bp, j) {
1400 struct bnx2x_fastpath *fp = &bp->fp[j];
1401
1402 fp->rx_bd_cons = 0;
1403
1404 /* Activate BD ring */
1405 /* Warning!
1406 * this will generate an interrupt (to the TSTORM)
1407 * must only be done after chip is initialized
1408 */
1409 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1410 fp->rx_sge_prod);
1411 }
1412 }
1413
1414 void bnx2x_init_rx_rings(struct bnx2x *bp)
1415 {
1416 int func = BP_FUNC(bp);
1417 u16 ring_prod;
1418 int i, j;
1419
1420 /* Allocate TPA resources */
1421 for_each_eth_queue(bp, j) {
1422 struct bnx2x_fastpath *fp = &bp->fp[j];
1423
1424 DP(NETIF_MSG_IFUP,
1425 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1426
1427 if (fp->mode != TPA_MODE_DISABLED) {
1428 /* Fill the per-aggregation pool */
1429 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1430 struct bnx2x_agg_info *tpa_info =
1431 &fp->tpa_info[i];
1432 struct sw_rx_bd *first_buf =
1433 &tpa_info->first_buf;
1434
1435 first_buf->data =
1436 bnx2x_frag_alloc(fp, GFP_KERNEL);
1437 if (!first_buf->data) {
1438 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1439 j);
1440 bnx2x_free_tpa_pool(bp, fp, i);
1441 fp->mode = TPA_MODE_DISABLED;
1442 break;
1443 }
1444 dma_unmap_addr_set(first_buf, mapping, 0);
1445 tpa_info->tpa_state = BNX2X_TPA_STOP;
1446 }
1447
1448 /* "next page" elements initialization */
1449 bnx2x_set_next_page_sgl(fp);
1450
1451 /* set SGEs bit mask */
1452 bnx2x_init_sge_ring_bit_mask(fp);
1453
1454 /* Allocate SGEs and initialize the ring elements */
1455 for (i = 0, ring_prod = 0;
1456 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1457
1458 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1459 GFP_KERNEL) < 0) {
1460 BNX2X_ERR("was only able to allocate %d rx sges\n",
1461 i);
1462 BNX2X_ERR("disabling TPA for queue[%d]\n",
1463 j);
1464 /* Cleanup already allocated elements */
1465 bnx2x_free_rx_sge_range(bp, fp,
1466 ring_prod);
1467 bnx2x_free_tpa_pool(bp, fp,
1468 MAX_AGG_QS(bp));
1469 fp->mode = TPA_MODE_DISABLED;
1470 ring_prod = 0;
1471 break;
1472 }
1473 ring_prod = NEXT_SGE_IDX(ring_prod);
1474 }
1475
1476 fp->rx_sge_prod = ring_prod;
1477 }
1478 }
1479
1480 for_each_eth_queue(bp, j) {
1481 struct bnx2x_fastpath *fp = &bp->fp[j];
1482
1483 fp->rx_bd_cons = 0;
1484
1485 /* Activate BD ring */
1486 /* Warning!
1487 * this will generate an interrupt (to the TSTORM)
1488 * must only be done after chip is initialized
1489 */
1490 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1491 fp->rx_sge_prod);
1492
1493 if (j != 0)
1494 continue;
1495
1496 if (CHIP_IS_E1(bp)) {
1497 REG_WR(bp, BAR_USTRORM_INTMEM +
1498 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1499 U64_LO(fp->rx_comp_mapping));
1500 REG_WR(bp, BAR_USTRORM_INTMEM +
1501 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1502 U64_HI(fp->rx_comp_mapping));
1503 }
1504 }
1505 }
1506
1507 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1508 {
1509 u8 cos;
1510 struct bnx2x *bp = fp->bp;
1511
1512 for_each_cos_in_tx_queue(fp, cos) {
1513 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1514 unsigned pkts_compl = 0, bytes_compl = 0;
1515
1516 u16 sw_prod = txdata->tx_pkt_prod;
1517 u16 sw_cons = txdata->tx_pkt_cons;
1518
1519 while (sw_cons != sw_prod) {
1520 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1521 &pkts_compl, &bytes_compl);
1522 sw_cons++;
1523 }
1524
1525 netdev_tx_reset_queue(
1526 netdev_get_tx_queue(bp->dev,
1527 txdata->txq_index));
1528 }
1529 }
1530
1531 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1532 {
1533 int i;
1534
1535 for_each_tx_queue_cnic(bp, i) {
1536 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1537 }
1538 }
1539
1540 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1541 {
1542 int i;
1543
1544 for_each_eth_queue(bp, i) {
1545 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1546 }
1547 }
1548
1549 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1550 {
1551 struct bnx2x *bp = fp->bp;
1552 int i;
1553
1554 /* ring wasn't allocated */
1555 if (fp->rx_buf_ring == NULL)
1556 return;
1557
1558 for (i = 0; i < NUM_RX_BD; i++) {
1559 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1560 u8 *data = rx_buf->data;
1561
1562 if (data == NULL)
1563 continue;
1564 dma_unmap_single(&bp->pdev->dev,
1565 dma_unmap_addr(rx_buf, mapping),
1566 fp->rx_buf_size, DMA_FROM_DEVICE);
1567
1568 rx_buf->data = NULL;
1569 bnx2x_frag_free(fp, data);
1570 }
1571 }
1572
1573 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1574 {
1575 int j;
1576
1577 for_each_rx_queue_cnic(bp, j) {
1578 bnx2x_free_rx_bds(&bp->fp[j]);
1579 }
1580 }
1581
1582 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1583 {
1584 int j;
1585
1586 for_each_eth_queue(bp, j) {
1587 struct bnx2x_fastpath *fp = &bp->fp[j];
1588
1589 bnx2x_free_rx_bds(fp);
1590
1591 if (fp->mode != TPA_MODE_DISABLED)
1592 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1593 }
1594 }
1595
1596 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1597 {
1598 bnx2x_free_tx_skbs_cnic(bp);
1599 bnx2x_free_rx_skbs_cnic(bp);
1600 }
1601
1602 void bnx2x_free_skbs(struct bnx2x *bp)
1603 {
1604 bnx2x_free_tx_skbs(bp);
1605 bnx2x_free_rx_skbs(bp);
1606 }
1607
1608 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1609 {
1610 /* load old values */
1611 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1612
1613 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1614 /* leave all but MAX value */
1615 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1616
1617 /* set new MAX value */
1618 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1619 & FUNC_MF_CFG_MAX_BW_MASK;
1620
1621 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1622 }
1623 }
1624
1625 /**
1626 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1627 *
1628 * @bp: driver handle
1629 * @nvecs: number of vectors to be released
1630 */
1631 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1632 {
1633 int i, offset = 0;
1634
1635 if (nvecs == offset)
1636 return;
1637
1638 /* VFs don't have a default SB */
1639 if (IS_PF(bp)) {
1640 free_irq(bp->msix_table[offset].vector, bp->dev);
1641 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1642 bp->msix_table[offset].vector);
1643 offset++;
1644 }
1645
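	/* The CNIC vector is not requested by bnx2x_req_msix_irqs(), so only
	 * skip over its slot here.
	 */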
1646 if (CNIC_SUPPORT(bp)) {
1647 if (nvecs == offset)
1648 return;
1649 offset++;
1650 }
1651
1652 for_each_eth_queue(bp, i) {
1653 if (nvecs == offset)
1654 return;
1655 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1656 i, bp->msix_table[offset].vector);
1657
1658 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1659 }
1660 }
1661
1662 void bnx2x_free_irq(struct bnx2x *bp)
1663 {
1664 if (bp->flags & USING_MSIX_FLAG &&
1665 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1666 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1667
1668 /* vfs don't have a default status block */
1669 if (IS_PF(bp))
1670 nvecs++;
1671
1672 bnx2x_free_msix_irqs(bp, nvecs);
1673 } else {
1674 free_irq(bp->dev->irq, bp->dev);
1675 }
1676 }
1677
1678 int bnx2x_enable_msix(struct bnx2x *bp)
1679 {
1680 int msix_vec = 0, i, rc;
1681
1682 /* VFs don't have a default status block */
1683 if (IS_PF(bp)) {
1684 bp->msix_table[msix_vec].entry = msix_vec;
1685 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1686 bp->msix_table[0].entry);
1687 msix_vec++;
1688 }
1689
1690 /* Cnic requires an msix vector for itself */
1691 if (CNIC_SUPPORT(bp)) {
1692 bp->msix_table[msix_vec].entry = msix_vec;
1693 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1694 msix_vec, bp->msix_table[msix_vec].entry);
1695 msix_vec++;
1696 }
1697
1698 /* We need separate vectors for ETH queues only (not FCoE) */
1699 for_each_eth_queue(bp, i) {
1700 bp->msix_table[msix_vec].entry = msix_vec;
1701 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1702 msix_vec, msix_vec, i);
1703 msix_vec++;
1704 }
1705
1706 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1707 msix_vec);
1708
1709 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1710 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
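	/* pci_enable_msix_range() returns the number of vectors allocated
	 * (between the requested minimum and maximum) or a negative errno;
	 * -ENOSPC means even the minimum could not be satisfied.
	 */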
1711 /*
1712 * reconfigure number of tx/rx queues according to available
1713 * MSI-X vectors
1714 */
1715 if (rc == -ENOSPC) {
1716 /* Get by with single vector */
1717 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1718 if (rc < 0) {
1719 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1720 rc);
1721 goto no_msix;
1722 }
1723
1724 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1725 bp->flags |= USING_SINGLE_MSIX_FLAG;
1726
1727 BNX2X_DEV_INFO("set number of queues to 1\n");
1728 bp->num_ethernet_queues = 1;
1729 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1730 } else if (rc < 0) {
1731 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1732 goto no_msix;
1733 } else if (rc < msix_vec) {
1734 		/* how many fewer vectors will we have? */
1735 int diff = msix_vec - rc;
1736
1737 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1738
1739 /*
1740 * decrease number of queues by number of unallocated entries
1741 */
1742 bp->num_ethernet_queues -= diff;
1743 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1744
1745 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1746 bp->num_queues);
1747 }
1748
1749 bp->flags |= USING_MSIX_FLAG;
1750
1751 return 0;
1752
1753 no_msix:
1754 	/* fall back to INTx if not enough memory */
1755 if (rc == -ENOMEM)
1756 bp->flags |= DISABLE_MSI_FLAG;
1757
1758 return rc;
1759 }
1760
1761 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1762 {
1763 int i, rc, offset = 0;
1764
1765 /* no default status block for vf */
1766 if (IS_PF(bp)) {
1767 rc = request_irq(bp->msix_table[offset++].vector,
1768 bnx2x_msix_sp_int, 0,
1769 bp->dev->name, bp->dev);
1770 if (rc) {
1771 BNX2X_ERR("request sp irq failed\n");
1772 return -EBUSY;
1773 }
1774 }
1775
1776 if (CNIC_SUPPORT(bp))
1777 offset++;
1778
1779 for_each_eth_queue(bp, i) {
1780 struct bnx2x_fastpath *fp = &bp->fp[i];
1781 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1782 bp->dev->name, i);
1783
1784 rc = request_irq(bp->msix_table[offset].vector,
1785 bnx2x_msix_fp_int, 0, fp->name, fp);
1786 if (rc) {
1787 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1788 bp->msix_table[offset].vector, rc);
1789 bnx2x_free_msix_irqs(bp, offset);
1790 return -EBUSY;
1791 }
1792
1793 offset++;
1794 }
1795
1796 i = BNX2X_NUM_ETH_QUEUES(bp);
1797 if (IS_PF(bp)) {
1798 offset = 1 + CNIC_SUPPORT(bp);
1799 netdev_info(bp->dev,
1800 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1801 bp->msix_table[0].vector,
1802 0, bp->msix_table[offset].vector,
1803 i - 1, bp->msix_table[offset + i - 1].vector);
1804 } else {
1805 offset = CNIC_SUPPORT(bp);
1806 netdev_info(bp->dev,
1807 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1808 0, bp->msix_table[offset].vector,
1809 i - 1, bp->msix_table[offset + i - 1].vector);
1810 }
1811 return 0;
1812 }
1813
1814 int bnx2x_enable_msi(struct bnx2x *bp)
1815 {
1816 int rc;
1817
1818 rc = pci_enable_msi(bp->pdev);
1819 if (rc) {
1820 BNX2X_DEV_INFO("MSI is not attainable\n");
1821 return -1;
1822 }
1823 bp->flags |= USING_MSI_FLAG;
1824
1825 return 0;
1826 }
1827
1828 static int bnx2x_req_irq(struct bnx2x *bp)
1829 {
1830 unsigned long flags;
1831 unsigned int irq;
1832
1833 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1834 flags = 0;
1835 else
1836 flags = IRQF_SHARED;
1837
1838 if (bp->flags & USING_MSIX_FLAG)
1839 irq = bp->msix_table[0].vector;
1840 else
1841 irq = bp->pdev->irq;
1842
1843 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1844 }
1845
1846 static int bnx2x_setup_irqs(struct bnx2x *bp)
1847 {
1848 int rc = 0;
1849 if (bp->flags & USING_MSIX_FLAG &&
1850 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1851 rc = bnx2x_req_msix_irqs(bp);
1852 if (rc)
1853 return rc;
1854 } else {
1855 rc = bnx2x_req_irq(bp);
1856 if (rc) {
1857 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1858 return rc;
1859 }
1860 if (bp->flags & USING_MSI_FLAG) {
1861 bp->dev->irq = bp->pdev->irq;
1862 netdev_info(bp->dev, "using MSI IRQ %d\n",
1863 bp->dev->irq);
1864 }
1865 if (bp->flags & USING_MSIX_FLAG) {
1866 bp->dev->irq = bp->msix_table[0].vector;
1867 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1868 bp->dev->irq);
1869 }
1870 }
1871
1872 return 0;
1873 }
1874
1875 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1876 {
1877 int i;
1878
1879 for_each_rx_queue_cnic(bp, i) {
1880 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1881 napi_enable(&bnx2x_fp(bp, i, napi));
1882 }
1883 }
1884
1885 static void bnx2x_napi_enable(struct bnx2x *bp)
1886 {
1887 int i;
1888
1889 for_each_eth_queue(bp, i) {
1890 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1891 napi_enable(&bnx2x_fp(bp, i, napi));
1892 }
1893 }
1894
1895 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1896 {
1897 int i;
1898
1899 for_each_rx_queue_cnic(bp, i) {
1900 napi_disable(&bnx2x_fp(bp, i, napi));
1901 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1902 usleep_range(1000, 2000);
1903 }
1904 }
1905
1906 static void bnx2x_napi_disable(struct bnx2x *bp)
1907 {
1908 int i;
1909
1910 for_each_eth_queue(bp, i) {
1911 napi_disable(&bnx2x_fp(bp, i, napi));
1912 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1913 usleep_range(1000, 2000);
1914 }
1915 }
1916
1917 void bnx2x_netif_start(struct bnx2x *bp)
1918 {
1919 if (netif_running(bp->dev)) {
1920 bnx2x_napi_enable(bp);
1921 if (CNIC_LOADED(bp))
1922 bnx2x_napi_enable_cnic(bp);
1923 bnx2x_int_enable(bp);
1924 if (bp->state == BNX2X_STATE_OPEN)
1925 netif_tx_wake_all_queues(bp->dev);
1926 }
1927 }
1928
1929 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1930 {
1931 bnx2x_int_disable_sync(bp, disable_hw);
1932 bnx2x_napi_disable(bp);
1933 if (CNIC_LOADED(bp))
1934 bnx2x_napi_disable_cnic(bp);
1935 }
1936
1937 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1938 void *accel_priv, select_queue_fallback_t fallback)
1939 {
1940 struct bnx2x *bp = netdev_priv(dev);
1941
1942 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1943 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1944 u16 ether_type = ntohs(hdr->h_proto);
1945
1946 /* Skip VLAN tag if present */
1947 if (ether_type == ETH_P_8021Q) {
1948 struct vlan_ethhdr *vhdr =
1949 (struct vlan_ethhdr *)skb->data;
1950
1951 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1952 }
1953
1954 /* If ethertype is FCoE or FIP - use FCoE ring */
1955 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1956 return bnx2x_fcoe_tx(bp, txq_index);
1957 }
1958
1959 /* select a non-FCoE queue */
1960 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
1961 }
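
/* Illustrative example (not part of the original driver source): with CNIC
 * loaded, FCoE enabled and, say, 8 ETH queues, an FCoE or FIP frame is
 * steered to the dedicated FCoE Tx ring via bnx2x_fcoe_tx(), while any
 * other frame gets fallback(dev, skb) % 8, i.e. one of the regular ETH L2
 * Tx queues in the range [0..7].
 */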
1962
1963 void bnx2x_set_num_queues(struct bnx2x *bp)
1964 {
1965 /* RSS queues */
1966 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1967
1968 /* override in STORAGE SD modes */
1969 if (IS_MF_STORAGE_ONLY(bp))
1970 bp->num_ethernet_queues = 1;
1971
1972 /* Add special queues */
1973 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1974 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1975
1976 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1977 }
1978
1979 /**
1980 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1981 *
1982 * @bp: Driver handle
1983 *
1984 * We currently support at most 16 Tx queues for each CoS, thus we will
1985 * allocate a multiple of 16 for ETH L2 rings, according to the value of
1986 * bp->max_cos.
1987 *
1988 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1989 * index after all ETH L2 indices.
1990 *
1991 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1992 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1993 * 16..31,...) with indices that are not coupled with any real Tx queue.
1994 *
1995 * The proper configuration of skb->queue_mapping is handled by
1996 * bnx2x_select_queue() and __skb_tx_hash().
1997 *
1998 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1999 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
2000 */
2001 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
2002 {
2003 int rc, tx, rx;
2004
2005 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
2006 rx = BNX2X_NUM_ETH_QUEUES(bp);
2007
2008 /* account for fcoe queue */
2009 if (include_cnic && !NO_FCOE(bp)) {
2010 rx++;
2011 tx++;
2012 }
2013
2014 rc = netif_set_real_num_tx_queues(bp->dev, tx);
2015 if (rc) {
2016 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2017 return rc;
2018 }
2019 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2020 if (rc) {
2021 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2022 return rc;
2023 }
2024
2025 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2026 tx, rx);
2027
2028 return rc;
2029 }
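
/* Worked example (illustrative, not from the original source): with
 * BNX2X_NUM_ETH_QUEUES(bp) == 8, bp->max_cos == 3 and an FCoE L2 queue
 * included, the calls above yield
 *
 *   tx = 8 * 3 + 1 = 25    (ETH rings for all CoS groups + FCoE Tx)
 *   rx = 8 + 1     = 9     (ETH Rx rings + FCoE Rx)
 *
 * matching the index layout described in the comment before this function.
 */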
2030
2031 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2032 {
2033 int i;
2034
2035 for_each_queue(bp, i) {
2036 struct bnx2x_fastpath *fp = &bp->fp[i];
2037 u32 mtu;
2038
2039 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2040 if (IS_FCOE_IDX(i))
2041 /*
2042 * Although there are no IP frames expected to arrive on
2043 * this ring, we still want to add an
2044 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2045 * overrun attack.
2046 */
2047 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2048 else
2049 mtu = bp->dev->mtu;
2050 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2051 IP_HEADER_ALIGNMENT_PADDING +
2052 ETH_OVREHEAD +
2053 mtu +
2054 BNX2X_FW_RX_ALIGN_END;
2055 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2056 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2057 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2058 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2059 else
2060 fp->rx_frag_size = 0;
2061 }
2062 }
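
/* Worked example (illustrative; the alignment constants are chip/kernel
 * specific): for a 1500 byte MTU the buffer is roughly
 *
 *   rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING +
 *                 ETH_OVREHEAD + 1500 + BNX2X_FW_RX_ALIGN_END
 *
 * rounded up by SKB_DATA_ALIGN(). If rx_buf_size + NET_SKB_PAD still fits
 * in a single page, rx_frag_size is set so the Rx path can use page-frag
 * allocations; otherwise rx_frag_size stays 0 and the Rx path presumably
 * falls back to plain kmalloc'ed buffers (e.g. jumbo MTU on 4K pages).
 */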
2063
2064 static int bnx2x_init_rss(struct bnx2x *bp)
2065 {
2066 int i;
2067 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2068
2069 /* Prepare the initial contents for the indirection table if RSS is
2070 * enabled
2071 */
2072 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2073 bp->rss_conf_obj.ind_table[i] =
2074 bp->fp->cl_id +
2075 ethtool_rxfh_indir_default(i, num_eth_queues);
2076
2077 /*
2078 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2079 * per-port, so if explicit configuration is needed, do it only
2080 * for a PMF.
2081 *
2082 * For 57712 and newer on the other hand it's a per-function
2083 * configuration.
2084 */
2085 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2086 }
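
/* Illustrative example (not from the original source): with 4 ETH queues
 * and, say, bp->fp->cl_id == 16, ethtool_rxfh_indir_default(i, 4) is
 * simply i % 4, so the indirection table becomes 16,17,18,19,16,17,18,19,...
 * spreading the RSS hash buckets evenly over the client IDs of the ETH
 * fastpath queues.
 */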
2087
2088 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2089 bool config_hash, bool enable)
2090 {
2091 struct bnx2x_config_rss_params params = {NULL};
2092
2093 /* Although RSS is meaningless when there is a single HW queue we
2094 * still need it enabled in order to have HW Rx hash generated.
2095 *
2096 * if (!is_eth_multi(bp))
2097 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2098 */
2099
2100 params.rss_obj = rss_obj;
2101
2102 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2103
2104 if (enable) {
2105 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2106
2107 /* RSS configuration */
2108 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2109 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2110 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2111 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2112 if (rss_obj->udp_rss_v4)
2113 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2114 if (rss_obj->udp_rss_v6)
2115 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2116
2117 if (!CHIP_IS_E1x(bp)) {
2118 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2119 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2120 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2121
2122 /* valid only for TUNN_MODE_GRE tunnel mode */
2123 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2124 }
2125 } else {
2126 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2127 }
2128
2129 /* Hash bits */
2130 params.rss_result_mask = MULTI_MASK;
2131
2132 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2133
2134 if (config_hash) {
2135 /* RSS keys */
2136 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2137 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2138 }
2139
2140 if (IS_PF(bp))
2141 return bnx2x_config_rss(bp, &params);
2142 else
2143 return bnx2x_vfpf_config_rss(bp, &params);
2144 }
2145
2146 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2147 {
2148 struct bnx2x_func_state_params func_params = {NULL};
2149
2150 /* Prepare parameters for function state transitions */
2151 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2152
2153 func_params.f_obj = &bp->func_obj;
2154 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2155
2156 func_params.params.hw_init.load_phase = load_code;
2157
2158 return bnx2x_func_state_change(bp, &func_params);
2159 }
2160
2161 /*
2162 * Cleans the objects that have internal lists without sending
2163 * ramrods. Should be run when interrupts are disabled.
2164 */
2165 void bnx2x_squeeze_objects(struct bnx2x *bp)
2166 {
2167 int rc;
2168 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2169 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2170 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2171
2172 /***************** Cleanup MACs' object first *************************/
2173
2174 /* Wait for completion of the requested commands */
2175 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2176 /* Perform a dry cleanup */
2177 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2178
2179 /* Clean ETH primary MAC */
2180 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2181 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2182 &ramrod_flags);
2183 if (rc != 0)
2184 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2185
2186 /* Cleanup UC list */
2187 vlan_mac_flags = 0;
2188 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2189 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2190 &ramrod_flags);
2191 if (rc != 0)
2192 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2193
2194 /***************** Now clean mcast object *****************************/
2195 rparam.mcast_obj = &bp->mcast_obj;
2196 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2197
2198 /* Add a DEL command... - Since we're doing a driver cleanup only,
2199 * we take a lock surrounding both the initial send and the CONTs,
2200 * as we don't want a true completion to disrupt us in the middle.
2201 */
2202 netif_addr_lock_bh(bp->dev);
2203 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2204 if (rc < 0)
2205 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2206 rc);
2207
2208 /* ...and wait until all pending commands are cleared */
2209 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2210 while (rc != 0) {
2211 if (rc < 0) {
2212 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2213 rc);
2214 netif_addr_unlock_bh(bp->dev);
2215 return;
2216 }
2217
2218 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2219 }
2220 netif_addr_unlock_bh(bp->dev);
2221 }
2222
2223 #ifndef BNX2X_STOP_ON_ERROR
2224 #define LOAD_ERROR_EXIT(bp, label) \
2225 do { \
2226 (bp)->state = BNX2X_STATE_ERROR; \
2227 goto label; \
2228 } while (0)
2229
2230 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2231 do { \
2232 bp->cnic_loaded = false; \
2233 goto label; \
2234 } while (0)
2235 #else /*BNX2X_STOP_ON_ERROR*/
2236 #define LOAD_ERROR_EXIT(bp, label) \
2237 do { \
2238 (bp)->state = BNX2X_STATE_ERROR; \
2239 (bp)->panic = 1; \
2240 return -EBUSY; \
2241 } while (0)
2242 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2243 do { \
2244 bp->cnic_loaded = false; \
2245 (bp)->panic = 1; \
2246 return -EBUSY; \
2247 } while (0)
2248 #endif /*BNX2X_STOP_ON_ERROR*/
2249
2250 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2251 {
2252 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2253 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2254 return;
2255 }
2256
2257 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2258 {
2259 int num_groups, vf_headroom = 0;
2260 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2261
2262 /* number of queues for statistics is number of eth queues + FCoE */
2263 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2264
2265 /* Total number of FW statistics requests =
2266 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2267 * and fcoe l2 queue) stats + num of queues (which includes another 1
2268 * for fcoe l2 queue if applicable)
2269 */
2270 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2271
2272 /* vf stats appear in the request list, but their data is allocated by
2273 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2274 * it is used to determine where to place the vf stats queries in the
2275 * request struct
2276 */
2277 if (IS_SRIOV(bp))
2278 vf_headroom = bnx2x_vf_headroom(bp);
2279
2280 /* Request is built from stats_query_header and an array of
2281 * stats_query_cmd_group each of which contains
2282 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2283 * configured in the stats_query_header.
2284 */
2285 num_groups =
2286 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2287 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2288 1 : 0));
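/* Worked example (illustrative; the real STATS_QUERY_CMD_COUNT value comes
 * from the FW HSI): assuming STATS_QUERY_CMD_COUNT were 16, fw_stats_num
 * were 10 and vf_headroom were 64, then (10 + 64) / 16 = 4 full groups plus
 * one partial group for the remaining 74 % 16 = 10 rules, i.e.
 * num_groups == 5.
 */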
2289
2290 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2291 bp->fw_stats_num, vf_headroom, num_groups);
2292 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2293 num_groups * sizeof(struct stats_query_cmd_group);
2294
2295 /* Data for statistics requests + stats_counter
2296 * stats_counter holds per-STORM counters that are incremented
2297 * when STORM has finished with the current request.
2298 * memory for FCoE offloaded statistics is counted anyway,
2299 * even if they will not be sent.
2300 * VF stats are not accounted for here as the data of VF stats is stored
2301 * in memory allocated by the VF, not here.
2302 */
2303 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2304 sizeof(struct per_pf_stats) +
2305 sizeof(struct fcoe_statistics_params) +
2306 sizeof(struct per_queue_stats) * num_queue_stats +
2307 sizeof(struct stats_counter);
2308
2309 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2310 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2311 if (!bp->fw_stats)
2312 goto alloc_mem_err;
2313
2314 /* Set shortcuts */
2315 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2316 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2317 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2318 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2319 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2320 bp->fw_stats_req_sz;
2321
2322 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2323 U64_HI(bp->fw_stats_req_mapping),
2324 U64_LO(bp->fw_stats_req_mapping));
2325 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2326 U64_HI(bp->fw_stats_data_mapping),
2327 U64_LO(bp->fw_stats_data_mapping));
2328 return 0;
2329
2330 alloc_mem_err:
2331 bnx2x_free_fw_stats_mem(bp);
2332 BNX2X_ERR("Can't allocate FW stats memory\n");
2333 return -ENOMEM;
2334 }
2335
2336 /* send load request to mcp and analyze response */
2337 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2338 {
2339 u32 param;
2340
2341 /* init fw_seq */
2342 bp->fw_seq =
2343 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2344 DRV_MSG_SEQ_NUMBER_MASK);
2345 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2346
2347 /* Get current FW pulse sequence */
2348 bp->fw_drv_pulse_wr_seq =
2349 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2350 DRV_PULSE_SEQ_MASK);
2351 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2352
2353 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2354
2355 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2356 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2357
2358 /* load request */
2359 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2360
2361 /* if mcp fails to respond we must abort */
2362 if (!(*load_code)) {
2363 BNX2X_ERR("MCP response failure, aborting\n");
2364 return -EBUSY;
2365 }
2366
2367 /* If mcp refused (e.g. other port is in diagnostic mode) we
2368 * must abort
2369 */
2370 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2371 BNX2X_ERR("MCP refused load request, aborting\n");
2372 return -EBUSY;
2373 }
2374 return 0;
2375 }
2376
2377 /* check whether another PF has already loaded FW to the chip. In
2378 * virtualized environments a PF from another VM may have already
2379 * initialized the device, including loading the FW.
2380 */
2381 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2382 {
2383 /* is another pf loaded on this engine? */
2384 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2385 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2386 /* build my FW version dword */
2387 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2388 (BCM_5710_FW_MINOR_VERSION << 8) +
2389 (BCM_5710_FW_REVISION_VERSION << 16) +
2390 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2391
2392 /* read loaded FW from chip */
2393 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2394
2395 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2396 loaded_fw, my_fw);
2397
2398 /* abort nic load if version mismatch */
2399 if (my_fw != loaded_fw) {
2400 if (print_err)
2401 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2402 loaded_fw, my_fw);
2403 else
2404 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2405 loaded_fw, my_fw);
2406 return -EBUSY;
2407 }
2408 }
2409 return 0;
2410 }
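
/* Illustrative example (not from the original source): the version dword
 * packs the FW version low byte first, e.g. a hypothetical FW 7.13.1.0
 * would be encoded as 7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07,
 * and the load is refused if the dword read back from XSEM_REG_PRAM
 * differs from the driver's own value.
 */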
2411
2412 /* returns the "mcp load_code" according to global load_count array */
2413 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2414 {
2415 int path = BP_PATH(bp);
2416
2417 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2418 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2419 bnx2x_load_count[path][2]);
2420 bnx2x_load_count[path][0]++;
2421 bnx2x_load_count[path][1 + port]++;
2422 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2423 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2424 bnx2x_load_count[path][2]);
2425 if (bnx2x_load_count[path][0] == 1)
2426 return FW_MSG_CODE_DRV_LOAD_COMMON;
2427 else if (bnx2x_load_count[path][1 + port] == 1)
2428 return FW_MSG_CODE_DRV_LOAD_PORT;
2429 else
2430 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2431 }
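
/* Illustrative example (not from the original source): on a path where no
 * function is loaded yet, load_count[path] goes from {0,0,0} to {1,1,0}
 * for port 0 and the caller gets LOAD_COMMON (global + port init). A
 * second function loading on port 1 bumps it to {2,1,1} and gets
 * LOAD_PORT; any further function on an already-initialized port gets
 * LOAD_FUNCTION.
 */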
2432
2433 /* mark PMF if applicable */
2434 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2435 {
2436 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2437 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2438 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2439 bp->port.pmf = 1;
2440 /* We need the barrier to ensure the ordering between the
2441 * writing to bp->port.pmf here and reading it from the
2442 * bnx2x_periodic_task().
2443 */
2444 smp_mb();
2445 } else {
2446 bp->port.pmf = 0;
2447 }
2448
2449 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2450 }
2451
2452 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2453 {
2454 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2455 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2456 (bp->common.shmem2_base)) {
2457 if (SHMEM2_HAS(bp, dcc_support))
2458 SHMEM2_WR(bp, dcc_support,
2459 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2460 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2461 if (SHMEM2_HAS(bp, afex_driver_support))
2462 SHMEM2_WR(bp, afex_driver_support,
2463 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2464 }
2465
2466 /* Set AFEX default VLAN tag to an invalid value */
2467 bp->afex_def_vlan_tag = -1;
2468 }
2469
2470 /**
2471 * bnx2x_bz_fp - zero content of the fastpath structure.
2472 *
2473 * @bp: driver handle
2474 * @index: fastpath index to be zeroed
2475 *
2476 * Makes sure the contents of the bp->fp[index].napi is kept
2477 * intact.
2478 */
2479 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2480 {
2481 struct bnx2x_fastpath *fp = &bp->fp[index];
2482 int cos;
2483 struct napi_struct orig_napi = fp->napi;
2484 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2485
2486 /* bzero bnx2x_fastpath contents */
2487 if (fp->tpa_info)
2488 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2489 sizeof(struct bnx2x_agg_info));
2490 memset(fp, 0, sizeof(*fp));
2491
2492 /* Restore the NAPI object as it has been already initialized */
2493 fp->napi = orig_napi;
2494 fp->tpa_info = orig_tpa_info;
2495 fp->bp = bp;
2496 fp->index = index;
2497 if (IS_ETH_FP(fp))
2498 fp->max_cos = bp->max_cos;
2499 else
2500 /* Special queues support only one CoS */
2501 fp->max_cos = 1;
2502
2503 /* Init txdata pointers */
2504 if (IS_FCOE_FP(fp))
2505 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2506 if (IS_ETH_FP(fp))
2507 for_each_cos_in_tx_queue(fp, cos)
2508 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2509 BNX2X_NUM_ETH_QUEUES(bp) + index];
2510
2511 /* set the tpa flag for each queue. The tpa flag determines the queue
2512 * minimal size so it must be set prior to queue memory allocation
2513 */
2514 if (bp->dev->features & NETIF_F_LRO)
2515 fp->mode = TPA_MODE_LRO;
2516 else if (bp->dev->features & NETIF_F_GRO &&
2517 bnx2x_mtu_allows_gro(bp->dev->mtu))
2518 fp->mode = TPA_MODE_GRO;
2519 else
2520 fp->mode = TPA_MODE_DISABLED;
2521
2522 /* We don't want TPA if it's disabled in bp
2523 * or if this is an FCoE L2 ring.
2524 */
2525 if (bp->disable_tpa || IS_FCOE_FP(fp))
2526 fp->mode = TPA_MODE_DISABLED;
2527 }
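
/* Illustrative note (not part of the original driver source): the resulting
 * TPA mode is LRO when NETIF_F_LRO is set, otherwise GRO when NETIF_F_GRO
 * is set and the MTU leaves room for aggregation, otherwise disabled. FCoE
 * L2 rings, and the case where TPA is globally disabled (bp->disable_tpa),
 * always end up with TPA_MODE_DISABLED regardless of the feature flags.
 */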
2528
2529 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2530 {
2531 u32 cur;
2532
2533 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2534 return;
2535
2536 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2537 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2538 cur, state);
2539
2540 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2541 }
2542
2543 int bnx2x_load_cnic(struct bnx2x *bp)
2544 {
2545 int i, rc, port = BP_PORT(bp);
2546
2547 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2548
2549 mutex_init(&bp->cnic_mutex);
2550
2551 if (IS_PF(bp)) {
2552 rc = bnx2x_alloc_mem_cnic(bp);
2553 if (rc) {
2554 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2555 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2556 }
2557 }
2558
2559 rc = bnx2x_alloc_fp_mem_cnic(bp);
2560 if (rc) {
2561 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2562 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2563 }
2564
2565 /* Update the number of queues with the cnic queues */
2566 rc = bnx2x_set_real_num_queues(bp, 1);
2567 if (rc) {
2568 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2569 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2570 }
2571
2572 /* Add all CNIC NAPI objects */
2573 bnx2x_add_all_napi_cnic(bp);
2574 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2575 bnx2x_napi_enable_cnic(bp);
2576
2577 rc = bnx2x_init_hw_func_cnic(bp);
2578 if (rc)
2579 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2580
2581 bnx2x_nic_init_cnic(bp);
2582
2583 if (IS_PF(bp)) {
2584 /* Enable Timer scan */
2585 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2586
2587 /* setup cnic queues */
2588 for_each_cnic_queue(bp, i) {
2589 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2590 if (rc) {
2591 BNX2X_ERR("Queue setup failed\n");
2592 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2593 }
2594 }
2595 }
2596
2597 /* Initialize Rx filter. */
2598 bnx2x_set_rx_mode_inner(bp);
2599
2600 /* re-read iscsi info */
2601 bnx2x_get_iscsi_info(bp);
2602 bnx2x_setup_cnic_irq_info(bp);
2603 bnx2x_setup_cnic_info(bp);
2604 bp->cnic_loaded = true;
2605 if (bp->state == BNX2X_STATE_OPEN)
2606 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2607
2608 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2609
2610 return 0;
2611
2612 #ifndef BNX2X_STOP_ON_ERROR
2613 load_error_cnic2:
2614 /* Disable Timer scan */
2615 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2616
2617 load_error_cnic1:
2618 bnx2x_napi_disable_cnic(bp);
2619 /* Update the number of queues without the cnic queues */
2620 if (bnx2x_set_real_num_queues(bp, 0))
2621 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2622 load_error_cnic0:
2623 BNX2X_ERR("CNIC-related load failed\n");
2624 bnx2x_free_fp_mem_cnic(bp);
2625 bnx2x_free_mem_cnic(bp);
2626 return rc;
2627 #endif /* ! BNX2X_STOP_ON_ERROR */
2628 }
2629
2630 /* must be called with rtnl_lock */
2631 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2632 {
2633 int port = BP_PORT(bp);
2634 int i, rc = 0, load_code = 0;
2635
2636 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2637 DP(NETIF_MSG_IFUP,
2638 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2639
2640 #ifdef BNX2X_STOP_ON_ERROR
2641 if (unlikely(bp->panic)) {
2642 BNX2X_ERR("Can't load NIC when there is panic\n");
2643 return -EPERM;
2644 }
2645 #endif
2646
2647 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2648
2649 /* zero the structure w/o any lock, before SP handler is initialized */
2650 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2651 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2652 &bp->last_reported_link.link_report_flags);
2653
2654 if (IS_PF(bp))
2655 /* must be called before memory allocation and HW init */
2656 bnx2x_ilt_set_info(bp);
2657
2658 /*
2659 * Zero fastpath structures preserving invariants like napi, which are
2660 * allocated only once, fp index, max_cos, bp pointer.
2661 * Also set fp->mode and txdata_ptr.
2662 */
2663 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2664 for_each_queue(bp, i)
2665 bnx2x_bz_fp(bp, i);
2666 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2667 bp->num_cnic_queues) *
2668 sizeof(struct bnx2x_fp_txdata));
2669
2670 bp->fcoe_init = false;
2671
2672 /* Set the receive queues buffer size */
2673 bnx2x_set_rx_buf_size(bp);
2674
2675 if (IS_PF(bp)) {
2676 rc = bnx2x_alloc_mem(bp);
2677 if (rc) {
2678 BNX2X_ERR("Unable to allocate bp memory\n");
2679 return rc;
2680 }
2681 }
2682
2683 /* needs to be done after alloc mem, since it is self-adjusting to the amount
2684 * of memory available for RSS queues
2685 */
2686 rc = bnx2x_alloc_fp_mem(bp);
2687 if (rc) {
2688 BNX2X_ERR("Unable to allocate memory for fps\n");
2689 LOAD_ERROR_EXIT(bp, load_error0);
2690 }
2691
2692 /* Allocate memory for FW statistics */
2693 rc = bnx2x_alloc_fw_stats_mem(bp);
2694 if (rc)
2695 LOAD_ERROR_EXIT(bp, load_error0);
2696
2697 /* request pf to initialize status blocks */
2698 if (IS_VF(bp)) {
2699 rc = bnx2x_vfpf_init(bp);
2700 if (rc)
2701 LOAD_ERROR_EXIT(bp, load_error0);
2702 }
2703
2704 /* As long as bnx2x_alloc_mem() may possibly update
2705 * bp->num_queues, bnx2x_set_real_num_queues() should always
2706 * come after it. At this stage cnic queues are not counted.
2707 */
2708 rc = bnx2x_set_real_num_queues(bp, 0);
2709 if (rc) {
2710 BNX2X_ERR("Unable to set real_num_queues\n");
2711 LOAD_ERROR_EXIT(bp, load_error0);
2712 }
2713
2714 /* configure multi cos mappings in kernel.
2715 * this configuration may be overridden by a multi class queue
2716 * discipline or by a dcbx negotiation result.
2717 */
2718 bnx2x_setup_tc(bp->dev, bp->max_cos);
2719
2720 /* Add all NAPI objects */
2721 bnx2x_add_all_napi(bp);
2722 DP(NETIF_MSG_IFUP, "napi added\n");
2723 bnx2x_napi_enable(bp);
2724
2725 if (IS_PF(bp)) {
2726 /* set pf load just before approaching the MCP */
2727 bnx2x_set_pf_load(bp);
2728
2729 /* if mcp exists send load request and analyze response */
2730 if (!BP_NOMCP(bp)) {
2731 /* attempt to load pf */
2732 rc = bnx2x_nic_load_request(bp, &load_code);
2733 if (rc)
2734 LOAD_ERROR_EXIT(bp, load_error1);
2735
2736 /* what did mcp say? */
2737 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2738 if (rc) {
2739 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2740 LOAD_ERROR_EXIT(bp, load_error2);
2741 }
2742 } else {
2743 load_code = bnx2x_nic_load_no_mcp(bp, port);
2744 }
2745
2746 /* mark pmf if applicable */
2747 bnx2x_nic_load_pmf(bp, load_code);
2748
2749 /* Init Function state controlling object */
2750 bnx2x__init_func_obj(bp);
2751
2752 /* Initialize HW */
2753 rc = bnx2x_init_hw(bp, load_code);
2754 if (rc) {
2755 BNX2X_ERR("HW init failed, aborting\n");
2756 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2757 LOAD_ERROR_EXIT(bp, load_error2);
2758 }
2759 }
2760
2761 bnx2x_pre_irq_nic_init(bp);
2762
2763 /* Connect to IRQs */
2764 rc = bnx2x_setup_irqs(bp);
2765 if (rc) {
2766 BNX2X_ERR("setup irqs failed\n");
2767 if (IS_PF(bp))
2768 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2769 LOAD_ERROR_EXIT(bp, load_error2);
2770 }
2771
2772 /* Init per-function objects */
2773 if (IS_PF(bp)) {
2774 /* Setup NIC internals and enable interrupts */
2775 bnx2x_post_irq_nic_init(bp, load_code);
2776
2777 bnx2x_init_bp_objs(bp);
2778 bnx2x_iov_nic_init(bp);
2779
2780 /* Set AFEX default VLAN tag to an invalid value */
2781 bp->afex_def_vlan_tag = -1;
2782 bnx2x_nic_load_afex_dcc(bp, load_code);
2783 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2784 rc = bnx2x_func_start(bp);
2785 if (rc) {
2786 BNX2X_ERR("Function start failed!\n");
2787 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2788
2789 LOAD_ERROR_EXIT(bp, load_error3);
2790 }
2791
2792 /* Send LOAD_DONE command to MCP */
2793 if (!BP_NOMCP(bp)) {
2794 load_code = bnx2x_fw_command(bp,
2795 DRV_MSG_CODE_LOAD_DONE, 0);
2796 if (!load_code) {
2797 BNX2X_ERR("MCP response failure, aborting\n");
2798 rc = -EBUSY;
2799 LOAD_ERROR_EXIT(bp, load_error3);
2800 }
2801 }
2802
2803 /* initialize FW coalescing state machines in RAM */
2804 bnx2x_update_coalesce(bp);
2805 }
2806
2807 /* setup the leading queue */
2808 rc = bnx2x_setup_leading(bp);
2809 if (rc) {
2810 BNX2X_ERR("Setup leading failed!\n");
2811 LOAD_ERROR_EXIT(bp, load_error3);
2812 }
2813
2814 /* set up the rest of the queues */
2815 for_each_nondefault_eth_queue(bp, i) {
2816 if (IS_PF(bp))
2817 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2818 else /* VF */
2819 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2820 if (rc) {
2821 BNX2X_ERR("Queue %d setup failed\n", i);
2822 LOAD_ERROR_EXIT(bp, load_error3);
2823 }
2824 }
2825
2826 /* setup rss */
2827 rc = bnx2x_init_rss(bp);
2828 if (rc) {
2829 BNX2X_ERR("PF RSS init failed\n");
2830 LOAD_ERROR_EXIT(bp, load_error3);
2831 }
2832
2833 /* Now when Clients are configured we are ready to work */
2834 bp->state = BNX2X_STATE_OPEN;
2835
2836 /* Configure a ucast MAC */
2837 if (IS_PF(bp))
2838 rc = bnx2x_set_eth_mac(bp, true);
2839 else /* vf */
2840 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2841 true);
2842 if (rc) {
2843 BNX2X_ERR("Setting Ethernet MAC failed\n");
2844 LOAD_ERROR_EXIT(bp, load_error3);
2845 }
2846
2847 if (IS_PF(bp) && bp->pending_max) {
2848 bnx2x_update_max_mf_config(bp, bp->pending_max);
2849 bp->pending_max = 0;
2850 }
2851
2852 bp->force_link_down = false;
2853 if (bp->port.pmf) {
2854 rc = bnx2x_initial_phy_init(bp, load_mode);
2855 if (rc)
2856 LOAD_ERROR_EXIT(bp, load_error3);
2857 }
2858 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2859
2860 /* Start fast path */
2861
2862 /* Re-configure vlan filters */
2863 rc = bnx2x_vlan_reconfigure_vid(bp);
2864 if (rc)
2865 LOAD_ERROR_EXIT(bp, load_error3);
2866
2867 /* Initialize Rx filter. */
2868 bnx2x_set_rx_mode_inner(bp);
2869
2870 if (bp->flags & PTP_SUPPORTED) {
2871 bnx2x_init_ptp(bp);
2872 bnx2x_configure_ptp_filters(bp);
2873 }
2874 /* Start Tx */
2875 switch (load_mode) {
2876 case LOAD_NORMAL:
2877 /* Tx queue should be only re-enabled */
2878 netif_tx_wake_all_queues(bp->dev);
2879 break;
2880
2881 case LOAD_OPEN:
2882 netif_tx_start_all_queues(bp->dev);
2883 smp_mb__after_atomic();
2884 break;
2885
2886 case LOAD_DIAG:
2887 case LOAD_LOOPBACK_EXT:
2888 bp->state = BNX2X_STATE_DIAG;
2889 break;
2890
2891 default:
2892 break;
2893 }
2894
2895 if (bp->port.pmf)
2896 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2897 else
2898 bnx2x__link_status_update(bp);
2899
2900 /* start the timer */
2901 mod_timer(&bp->timer, jiffies + bp->current_interval);
2902
2903 if (CNIC_ENABLED(bp))
2904 bnx2x_load_cnic(bp);
2905
2906 if (IS_PF(bp))
2907 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2908
2909 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2910 /* mark driver is loaded in shmem2 */
2911 u32 val;
2912 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2913 val &= ~DRV_FLAGS_MTU_MASK;
2914 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2915 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2916 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2917 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2918 }
2919
2920 /* Wait for all pending SP commands to complete */
2921 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2922 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2923 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2924 return -EBUSY;
2925 }
2926
2927 /* Update driver data for On-Chip MFW dump. */
2928 if (IS_PF(bp))
2929 bnx2x_update_mfw_dump(bp);
2930
2931 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2932 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2933 bnx2x_dcbx_init(bp, false);
2934
2935 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2936 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2937
2938 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2939
2940 return 0;
2941
2942 #ifndef BNX2X_STOP_ON_ERROR
2943 load_error3:
2944 if (IS_PF(bp)) {
2945 bnx2x_int_disable_sync(bp, 1);
2946
2947 /* Clean queueable objects */
2948 bnx2x_squeeze_objects(bp);
2949 }
2950
2951 /* Free SKBs, SGEs, TPA pool and driver internals */
2952 bnx2x_free_skbs(bp);
2953 for_each_rx_queue(bp, i)
2954 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2955
2956 /* Release IRQs */
2957 bnx2x_free_irq(bp);
2958 load_error2:
2959 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2960 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2961 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2962 }
2963
2964 bp->port.pmf = 0;
2965 load_error1:
2966 bnx2x_napi_disable(bp);
2967 bnx2x_del_all_napi(bp);
2968
2969 /* clear pf_load status, as it was already set */
2970 if (IS_PF(bp))
2971 bnx2x_clear_pf_load(bp);
2972 load_error0:
2973 bnx2x_free_fw_stats_mem(bp);
2974 bnx2x_free_fp_mem(bp);
2975 bnx2x_free_mem(bp);
2976
2977 return rc;
2978 #endif /* ! BNX2X_STOP_ON_ERROR */
2979 }
2980
2981 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2982 {
2983 u8 rc = 0, cos, i;
2984
2985 /* Wait until tx fastpath tasks complete */
2986 for_each_tx_queue(bp, i) {
2987 struct bnx2x_fastpath *fp = &bp->fp[i];
2988
2989 for_each_cos_in_tx_queue(fp, cos)
2990 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2991 if (rc)
2992 return rc;
2993 }
2994 return 0;
2995 }
2996
2997 /* must be called with rtnl_lock */
2998 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2999 {
3000 int i;
3001 bool global = false;
3002
3003 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
3004
3005 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
3006 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
3007
3008 /* mark driver is unloaded in shmem2 */
3009 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
3010 u32 val;
3011 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
3012 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
3013 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
3014 }
3015
3016 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
3017 (bp->state == BNX2X_STATE_CLOSED ||
3018 bp->state == BNX2X_STATE_ERROR)) {
3019 /* We can get here if the driver has been unloaded
3020 * during parity error recovery and is either waiting for a
3021 * leader to complete or for other functions to unload and
3022 * then ifdown has been issued. In this case we want to
3023 * unload and let other functions complete a recovery
3024 * process.
3025 */
3026 bp->recovery_state = BNX2X_RECOVERY_DONE;
3027 bp->is_leader = 0;
3028 bnx2x_release_leader_lock(bp);
3029 smp_mb();
3030
3031 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3032 BNX2X_ERR("Can't unload in closed or error state\n");
3033 return -EINVAL;
3034 }
3035
3036 /* Nothing to do during unload if previous bnx2x_nic_load()
3037 * has not completed successfully - all resources are released.
3038 *
3039 * We can get here only after an unsuccessful ndo_* callback, during which
3040 * dev->IFF_UP flag is still on.
3041 */
3042 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3043 return 0;
3044
3045 /* It's important to set the bp->state to the value different from
3046 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3047 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3048 */
3049 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3050 smp_mb();
3051
3052 /* indicate to VFs that the PF is going down */
3053 bnx2x_iov_channel_down(bp);
3054
3055 if (CNIC_LOADED(bp))
3056 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3057
3058 /* Stop Tx */
3059 bnx2x_tx_disable(bp);
3060 netdev_reset_tc(bp->dev);
3061
3062 bp->rx_mode = BNX2X_RX_MODE_NONE;
3063
3064 del_timer_sync(&bp->timer);
3065
3066 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3067 /* Set ALWAYS_ALIVE bit in shmem */
3068 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3069 bnx2x_drv_pulse(bp);
3070 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3071 bnx2x_save_statistics(bp);
3072 }
3073
3074 /* wait till consumers catch up with producers in all queues */
3075 bnx2x_drain_tx_queues(bp);
3076
3077 /* if VF, indicate to the PF that this function is going down (the PF will
3078 * delete SP elements and clear initializations)
3079 */
3080 if (IS_VF(bp))
3081 bnx2x_vfpf_close_vf(bp);
3082 else if (unload_mode != UNLOAD_RECOVERY)
3083 /* if this is a normal/close unload need to clean up chip*/
3084 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3085 else {
3086 /* Send the UNLOAD_REQUEST to the MCP */
3087 bnx2x_send_unload_req(bp, unload_mode);
3088
3089 /* Prevent transactions to host from the functions on the
3090 * engine that doesn't reset global blocks in case of global
3091 * attention once global blocks are reset and gates are opened
3092 * (the engine whose leader will perform the recovery
3093 * last).
3094 */
3095 if (!CHIP_IS_E1x(bp))
3096 bnx2x_pf_disable(bp);
3097
3098 /* Disable HW interrupts, NAPI */
3099 bnx2x_netif_stop(bp, 1);
3100 /* Delete all NAPI objects */
3101 bnx2x_del_all_napi(bp);
3102 if (CNIC_LOADED(bp))
3103 bnx2x_del_all_napi_cnic(bp);
3104 /* Release IRQs */
3105 bnx2x_free_irq(bp);
3106
3107 /* Report UNLOAD_DONE to MCP */
3108 bnx2x_send_unload_done(bp, false);
3109 }
3110
3111 /*
3112 * At this stage no more interrupts will arrive so we may safely clean
3113 * the queueable objects here in case they failed to get cleaned so far.
3114 */
3115 if (IS_PF(bp))
3116 bnx2x_squeeze_objects(bp);
3117
3118 /* There should be no more pending SP commands at this stage */
3119 bp->sp_state = 0;
3120
3121 bp->port.pmf = 0;
3122
3123 /* clear pending work in rtnl task */
3124 bp->sp_rtnl_state = 0;
3125 smp_mb();
3126
3127 /* Free SKBs, SGEs, TPA pool and driver internals */
3128 bnx2x_free_skbs(bp);
3129 if (CNIC_LOADED(bp))
3130 bnx2x_free_skbs_cnic(bp);
3131 for_each_rx_queue(bp, i)
3132 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3133
3134 bnx2x_free_fp_mem(bp);
3135 if (CNIC_LOADED(bp))
3136 bnx2x_free_fp_mem_cnic(bp);
3137
3138 if (IS_PF(bp)) {
3139 if (CNIC_LOADED(bp))
3140 bnx2x_free_mem_cnic(bp);
3141 }
3142 bnx2x_free_mem(bp);
3143
3144 bp->state = BNX2X_STATE_CLOSED;
3145 bp->cnic_loaded = false;
3146
3147 /* Clear driver version indication in shmem */
3148 if (IS_PF(bp) && !BP_NOMCP(bp))
3149 bnx2x_update_mng_version(bp);
3150
3151 /* Check if there are pending parity attentions. If there are - set
3152 * RECOVERY_IN_PROGRESS.
3153 */
3154 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3155 bnx2x_set_reset_in_progress(bp);
3156
3157 /* Set RESET_IS_GLOBAL if needed */
3158 if (global)
3159 bnx2x_set_reset_global(bp);
3160 }
3161
3162 /* The last driver must disable a "close the gate" if there is no
3163 * parity attention or "process kill" pending.
3164 */
3165 if (IS_PF(bp) &&
3166 !bnx2x_clear_pf_load(bp) &&
3167 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3168 bnx2x_disable_close_the_gate(bp);
3169
3170 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3171
3172 return 0;
3173 }
3174
3175 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3176 {
3177 u16 pmcsr;
3178
3179 /* If there is no power capability, silently succeed */
3180 if (!bp->pdev->pm_cap) {
3181 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3182 return 0;
3183 }
3184
3185 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3186
3187 switch (state) {
3188 case PCI_D0:
3189 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3190 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3191 PCI_PM_CTRL_PME_STATUS));
3192
3193 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3194 /* delay required during transition out of D3hot */
3195 msleep(20);
3196 break;
3197
3198 case PCI_D3hot:
3199 /* If there are other clients above don't
3200 shut down the power */
3201 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3202 return 0;
3203 /* Don't shut down the power for emulation and FPGA */
3204 if (CHIP_REV_IS_SLOW(bp))
3205 return 0;
3206
3207 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3208 pmcsr |= 3;
3209
3210 if (bp->wol)
3211 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3212
3213 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3214 pmcsr);
3215
3216 /* No more memory access after this point until
3217 * device is brought back to D0.
3218 */
3219 break;
3220
3221 default:
3222 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3223 return -EINVAL;
3224 }
3225 return 0;
3226 }
3227
3228 /*
3229 * net_device service functions
3230 */
3231 static int bnx2x_poll(struct napi_struct *napi, int budget)
3232 {
3233 int work_done = 0;
3234 u8 cos;
3235 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3236 napi);
3237 struct bnx2x *bp = fp->bp;
3238
3239 while (1) {
3240 #ifdef BNX2X_STOP_ON_ERROR
3241 if (unlikely(bp->panic)) {
3242 napi_complete(napi);
3243 return 0;
3244 }
3245 #endif
3246 if (!bnx2x_fp_lock_napi(fp))
3247 return budget;
3248
3249 for_each_cos_in_tx_queue(fp, cos)
3250 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3251 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3252
3253 if (bnx2x_has_rx_work(fp)) {
3254 work_done += bnx2x_rx_int(fp, budget - work_done);
3255
3256 /* must not complete if we consumed full budget */
3257 if (work_done >= budget) {
3258 bnx2x_fp_unlock_napi(fp);
3259 break;
3260 }
3261 }
3262
3263 bnx2x_fp_unlock_napi(fp);
3264
3265 /* Fall out from the NAPI loop if needed */
3266 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3267
3268 /* No need to update SB for FCoE L2 ring as long as
3269 * it's connected to the default SB and the SB
3270 * has been updated when NAPI was scheduled.
3271 */
3272 if (IS_FCOE_FP(fp)) {
3273 napi_complete(napi);
3274 break;
3275 }
3276 bnx2x_update_fpsb_idx(fp);
3277 /* bnx2x_has_rx_work() reads the status block,
3278 * thus we need to ensure that status block indices
3279 * have been actually read (bnx2x_update_fpsb_idx)
3280 * prior to this check (bnx2x_has_rx_work) so that
3281 * we won't write the "newer" value of the status block
3282 * to IGU (if there was a DMA right after
3283 * bnx2x_has_rx_work and if there is no rmb, the memory
3284 * reading (bnx2x_update_fpsb_idx) may be postponed
3285 * to right before bnx2x_ack_sb). In this case there
3286 * will never be another interrupt until there is
3287 * another update of the status block, while there
3288 * is still unhandled work.
3289 */
3290 rmb();
3291
3292 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3293 napi_complete(napi);
3294 /* Re-enable interrupts */
3295 DP(NETIF_MSG_RX_STATUS,
3296 "Update index to %d\n", fp->fp_hc_idx);
3297 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3298 le16_to_cpu(fp->fp_hc_idx),
3299 IGU_INT_ENABLE, 1);
3300 break;
3301 }
3302 }
3303 }
3304
3305 return work_done;
3306 }
3307
3308 #ifdef CONFIG_NET_RX_BUSY_POLL
3309 /* must be called with local_bh_disable()d */
3310 int bnx2x_low_latency_recv(struct napi_struct *napi)
3311 {
3312 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3313 napi);
3314 struct bnx2x *bp = fp->bp;
3315 int found = 0;
3316
3317 if ((bp->state == BNX2X_STATE_CLOSED) ||
3318 (bp->state == BNX2X_STATE_ERROR) ||
3319 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
3320 return LL_FLUSH_FAILED;
3321
3322 if (!bnx2x_fp_lock_poll(fp))
3323 return LL_FLUSH_BUSY;
3324
3325 if (bnx2x_has_rx_work(fp))
3326 found = bnx2x_rx_int(fp, 4);
3327
3328 bnx2x_fp_unlock_poll(fp);
3329
3330 return found;
3331 }
3332 #endif
3333
3334 /* we split the first BD into headers and data BDs
3335 * to ease the pain of our fellow microcode engineers;
3336 * we use one mapping for both BDs
3337 */
3338 static u16 bnx2x_tx_split(struct bnx2x *bp,
3339 struct bnx2x_fp_txdata *txdata,
3340 struct sw_tx_bd *tx_buf,
3341 struct eth_tx_start_bd **tx_bd, u16 hlen,
3342 u16 bd_prod)
3343 {
3344 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3345 struct eth_tx_bd *d_tx_bd;
3346 dma_addr_t mapping;
3347 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3348
3349 /* first fix first BD */
3350 h_tx_bd->nbytes = cpu_to_le16(hlen);
3351
3352 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3353 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3354
3355 /* now get a new data BD
3356 * (after the pbd) and fill it */
3357 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3358 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3359
3360 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3361 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3362
3363 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3364 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3365 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3366
3367 /* this marks the BD as one that has no individual mapping */
3368 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3369
3370 DP(NETIF_MSG_TX_QUEUED,
3371 "TSO split data size is %d (%x:%x)\n",
3372 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3373
3374 /* update tx_bd */
3375 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3376
3377 return bd_prod;
3378 }
3379
3380 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3381 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3382 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3383 {
3384 __sum16 tsum = (__force __sum16) csum;
3385
3386 if (fix > 0)
3387 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3388 csum_partial(t_header - fix, fix, 0)));
3389
3390 else if (fix < 0)
3391 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3392 csum_partial(t_header, -fix, 0)));
3393
3394 return bswab16(tsum);
3395 }
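
/* Illustrative note (not part of the original driver source): 'fix' is the
 * signed byte offset between the transport header and the point where the
 * stack started its partial checksum. A positive fix means the partial csum
 * also covered 'fix' bytes before the transport header, so their
 * contribution is subtracted via csum_sub()/csum_partial(t_header - fix,
 * fix, 0); a negative fix adds the missing bytes back in. The result is
 * folded and byte-swapped into the format the FW expects.
 */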
3396
3397 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3398 {
3399 u32 rc;
3400 __u8 prot = 0;
3401 __be16 protocol;
3402
3403 if (skb->ip_summed != CHECKSUM_PARTIAL)
3404 return XMIT_PLAIN;
3405
3406 protocol = vlan_get_protocol(skb);
3407 if (protocol == htons(ETH_P_IPV6)) {
3408 rc = XMIT_CSUM_V6;
3409 prot = ipv6_hdr(skb)->nexthdr;
3410 } else {
3411 rc = XMIT_CSUM_V4;
3412 prot = ip_hdr(skb)->protocol;
3413 }
3414
3415 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3416 if (inner_ip_hdr(skb)->version == 6) {
3417 rc |= XMIT_CSUM_ENC_V6;
3418 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3419 rc |= XMIT_CSUM_TCP;
3420 } else {
3421 rc |= XMIT_CSUM_ENC_V4;
3422 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3423 rc |= XMIT_CSUM_TCP;
3424 }
3425 }
3426 if (prot == IPPROTO_TCP)
3427 rc |= XMIT_CSUM_TCP;
3428
3429 if (skb_is_gso(skb)) {
3430 if (skb_is_gso_v6(skb)) {
3431 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3432 if (rc & XMIT_CSUM_ENC)
3433 rc |= XMIT_GSO_ENC_V6;
3434 } else {
3435 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3436 if (rc & XMIT_CSUM_ENC)
3437 rc |= XMIT_GSO_ENC_V4;
3438 }
3439 }
3440
3441 return rc;
3442 }
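
/* Illustrative example (not from the original source): a TSO TCP/IPv4
 * packet with CHECKSUM_PARTIAL and no encapsulation yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while the same flow inside a
 * VXLAN tunnel on a non-E1x chip additionally sets XMIT_CSUM_ENC_V4 and
 * XMIT_GSO_ENC_V4 so the PBD setup helpers work on the inner headers.
 */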
3443
3444 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3445 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3446
3447 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3448 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3449
3450 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3451 /* check if packet requires linearization (packet is too fragmented).
3452 No need to check fragmentation if page size > 8K (there will be no
3453 violation of FW restrictions) */
3454 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3455 u32 xmit_type)
3456 {
3457 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3458 int to_copy = 0, hlen = 0;
3459
3460 if (xmit_type & XMIT_GSO_ENC)
3461 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3462
3463 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3464 if (xmit_type & XMIT_GSO) {
3465 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3466 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3467 /* Number of windows to check */
3468 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3469 int wnd_idx = 0;
3470 int frag_idx = 0;
3471 u32 wnd_sum = 0;
3472
3473 /* Headers length */
3474 if (xmit_type & XMIT_GSO_ENC)
3475 hlen = (int)(skb_inner_transport_header(skb) -
3476 skb->data) +
3477 inner_tcp_hdrlen(skb);
3478 else
3479 hlen = (int)(skb_transport_header(skb) -
3480 skb->data) + tcp_hdrlen(skb);
3481
3482 /* Amount of data (w/o headers) on linear part of SKB */
3483 first_bd_sz = skb_headlen(skb) - hlen;
3484
3485 wnd_sum = first_bd_sz;
3486
3487 /* Calculate the first sum - it's special */
3488 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3489 wnd_sum +=
3490 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3491
3492 /* If there was data in the linear part of the skb - check it */
3493 if (first_bd_sz > 0) {
3494 if (unlikely(wnd_sum < lso_mss)) {
3495 to_copy = 1;
3496 goto exit_lbl;
3497 }
3498
3499 wnd_sum -= first_bd_sz;
3500 }
3501
3502 /* Others are easier: run through the frag list and
3503 check all windows */
3504 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3505 wnd_sum +=
3506 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3507
3508 if (unlikely(wnd_sum < lso_mss)) {
3509 to_copy = 1;
3510 break;
3511 }
3512 wnd_sum -=
3513 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3514 }
3515 } else {
3516 /* in the non-LSO case a too fragmented packet should always
3517 be linearized */
3518 to_copy = 1;
3519 }
3520 }
3521
3522 exit_lbl:
3523 if (unlikely(to_copy))
3524 DP(NETIF_MSG_TX_QUEUED,
3525 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3526 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3527 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3528
3529 return to_copy;
3530 }
3531 #endif
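
/* Illustrative example (not from the original source): the check above
 * slides a window of (MAX_FETCH_BD - num_tso_win_sub) BDs over the frags
 * and requires every window to carry at least one full MSS of payload.
 * E.g. with a hypothetical window size of 10, an MSS of 1460 and ten
 * consecutive tiny frags of 100 bytes each, wnd_sum == 1000 < 1460, so
 * to_copy is set and the skb is linearized before transmission to satisfy
 * the FW restriction.
 */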
3532
3533 /**
3534 * bnx2x_set_pbd_gso - update PBD in GSO case.
3535 *
3536 * @skb: packet skb
3537 * @pbd: parse BD
3538 * @xmit_type: xmit flags
3539 */
3540 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3541 struct eth_tx_parse_bd_e1x *pbd,
3542 u32 xmit_type)
3543 {
3544 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3545 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3546 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3547
3548 if (xmit_type & XMIT_GSO_V4) {
3549 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3550 pbd->tcp_pseudo_csum =
3551 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3552 ip_hdr(skb)->daddr,
3553 0, IPPROTO_TCP, 0));
3554 } else {
3555 pbd->tcp_pseudo_csum =
3556 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3557 &ipv6_hdr(skb)->daddr,
3558 0, IPPROTO_TCP, 0));
3559 }
3560
3561 pbd->global_data |=
3562 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3563 }
3564
3565 /**
3566 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3567 *
3568 * @bp: driver handle
3569 * @skb: packet skb
3570 * @parsing_data: data to be updated
3571 * @xmit_type: xmit flags
3572 *
3573 * 57712/578xx related, when skb has encapsulation
3574 */
3575 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3576 u32 *parsing_data, u32 xmit_type)
3577 {
3578 *parsing_data |=
3579 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3580 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3581 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3582
3583 if (xmit_type & XMIT_CSUM_TCP) {
3584 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3585 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3586 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3587
3588 return skb_inner_transport_header(skb) +
3589 inner_tcp_hdrlen(skb) - skb->data;
3590 }
3591
3592 /* We support checksum offload for TCP and UDP only.
3593 * No need to pass the UDP header length - it's a constant.
3594 */
3595 return skb_inner_transport_header(skb) +
3596 sizeof(struct udphdr) - skb->data;
3597 }
3598
3599 /**
3600 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3601 *
3602 * @bp: driver handle
3603 * @skb: packet skb
3604 * @parsing_data: data to be updated
3605 * @xmit_type: xmit flags
3606 *
3607 * 57712/578xx related
3608 */
3609 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3610 u32 *parsing_data, u32 xmit_type)
3611 {
3612 *parsing_data |=
3613 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3614 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3615 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3616
3617 if (xmit_type & XMIT_CSUM_TCP) {
3618 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3619 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3620 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3621
3622 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3623 }
3624 /* We support checksum offload for TCP and UDP only.
3625 * No need to pass the UDP header length - it's a constant.
3626 */
3627 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3628 }
3629
3630 /* set FW indication according to inner or outer protocols if tunneled */
3631 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3632 struct eth_tx_start_bd *tx_start_bd,
3633 u32 xmit_type)
3634 {
3635 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3636
3637 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3638 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3639
3640 if (!(xmit_type & XMIT_CSUM_TCP))
3641 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3642 }
3643
3644 /**
3645 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3646 *
3647 * @bp: driver handle
3648 * @skb: packet skb
3649 * @pbd: parse BD to be updated
3650 * @xmit_type: xmit flags
3651 */
3652 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3653 struct eth_tx_parse_bd_e1x *pbd,
3654 u32 xmit_type)
3655 {
3656 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3657
3658 /* for now NS flag is not used in Linux */
3659 pbd->global_data =
3660 cpu_to_le16(hlen |
3661 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3662 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3663
3664 pbd->ip_hlen_w = (skb_transport_header(skb) -
3665 skb_network_header(skb)) >> 1;
3666
3667 hlen += pbd->ip_hlen_w;
3668
3669 /* We support checksum offload for TCP and UDP only */
3670 if (xmit_type & XMIT_CSUM_TCP)
3671 hlen += tcp_hdrlen(skb) / 2;
3672 else
3673 hlen += sizeof(struct udphdr) / 2;
3674
3675 pbd->total_hlen_w = cpu_to_le16(hlen);
3676 hlen = hlen*2;
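	/* total_hlen_w stays in 16-bit words for the parsing BD, while the
	 * value returned to the caller is in bytes. Illustrative example
	 * (no VLAN, no IP or TCP options): 14 + 20 + 20 bytes of headers
	 * give hlen = 27 words here and 54 bytes returned.
	 */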
3677
3678 if (xmit_type & XMIT_CSUM_TCP) {
3679 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3680
3681 } else {
3682 s8 fix = SKB_CS_OFF(skb); /* signed! */
3683
3684 DP(NETIF_MSG_TX_QUEUED,
3685 "hlen %d fix %d csum before fix %x\n",
3686 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3687
3688 /* HW bug: fixup the CSUM */
3689 pbd->tcp_pseudo_csum =
3690 bnx2x_csum_fix(skb_transport_header(skb),
3691 SKB_CS(skb), fix);
3692
3693 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3694 pbd->tcp_pseudo_csum);
3695 }
3696
3697 return hlen;
3698 }
3699
3700 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3701 struct eth_tx_parse_bd_e2 *pbd_e2,
3702 struct eth_tx_parse_2nd_bd *pbd2,
3703 u16 *global_data,
3704 u32 xmit_type)
3705 {
3706 u16 hlen_w = 0;
3707 u8 outerip_off, outerip_len = 0;
3708
3709 /* from outer IP to transport */
3710 hlen_w = (skb_inner_transport_header(skb) -
3711 skb_network_header(skb)) >> 1;
3712
3713 /* transport len */
3714 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3715
3716 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3717
3718 /* outer IP header info */
3719 if (xmit_type & XMIT_CSUM_V4) {
3720 struct iphdr *iph = ip_hdr(skb);
3721 u32 csum = (__force u32)(~iph->check) -
3722 (__force u32)iph->tot_len -
3723 (__force u32)iph->frag_off;
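		/* i.e. the outer IPv4 checksum with the length and fragment
		 * fields backed out, matching the fw_ip_csum_wo_len_flags_frag
		 * field it is folded into below. */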
3724
3725 outerip_len = iph->ihl << 1;
3726
3727 pbd2->fw_ip_csum_wo_len_flags_frag =
3728 bswab16(csum_fold((__force __wsum)csum));
3729 } else {
3730 pbd2->fw_ip_hdr_to_payload_w =
3731 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3732 pbd_e2->data.tunnel_data.flags |=
3733 ETH_TUNNEL_DATA_IPV6_OUTER;
3734 }
3735
3736 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3737
3738 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3739
3740 /* inner IP header info */
3741 if (xmit_type & XMIT_CSUM_ENC_V4) {
3742 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3743
3744 pbd_e2->data.tunnel_data.pseudo_csum =
3745 bswab16(~csum_tcpudp_magic(
3746 inner_ip_hdr(skb)->saddr,
3747 inner_ip_hdr(skb)->daddr,
3748 0, IPPROTO_TCP, 0));
3749 } else {
3750 pbd_e2->data.tunnel_data.pseudo_csum =
3751 bswab16(~csum_ipv6_magic(
3752 &inner_ipv6_hdr(skb)->saddr,
3753 &inner_ipv6_hdr(skb)->daddr,
3754 0, IPPROTO_TCP, 0));
3755 }
3756
3757 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3758
3759 *global_data |=
3760 outerip_off |
3761 (outerip_len <<
3762 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3763 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3764 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3765
3766 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3767 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3768 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3769 }
3770 }
3771
3772 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3773 u32 xmit_type)
3774 {
3775 struct ipv6hdr *ipv6;
3776
3777 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3778 return;
3779
3780 if (xmit_type & XMIT_GSO_ENC_V6)
3781 ipv6 = inner_ipv6_hdr(skb);
3782 else /* XMIT_GSO_V6 */
3783 ipv6 = ipv6_hdr(skb);
3784
3785 if (ipv6->nexthdr == NEXTHDR_IPV6)
3786 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3787 }
3788
3789 /* called with netif_tx_lock
3790 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3791 * netif_wake_queue()
3792 */
3793 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3794 {
3795 struct bnx2x *bp = netdev_priv(dev);
3796
3797 struct netdev_queue *txq;
3798 struct bnx2x_fp_txdata *txdata;
3799 struct sw_tx_bd *tx_buf;
3800 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3801 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3802 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3803 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3804 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3805 u32 pbd_e2_parsing_data = 0;
3806 u16 pkt_prod, bd_prod;
3807 int nbd, txq_index;
3808 dma_addr_t mapping;
3809 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3810 int i;
3811 u8 hlen = 0;
3812 __le16 pkt_size = 0;
3813 struct ethhdr *eth;
3814 u8 mac_type = UNICAST_ADDRESS;
3815
3816 #ifdef BNX2X_STOP_ON_ERROR
3817 if (unlikely(bp->panic))
3818 return NETDEV_TX_BUSY;
3819 #endif
3820
3821 txq_index = skb_get_queue_mapping(skb);
3822 txq = netdev_get_tx_queue(dev, txq_index);
3823
3824 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3825
3826 txdata = &bp->bnx2x_txq[txq_index];
3827
3828 /* enable this debug print to view the transmission queue being used
3829 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3830 txq_index, fp_index, txdata_index); */
3831
3832 /* enable this debug print to view the transmission details
3833 DP(NETIF_MSG_TX_QUEUED,
3834 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3835 txdata->cid, fp_index, txdata_index, txdata, fp); */
3836
3837 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3838 skb_shinfo(skb)->nr_frags +
3839 BDS_PER_TX_PKT +
3840 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3841 /* Handle special storage cases separately */
3842 if (txdata->tx_ring_size == 0) {
3843 struct bnx2x_eth_q_stats *q_stats =
3844 bnx2x_fp_qstats(bp, txdata->parent_fp);
3845 q_stats->driver_filtered_tx_pkt++;
3846 dev_kfree_skb(skb);
3847 return NETDEV_TX_OK;
3848 }
3849 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3850 netif_tx_stop_queue(txq);
3851 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3852
3853 return NETDEV_TX_BUSY;
3854 }
3855
3856 DP(NETIF_MSG_TX_QUEUED,
3857 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3858 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3859 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3860 skb->len);
3861
3862 eth = (struct ethhdr *)skb->data;
3863
3864 /* set flag according to packet type (UNICAST_ADDRESS is default) */
3865 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3866 if (is_broadcast_ether_addr(eth->h_dest))
3867 mac_type = BROADCAST_ADDRESS;
3868 else
3869 mac_type = MULTICAST_ADDRESS;
3870 }
3871
3872 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3873 /* First, check if we need to linearize the skb (due to FW
3874 restrictions). No need to check fragmentation if page size > 8K
3875 (there will be no violation of FW restrictions) */
3876 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3877 /* Statistics of linearization */
3878 bp->lin_cnt++;
3879 if (skb_linearize(skb) != 0) {
3880 DP(NETIF_MSG_TX_QUEUED,
3881 "SKB linearization failed - silently dropping this SKB\n");
3882 dev_kfree_skb_any(skb);
3883 return NETDEV_TX_OK;
3884 }
3885 }
3886 #endif
3887 /* Map skb linear data for DMA */
3888 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3889 skb_headlen(skb), DMA_TO_DEVICE);
3890 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3891 DP(NETIF_MSG_TX_QUEUED,
3892 "SKB mapping failed - silently dropping this SKB\n");
3893 dev_kfree_skb_any(skb);
3894 return NETDEV_TX_OK;
3895 }
3896 /*
3897 Please read carefully. First we use one BD which we mark as start,
3898 then we have a parsing info BD (used for TSO or xsum),
3899 and only then we have the rest of the TSO BDs.
3900 (don't forget to mark the last one as last,
3901 and to unmap only AFTER you write to the BD ...)
3902 And above all, all PBD sizes are in words - NOT DWORDS!
3903 */
3904
3905 /* get current pkt produced now - advance it just before sending packet
3906 * since mapping of pages may fail and cause packet to be dropped
3907 */
3908 pkt_prod = txdata->tx_pkt_prod;
3909 bd_prod = TX_BD(txdata->tx_bd_prod);
3910
3911 /* get a tx_buf and first BD
3912 * tx_start_bd may be changed during SPLIT,
3913 * but first_bd will always stay first
3914 */
3915 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3916 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3917 first_bd = tx_start_bd;
3918
3919 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3920
3921 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3922 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3923 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3924 } else if (bp->ptp_tx_skb) {
3925 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3926 } else {
3927 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3928 /* schedule check for Tx timestamp */
3929 bp->ptp_tx_skb = skb_get(skb);
3930 bp->ptp_tx_start = jiffies;
3931 schedule_work(&bp->ptp_task);
3932 }
3933 }
3934
3935 /* header nbd: indirectly zero other flags! */
3936 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3937
3938 /* remember the first BD of the packet */
3939 tx_buf->first_bd = txdata->tx_bd_prod;
3940 tx_buf->skb = skb;
3941 tx_buf->flags = 0;
3942
3943 DP(NETIF_MSG_TX_QUEUED,
3944 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3945 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3946
3947 if (skb_vlan_tag_present(skb)) {
3948 tx_start_bd->vlan_or_ethertype =
3949 cpu_to_le16(skb_vlan_tag_get(skb));
3950 tx_start_bd->bd_flags.as_bitfield |=
3951 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3952 } else {
3953 /* when transmitting in a VF, the start BD must hold the ethertype
3954 * for the FW to enforce it
3955 */
3956 u16 vlan_tci = 0;
3957 #ifndef BNX2X_STOP_ON_ERROR
3958 if (IS_VF(bp)) {
3959 #endif
3960 /* Still need to consider inband vlan for enforcement */
3961 if (__vlan_get_tag(skb, &vlan_tci)) {
3962 tx_start_bd->vlan_or_ethertype =
3963 cpu_to_le16(ntohs(eth->h_proto));
3964 } else {
3965 tx_start_bd->bd_flags.as_bitfield |=
3966 (X_ETH_INBAND_VLAN <<
3967 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3968 tx_start_bd->vlan_or_ethertype =
3969 cpu_to_le16(vlan_tci);
3970 }
3971 #ifndef BNX2X_STOP_ON_ERROR
3972 } else {
3973 /* used by FW for packet accounting */
3974 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3975 }
3976 #endif
3977 }
3978
3979 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3980
3981 /* turn on parsing and get a BD */
3982 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3983
3984 if (xmit_type & XMIT_CSUM)
3985 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3986
3987 if (!CHIP_IS_E1x(bp)) {
3988 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3989 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3990
3991 if (xmit_type & XMIT_CSUM_ENC) {
3992 u16 global_data = 0;
3993
3994 /* Set PBD in enc checksum offload case */
3995 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3996 &pbd_e2_parsing_data,
3997 xmit_type);
3998
3999 /* turn on 2nd parsing and get a BD */
4000 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4001
4002 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
4003
4004 memset(pbd2, 0, sizeof(*pbd2));
4005
4006 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
4007 (skb_inner_network_header(skb) -
4008 skb->data) >> 1;
4009
4010 if (xmit_type & XMIT_GSO_ENC)
4011 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
4012 &global_data,
4013 xmit_type);
4014
4015 pbd2->global_data = cpu_to_le16(global_data);
4016
4017 /* add additional parse BD indication to start BD */
4018 SET_FLAG(tx_start_bd->general_data,
4019 ETH_TX_START_BD_PARSE_NBDS, 1);
4020 /* set encapsulation flag in start BD */
4021 SET_FLAG(tx_start_bd->general_data,
4022 ETH_TX_START_BD_TUNNEL_EXIST, 1);
4023
4024 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
4025
4026 nbd++;
4027 } else if (xmit_type & XMIT_CSUM) {
4028 /* Set PBD in checksum offload case w/o encapsulation */
4029 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
4030 &pbd_e2_parsing_data,
4031 xmit_type);
4032 }
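		/* Editorial note: for the tunneled case the chain built above is
		 * start BD -> parse BD (e2) -> 2nd parse BD -> data BD(s);
		 * without encapsulation the 2nd parse BD is simply not used.
		 */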
4033
4034 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
4035 /* Add the macs to the parsing BD if this is a vf or if
4036 * Tx Switching is enabled.
4037 */
4038 if (IS_VF(bp)) {
4039 /* override GRE parameters in BD */
4040 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4041 &pbd_e2->data.mac_addr.src_mid,
4042 &pbd_e2->data.mac_addr.src_lo,
4043 eth->h_source);
4044
4045 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
4046 &pbd_e2->data.mac_addr.dst_mid,
4047 &pbd_e2->data.mac_addr.dst_lo,
4048 eth->h_dest);
4049 } else {
4050 if (bp->flags & TX_SWITCHING)
4051 bnx2x_set_fw_mac_addr(
4052 &pbd_e2->data.mac_addr.dst_hi,
4053 &pbd_e2->data.mac_addr.dst_mid,
4054 &pbd_e2->data.mac_addr.dst_lo,
4055 eth->h_dest);
4056 #ifdef BNX2X_STOP_ON_ERROR
4057 /* Enforce-security is always set in Stop on Error mode -
4058 * the source MAC should be present in the parsing BD
4059 */
4060 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4061 &pbd_e2->data.mac_addr.src_mid,
4062 &pbd_e2->data.mac_addr.src_lo,
4063 eth->h_source);
4064 #endif
4065 }
4066
4067 SET_FLAG(pbd_e2_parsing_data,
4068 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4069 } else {
4070 u16 global_data = 0;
4071 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4072 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4073 /* Set PBD in checksum offload case */
4074 if (xmit_type & XMIT_CSUM)
4075 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4076
4077 SET_FLAG(global_data,
4078 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4079 pbd_e1x->global_data |= cpu_to_le16(global_data);
4080 }
4081
4082 /* Setup the data pointer of the first BD of the packet */
4083 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4084 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4085 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4086 pkt_size = tx_start_bd->nbytes;
4087
4088 DP(NETIF_MSG_TX_QUEUED,
4089 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4090 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4091 le16_to_cpu(tx_start_bd->nbytes),
4092 tx_start_bd->bd_flags.as_bitfield,
4093 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4094
4095 if (xmit_type & XMIT_GSO) {
4096
4097 DP(NETIF_MSG_TX_QUEUED,
4098 "TSO packet len %d hlen %d total len %d tso size %d\n",
4099 skb->len, hlen, skb_headlen(skb),
4100 skb_shinfo(skb)->gso_size);
4101
4102 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4103
4104 if (unlikely(skb_headlen(skb) > hlen)) {
4105 nbd++;
4106 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4107 &tx_start_bd, hlen,
4108 bd_prod);
4109 }
4110 if (!CHIP_IS_E1x(bp))
4111 pbd_e2_parsing_data |=
4112 (skb_shinfo(skb)->gso_size <<
4113 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4114 ETH_TX_PARSE_BD_E2_LSO_MSS;
4115 else
4116 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4117 }
4118
4119 /* Set the PBD's parsing_data field if not zero
4120 * (for the chips newer than 57711).
4121 */
4122 if (pbd_e2_parsing_data)
4123 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4124
4125 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4126
4127 /* Handle fragmented skb */
4128 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4129 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4130
4131 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4132 skb_frag_size(frag), DMA_TO_DEVICE);
4133 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4134 unsigned int pkts_compl = 0, bytes_compl = 0;
4135
4136 DP(NETIF_MSG_TX_QUEUED,
4137 "Unable to map page - dropping packet...\n");
4138
4139 /* we need to unmap all buffers already mapped
4140 * for this SKB;
4141 * first_bd->nbd needs to be properly updated
4142 * before the call to bnx2x_free_tx_pkt
4143 */
4144 first_bd->nbd = cpu_to_le16(nbd);
4145 bnx2x_free_tx_pkt(bp, txdata,
4146 TX_BD(txdata->tx_pkt_prod),
4147 &pkts_compl, &bytes_compl);
4148 return NETDEV_TX_OK;
4149 }
4150
4151 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4152 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4153 if (total_pkt_bd == NULL)
4154 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4155
4156 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4157 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4158 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4159 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4160 nbd++;
4161
4162 DP(NETIF_MSG_TX_QUEUED,
4163 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4164 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4165 le16_to_cpu(tx_data_bd->nbytes));
4166 }
4167
4168 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4169
4170 /* update with actual num BDs */
4171 first_bd->nbd = cpu_to_le16(nbd);
4172
4173 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4174
4175 /* now send a tx doorbell, counting the next BD
4176 * if the packet contains or ends with it
4177 */
4178 if (TX_BD_POFF(bd_prod) < nbd)
4179 nbd++;
4180
4181 /* total_pkt_bytes should be set on the first data BD if
4182 * it's not an LSO packet and there is more than one
4183 * data BD. In this case pkt_size is limited by the MTU value.
4184 * However, we prefer to set it for an LSO packet as well (while we
4185 * don't have to) in order to save some CPU cycles in the non-LSO
4186 * case, where we care about them much more.
4187 */
4188 if (total_pkt_bd != NULL)
4189 total_pkt_bd->total_pkt_bytes = pkt_size;
4190
4191 if (pbd_e1x)
4192 DP(NETIF_MSG_TX_QUEUED,
4193 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4194 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4195 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4196 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4197 le16_to_cpu(pbd_e1x->total_hlen_w));
4198 if (pbd_e2)
4199 DP(NETIF_MSG_TX_QUEUED,
4200 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4201 pbd_e2,
4202 pbd_e2->data.mac_addr.dst_hi,
4203 pbd_e2->data.mac_addr.dst_mid,
4204 pbd_e2->data.mac_addr.dst_lo,
4205 pbd_e2->data.mac_addr.src_hi,
4206 pbd_e2->data.mac_addr.src_mid,
4207 pbd_e2->data.mac_addr.src_lo,
4208 pbd_e2->parsing_data);
4209 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4210
4211 netdev_tx_sent_queue(txq, skb->len);
4212
4213 skb_tx_timestamp(skb);
4214
4215 txdata->tx_pkt_prod++;
4216 /*
4217 * Make sure that the BD data is updated before updating the producer
4218 * since FW might read the BD right after the producer is updated.
4219 * This is only applicable for weak-ordered memory model archs such
4220 * as IA-64. The following barrier is also mandatory since the FW
4221 * assumes packets must have BDs.
4222 */
4223 wmb();
4224
4225 txdata->tx_db.data.prod += nbd;
4226 barrier();
4227
4228 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4229
4230 mmiowb();
4231
4232 txdata->tx_bd_prod += nbd;
4233
4234 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4235 netif_tx_stop_queue(txq);
4236
4237 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4238 * ordering of set_bit() in netif_tx_stop_queue() and read of
4239 * fp->bd_tx_cons */
4240 smp_mb();
4241
4242 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4243 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4244 netif_tx_wake_queue(txq);
4245 }
4246 txdata->tx_pkt++;
4247
4248 return NETDEV_TX_OK;
4249 }
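/* Illustrative nbd accounting for a non-LSO, non-tunneled skb with two page
 * fragments on a 57712+ chip (editorial example, not part of the original):
 *   start BD + parse BD (e2)              -> nbd = 2
 *   two frag BDs                          -> nbd = 4  (written to first_bd->nbd)
 *   next-page BD crossed by the packet    -> nbd = 5  (doorbell producer only)
 */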
4250
4251 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4252 {
4253 int mfw_vn = BP_FW_MB_IDX(bp);
4254 u32 tmp;
4255
4256 /* If the shmem shouldn't affect the configuration, use the identity mapping */
4257 if (!IS_MF_BD(bp)) {
4258 int i;
4259
4260 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4261 c2s_map[i] = i;
4262 *c2s_default = 0;
4263
4264 return;
4265 }
4266
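	/* Each 32-bit shmem word packs the mapping for four priorities,
	 * one byte per priority (lower word: 0-3, upper word: 4-7). */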
4267 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4268 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4269 c2s_map[0] = tmp & 0xff;
4270 c2s_map[1] = (tmp >> 8) & 0xff;
4271 c2s_map[2] = (tmp >> 16) & 0xff;
4272 c2s_map[3] = (tmp >> 24) & 0xff;
4273
4274 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4275 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4276 c2s_map[4] = tmp & 0xff;
4277 c2s_map[5] = (tmp >> 8) & 0xff;
4278 c2s_map[6] = (tmp >> 16) & 0xff;
4279 c2s_map[7] = (tmp >> 24) & 0xff;
4280
4281 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4282 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4283 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4284 }
4285
4286 /**
4287 * bnx2x_setup_tc - routine to configure net_device for multi tc
4288 *
4289 * @dev: net device to configure
4290 * @num_tc: number of traffic classes to enable
4291 *
4292 * callback connected to the ndo_setup_tc function pointer
4293 */
4294 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4295 {
4296 struct bnx2x *bp = netdev_priv(dev);
4297 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4298 int cos, prio, count, offset;
4299
4300 /* setup tc must be called under rtnl lock */
4301 ASSERT_RTNL();
4302
4303 /* no traffic classes requested. Aborting */
4304 if (!num_tc) {
4305 netdev_reset_tc(dev);
4306 return 0;
4307 }
4308
4309 /* requested to support too many traffic classes */
4310 if (num_tc > bp->max_cos) {
4311 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4312 num_tc, bp->max_cos);
4313 return -EINVAL;
4314 }
4315
4316 /* declare amount of supported traffic classes */
4317 if (netdev_set_num_tc(dev, num_tc)) {
4318 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4319 return -EINVAL;
4320 }
4321
4322 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4323
4324 /* configure priority to traffic class mapping */
4325 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4326 int outer_prio = c2s_map[prio];
4327
4328 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4329 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4330 "mapping priority %d to tc %d\n",
4331 outer_prio, bp->prio_to_cos[outer_prio]);
4332 }
4333
4334 /* Use this configuration to differentiate tc0 from other COSes
4335 This can be used for ETS or PFC, and saves the effort of setting
4336 up a multi-class queue disc or negotiating DCBX with a switch
4337 netdev_set_prio_tc_map(dev, 0, 0);
4338 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4339 for (prio = 1; prio < 16; prio++) {
4340 netdev_set_prio_tc_map(dev, prio, 1);
4341 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4342 } */
4343
4344 /* configure traffic class to transmission queue mapping */
4345 for (cos = 0; cos < bp->max_cos; cos++) {
4346 count = BNX2X_NUM_ETH_QUEUES(bp);
4347 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4348 netdev_set_tc_queue(dev, cos, count, offset);
4349 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4350 "mapping tc %d to offset %d count %d\n",
4351 cos, offset, count);
4352 }
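	/* e.g. with 4 ETH queues and 3 COSes the resulting mapping is
	 * tc0 -> queues 0-3, tc1 -> queues 4-7, tc2 -> queues 8-11
	 * (illustrative numbers only). */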
4353
4354 return 0;
4355 }
4356
4357 /* called with rtnl_lock */
4358 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4359 {
4360 struct sockaddr *addr = p;
4361 struct bnx2x *bp = netdev_priv(dev);
4362 int rc = 0;
4363
4364 if (!is_valid_ether_addr(addr->sa_data)) {
4365 BNX2X_ERR("Requested MAC address is not valid\n");
4366 return -EINVAL;
4367 }
4368
4369 if (IS_MF_STORAGE_ONLY(bp)) {
4370 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4371 return -EINVAL;
4372 }
4373
4374 if (netif_running(dev)) {
4375 rc = bnx2x_set_eth_mac(bp, false);
4376 if (rc)
4377 return rc;
4378 }
4379
4380 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4381
4382 if (netif_running(dev))
4383 rc = bnx2x_set_eth_mac(bp, true);
4384
4385 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4386 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4387
4388 return rc;
4389 }
4390
4391 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4392 {
4393 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4394 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4395 u8 cos;
4396
4397 /* Common */
4398
4399 if (IS_FCOE_IDX(fp_index)) {
4400 memset(sb, 0, sizeof(union host_hc_status_block));
4401 fp->status_blk_mapping = 0;
4402 } else {
4403 /* status blocks */
4404 if (!CHIP_IS_E1x(bp))
4405 BNX2X_PCI_FREE(sb->e2_sb,
4406 bnx2x_fp(bp, fp_index,
4407 status_blk_mapping),
4408 sizeof(struct host_hc_status_block_e2));
4409 else
4410 BNX2X_PCI_FREE(sb->e1x_sb,
4411 bnx2x_fp(bp, fp_index,
4412 status_blk_mapping),
4413 sizeof(struct host_hc_status_block_e1x));
4414 }
4415
4416 /* Rx */
4417 if (!skip_rx_queue(bp, fp_index)) {
4418 bnx2x_free_rx_bds(fp);
4419
4420 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4421 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4422 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4423 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4424 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4425
4426 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4427 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4428 sizeof(struct eth_fast_path_rx_cqe) *
4429 NUM_RCQ_BD);
4430
4431 /* SGE ring */
4432 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4433 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4434 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4435 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4436 }
4437
4438 /* Tx */
4439 if (!skip_tx_queue(bp, fp_index)) {
4440 /* fastpath tx rings: tx_buf tx_desc */
4441 for_each_cos_in_tx_queue(fp, cos) {
4442 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4443
4444 DP(NETIF_MSG_IFDOWN,
4445 "freeing tx memory of fp %d cos %d cid %d\n",
4446 fp_index, cos, txdata->cid);
4447
4448 BNX2X_FREE(txdata->tx_buf_ring);
4449 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4450 txdata->tx_desc_mapping,
4451 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4452 }
4453 }
4454 /* end of fastpath */
4455 }
4456
4457 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4458 {
4459 int i;
4460 for_each_cnic_queue(bp, i)
4461 bnx2x_free_fp_mem_at(bp, i);
4462 }
4463
4464 void bnx2x_free_fp_mem(struct bnx2x *bp)
4465 {
4466 int i;
4467 for_each_eth_queue(bp, i)
4468 bnx2x_free_fp_mem_at(bp, i);
4469 }
4470
4471 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4472 {
4473 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4474 if (!CHIP_IS_E1x(bp)) {
4475 bnx2x_fp(bp, index, sb_index_values) =
4476 (__le16 *)status_blk.e2_sb->sb.index_values;
4477 bnx2x_fp(bp, index, sb_running_index) =
4478 (__le16 *)status_blk.e2_sb->sb.running_index;
4479 } else {
4480 bnx2x_fp(bp, index, sb_index_values) =
4481 (__le16 *)status_blk.e1x_sb->sb.index_values;
4482 bnx2x_fp(bp, index, sb_running_index) =
4483 (__le16 *)status_blk.e1x_sb->sb.running_index;
4484 }
4485 }
4486
4487 /* Returns the number of actually allocated BDs */
4488 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4489 int rx_ring_size)
4490 {
4491 struct bnx2x *bp = fp->bp;
4492 u16 ring_prod, cqe_ring_prod;
4493 int i, failure_cnt = 0;
4494
4495 fp->rx_comp_cons = 0;
4496 cqe_ring_prod = ring_prod = 0;
4497
4498 /* This routine is called only during fp init so
4499 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4500 */
4501 for (i = 0; i < rx_ring_size; i++) {
4502 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4503 failure_cnt++;
4504 continue;
4505 }
4506 ring_prod = NEXT_RX_IDX(ring_prod);
4507 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4508 WARN_ON(ring_prod <= (i - failure_cnt));
4509 }
4510
4511 if (failure_cnt)
4512 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4513 i - failure_cnt, fp->index);
4514
4515 fp->rx_bd_prod = ring_prod;
4516 /* Limit the CQE producer by the CQE ring size */
4517 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4518 cqe_ring_prod);
4519 fp->rx_pkt = fp->rx_calls = 0;
4520
4521 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4522
4523 return i - failure_cnt;
4524 }
4525
4526 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4527 {
4528 int i;
4529
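	/* The last CQE of each RCQ page is reused as a next-page pointer;
	 * the (i % NUM_RCQ_RINGS) wrap below links the final page back to
	 * the first, closing the ring. */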
4530 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4531 struct eth_rx_cqe_next_page *nextpg;
4532
4533 nextpg = (struct eth_rx_cqe_next_page *)
4534 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4535 nextpg->addr_hi =
4536 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4537 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4538 nextpg->addr_lo =
4539 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4540 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4541 }
4542 }
4543
4544 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4545 {
4546 union host_hc_status_block *sb;
4547 struct bnx2x_fastpath *fp = &bp->fp[index];
4548 int ring_size = 0;
4549 u8 cos;
4550 int rx_ring_size = 0;
4551
4552 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4553 rx_ring_size = MIN_RX_SIZE_NONTPA;
4554 bp->rx_ring_size = rx_ring_size;
4555 } else if (!bp->rx_ring_size) {
4556 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4557
4558 if (CHIP_IS_E3(bp)) {
4559 u32 cfg = SHMEM_RD(bp,
4560 dev_info.port_hw_config[BP_PORT(bp)].
4561 default_cfg);
4562
4563 /* Decrease ring size for 1G functions */
4564 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4565 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4566 rx_ring_size /= 10;
4567 }
4568
4569 /* allocate at least number of buffers required by FW */
4570 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4571 MIN_RX_SIZE_TPA, rx_ring_size);
4572
4573 bp->rx_ring_size = rx_ring_size;
4574 } else /* if rx_ring_size specified - use it */
4575 rx_ring_size = bp->rx_ring_size;
4576
4577 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4578
4579 /* Common */
4580 sb = &bnx2x_fp(bp, index, status_blk);
4581
4582 if (!IS_FCOE_IDX(index)) {
4583 /* status blocks */
4584 if (!CHIP_IS_E1x(bp)) {
4585 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4586 sizeof(struct host_hc_status_block_e2));
4587 if (!sb->e2_sb)
4588 goto alloc_mem_err;
4589 } else {
4590 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4591 sizeof(struct host_hc_status_block_e1x));
4592 if (!sb->e1x_sb)
4593 goto alloc_mem_err;
4594 }
4595 }
4596
4597 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4598 * set shortcuts for it.
4599 */
4600 if (!IS_FCOE_IDX(index))
4601 set_sb_shortcuts(bp, index);
4602
4603 /* Tx */
4604 if (!skip_tx_queue(bp, index)) {
4605 /* fastpath tx rings: tx_buf tx_desc */
4606 for_each_cos_in_tx_queue(fp, cos) {
4607 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4608
4609 DP(NETIF_MSG_IFUP,
4610 "allocating tx memory of fp %d cos %d\n",
4611 index, cos);
4612
4613 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4614 sizeof(struct sw_tx_bd),
4615 GFP_KERNEL);
4616 if (!txdata->tx_buf_ring)
4617 goto alloc_mem_err;
4618 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4619 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4620 if (!txdata->tx_desc_ring)
4621 goto alloc_mem_err;
4622 }
4623 }
4624
4625 /* Rx */
4626 if (!skip_rx_queue(bp, index)) {
4627 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4628 bnx2x_fp(bp, index, rx_buf_ring) =
4629 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4630 if (!bnx2x_fp(bp, index, rx_buf_ring))
4631 goto alloc_mem_err;
4632 bnx2x_fp(bp, index, rx_desc_ring) =
4633 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4634 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4635 if (!bnx2x_fp(bp, index, rx_desc_ring))
4636 goto alloc_mem_err;
4637
4638 /* Seed all CQEs by 1s */
4639 bnx2x_fp(bp, index, rx_comp_ring) =
4640 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4641 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4642 if (!bnx2x_fp(bp, index, rx_comp_ring))
4643 goto alloc_mem_err;
4644
4645 /* SGE ring */
4646 bnx2x_fp(bp, index, rx_page_ring) =
4647 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4648 GFP_KERNEL);
4649 if (!bnx2x_fp(bp, index, rx_page_ring))
4650 goto alloc_mem_err;
4651 bnx2x_fp(bp, index, rx_sge_ring) =
4652 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4653 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4654 if (!bnx2x_fp(bp, index, rx_sge_ring))
4655 goto alloc_mem_err;
4656 /* RX BD ring */
4657 bnx2x_set_next_page_rx_bd(fp);
4658
4659 /* CQ ring */
4660 bnx2x_set_next_page_rx_cq(fp);
4661
4662 /* BDs */
4663 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4664 if (ring_size < rx_ring_size)
4665 goto alloc_mem_err;
4666 }
4667
4668 return 0;
4669
4670 /* handles low memory cases */
4671 alloc_mem_err:
4672 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4673 index, ring_size);
4674 /* FW will drop all packets if the queue is not big enough.
4675 * In these cases we disable the queue.
4676 * The minimum size differs for OOO, TPA and non-TPA queues.
4677 */
4678 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4679 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4680 /* release memory allocated for this queue */
4681 bnx2x_free_fp_mem_at(bp, index);
4682 return -ENOMEM;
4683 }
4684 return 0;
4685 }
4686
4687 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4688 {
4689 if (!NO_FCOE(bp))
4690 /* FCoE */
4691 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4692 /* we will fail load process instead of mark
4693 * NO_FCOE_FLAG
4694 */
4695 return -ENOMEM;
4696
4697 return 0;
4698 }
4699
4700 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4701 {
4702 int i;
4703
4704 /* 1. Allocate FP for leading - fatal if error
4705 * 2. Allocate RSS - fix number of queues if error
4706 */
4707
4708 /* leading */
4709 if (bnx2x_alloc_fp_mem_at(bp, 0))
4710 return -ENOMEM;
4711
4712 /* RSS */
4713 for_each_nondefault_eth_queue(bp, i)
4714 if (bnx2x_alloc_fp_mem_at(bp, i))
4715 break;
4716
4717 /* handle memory failures */
4718 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4719 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4720
4721 WARN_ON(delta < 0);
4722 bnx2x_shrink_eth_fp(bp, delta);
4723 if (CNIC_SUPPORT(bp))
4724 /* move non-eth FPs next to the last eth FP;
4725 * must be done in that order:
4726 * FCOE_IDX < FWD_IDX < OOO_IDX
4727 */
4728
4729 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4730 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4731 bp->num_ethernet_queues -= delta;
4732 bp->num_queues = bp->num_ethernet_queues +
4733 bp->num_cnic_queues;
4734 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4735 bp->num_queues + delta, bp->num_queues);
4736 }
4737
4738 return 0;
4739 }
4740
4741 void bnx2x_free_mem_bp(struct bnx2x *bp)
4742 {
4743 int i;
4744
4745 for (i = 0; i < bp->fp_array_size; i++)
4746 kfree(bp->fp[i].tpa_info);
4747 kfree(bp->fp);
4748 kfree(bp->sp_objs);
4749 kfree(bp->fp_stats);
4750 kfree(bp->bnx2x_txq);
4751 kfree(bp->msix_table);
4752 kfree(bp->ilt);
4753 }
4754
4755 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4756 {
4757 struct bnx2x_fastpath *fp;
4758 struct msix_entry *tbl;
4759 struct bnx2x_ilt *ilt;
4760 int msix_table_size = 0;
4761 int fp_array_size, txq_array_size;
4762 int i;
4763
4764 /*
4765 * The biggest MSI-X table we might need is the maximum number of fast
4766 * path IGU SBs plus the default SB (for PF only).
4767 */
4768 msix_table_size = bp->igu_sb_cnt;
4769 if (IS_PF(bp))
4770 msix_table_size++;
4771 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4772
4773 /* fp array: RSS plus CNIC related L2 queues */
4774 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4775 bp->fp_array_size = fp_array_size;
4776 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4777
4778 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4779 if (!fp)
4780 goto alloc_err;
4781 for (i = 0; i < bp->fp_array_size; i++) {
4782 fp[i].tpa_info =
4783 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4784 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4785 if (!(fp[i].tpa_info))
4786 goto alloc_err;
4787 }
4788
4789 bp->fp = fp;
4790
4791 /* allocate sp objs */
4792 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4793 GFP_KERNEL);
4794 if (!bp->sp_objs)
4795 goto alloc_err;
4796
4797 /* allocate fp_stats */
4798 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4799 GFP_KERNEL);
4800 if (!bp->fp_stats)
4801 goto alloc_err;
4802
4803 /* Allocate memory for the transmission queues array */
4804 txq_array_size =
4805 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4806 BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4807
4808 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4809 GFP_KERNEL);
4810 if (!bp->bnx2x_txq)
4811 goto alloc_err;
4812
4813 /* msix table */
4814 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4815 if (!tbl)
4816 goto alloc_err;
4817 bp->msix_table = tbl;
4818
4819 /* ilt */
4820 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4821 if (!ilt)
4822 goto alloc_err;
4823 bp->ilt = ilt;
4824
4825 return 0;
4826 alloc_err:
4827 bnx2x_free_mem_bp(bp);
4828 return -ENOMEM;
4829 }
4830
4831 int bnx2x_reload_if_running(struct net_device *dev)
4832 {
4833 struct bnx2x *bp = netdev_priv(dev);
4834
4835 if (unlikely(!netif_running(dev)))
4836 return 0;
4837
4838 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4839 return bnx2x_nic_load(bp, LOAD_NORMAL);
4840 }
4841
4842 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4843 {
4844 u32 sel_phy_idx = 0;
4845 if (bp->link_params.num_phys <= 1)
4846 return INT_PHY;
4847
4848 if (bp->link_vars.link_up) {
4849 sel_phy_idx = EXT_PHY1;
4850 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4851 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4852 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4853 sel_phy_idx = EXT_PHY2;
4854 } else {
4855
4856 switch (bnx2x_phy_selection(&bp->link_params)) {
4857 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4858 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4859 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4860 sel_phy_idx = EXT_PHY1;
4861 break;
4862 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4863 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4864 sel_phy_idx = EXT_PHY2;
4865 break;
4866 }
4867 }
4868
4869 return sel_phy_idx;
4870 }
4871 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4872 {
4873 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4874 /*
4875 * The selected activated PHY is always after swapping (in case PHY
4876 * swapping is enabled). So when swapping is enabled, we need to reverse
4877 * the configuration
4878 */
4879
4880 if (bp->link_params.multi_phy_config &
4881 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4882 if (sel_phy_idx == EXT_PHY1)
4883 sel_phy_idx = EXT_PHY2;
4884 else if (sel_phy_idx == EXT_PHY2)
4885 sel_phy_idx = EXT_PHY1;
4886 }
4887 return LINK_CONFIG_IDX(sel_phy_idx);
4888 }
4889
4890 #ifdef NETDEV_FCOE_WWNN
4891 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4892 {
4893 struct bnx2x *bp = netdev_priv(dev);
4894 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4895
4896 switch (type) {
4897 case NETDEV_FCOE_WWNN:
4898 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4899 cp->fcoe_wwn_node_name_lo);
4900 break;
4901 case NETDEV_FCOE_WWPN:
4902 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4903 cp->fcoe_wwn_port_name_lo);
4904 break;
4905 default:
4906 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4907 return -EINVAL;
4908 }
4909
4910 return 0;
4911 }
4912 #endif
4913
4914 /* called with rtnl_lock */
4915 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4916 {
4917 struct bnx2x *bp = netdev_priv(dev);
4918
4919 if (pci_num_vf(bp->pdev)) {
4920 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4921 return -EPERM;
4922 }
4923
4924 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4925 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4926 return -EAGAIN;
4927 }
4928
4929 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4930 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4931 BNX2X_ERR("Can't support requested MTU size\n");
4932 return -EINVAL;
4933 }
4934
4935 /* This does not race with packet allocation
4936 * because the actual alloc size is
4937 * only updated as part of load
4938 */
4939 dev->mtu = new_mtu;
4940
4941 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4942 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4943
4944 return bnx2x_reload_if_running(dev);
4945 }
4946
4947 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4948 netdev_features_t features)
4949 {
4950 struct bnx2x *bp = netdev_priv(dev);
4951
4952 if (pci_num_vf(bp->pdev)) {
4953 netdev_features_t changed = dev->features ^ features;
4954
4955 /* Revert the requested changes in features if they
4956 * would require internal reload of PF in bnx2x_set_features().
4957 */
4958 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4959 features &= ~NETIF_F_RXCSUM;
4960 features |= dev->features & NETIF_F_RXCSUM;
4961 }
4962
4963 if (changed & NETIF_F_LOOPBACK) {
4964 features &= ~NETIF_F_LOOPBACK;
4965 features |= dev->features & NETIF_F_LOOPBACK;
4966 }
4967 }
4968
4969 /* TPA requires Rx CSUM offloading */
4970 if (!(features & NETIF_F_RXCSUM)) {
4971 features &= ~NETIF_F_LRO;
4972 features &= ~NETIF_F_GRO;
4973 }
4974
4975 return features;
4976 }
4977
4978 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4979 {
4980 struct bnx2x *bp = netdev_priv(dev);
4981 netdev_features_t changes = features ^ dev->features;
4982 bool bnx2x_reload = false;
4983 int rc;
4984
4985 /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4986 if (!pci_num_vf(bp->pdev)) {
4987 if (features & NETIF_F_LOOPBACK) {
4988 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4989 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4990 bnx2x_reload = true;
4991 }
4992 } else {
4993 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4994 bp->link_params.loopback_mode = LOOPBACK_NONE;
4995 bnx2x_reload = true;
4996 }
4997 }
4998 }
4999
5000 /* if GRO is changed while LRO is enabled, don't force a reload */
5001 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
5002 changes &= ~NETIF_F_GRO;
5003
5004 /* if GRO is changed while HW TPA is off, don't force a reload */
5005 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
5006 changes &= ~NETIF_F_GRO;
5007
5008 if (changes)
5009 bnx2x_reload = true;
5010
5011 if (bnx2x_reload) {
5012 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
5013 dev->features = features;
5014 rc = bnx2x_reload_if_running(dev);
5015 return rc ? rc : 1;
5016 }
5017 /* else: bnx2x_nic_load() will be called at end of recovery */
5018 }
5019
5020 return 0;
5021 }
5022
5023 void bnx2x_tx_timeout(struct net_device *dev)
5024 {
5025 struct bnx2x *bp = netdev_priv(dev);
5026
5027 #ifdef BNX2X_STOP_ON_ERROR
5028 if (!bp->panic)
5029 bnx2x_panic();
5030 #endif
5031
5032 /* This allows the netif to be shutdown gracefully before resetting */
5033 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
5034 }
5035
5036 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
5037 {
5038 struct net_device *dev = pci_get_drvdata(pdev);
5039 struct bnx2x *bp;
5040
5041 if (!dev) {
5042 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5043 return -ENODEV;
5044 }
5045 bp = netdev_priv(dev);
5046
5047 rtnl_lock();
5048
5049 pci_save_state(pdev);
5050
5051 if (!netif_running(dev)) {
5052 rtnl_unlock();
5053 return 0;
5054 }
5055
5056 netif_device_detach(dev);
5057
5058 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5059
5060 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5061
5062 rtnl_unlock();
5063
5064 return 0;
5065 }
5066
5067 int bnx2x_resume(struct pci_dev *pdev)
5068 {
5069 struct net_device *dev = pci_get_drvdata(pdev);
5070 struct bnx2x *bp;
5071 int rc;
5072
5073 if (!dev) {
5074 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5075 return -ENODEV;
5076 }
5077 bp = netdev_priv(dev);
5078
5079 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5080 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5081 return -EAGAIN;
5082 }
5083
5084 rtnl_lock();
5085
5086 pci_restore_state(pdev);
5087
5088 if (!netif_running(dev)) {
5089 rtnl_unlock();
5090 return 0;
5091 }
5092
5093 bnx2x_set_power_state(bp, PCI_D0);
5094 netif_device_attach(dev);
5095
5096 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5097
5098 rtnl_unlock();
5099
5100 return rc;
5101 }
5102
5103 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5104 u32 cid)
5105 {
5106 if (!cxt) {
5107 BNX2X_ERR("bad context pointer %p\n", cxt);
5108 return;
5109 }
5110
5111 /* ustorm cxt validation */
5112 cxt->ustorm_ag_context.cdu_usage =
5113 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5114 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5115 /* xcontext validation */
5116 cxt->xstorm_ag_context.cdu_reserved =
5117 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5118 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5119 }
5120
5121 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5122 u8 fw_sb_id, u8 sb_index,
5123 u8 ticks)
5124 {
5125 u32 addr = BAR_CSTRORM_INTMEM +
5126 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5127 REG_WR8(bp, addr, ticks);
5128 DP(NETIF_MSG_IFUP,
5129 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5130 port, fw_sb_id, sb_index, ticks);
5131 }
5132
5133 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5134 u16 fw_sb_id, u8 sb_index,
5135 u8 disable)
5136 {
5137 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5138 u32 addr = BAR_CSTRORM_INTMEM +
5139 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5140 u8 flags = REG_RD8(bp, addr);
5141 /* clear and set */
5142 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5143 flags |= enable_flag;
5144 REG_WR8(bp, addr, flags);
5145 DP(NETIF_MSG_IFUP,
5146 "port %x fw_sb_id %d sb_index %d disable %d\n",
5147 port, fw_sb_id, sb_index, disable);
5148 }
5149
5150 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5151 u8 sb_index, u8 disable, u16 usec)
5152 {
5153 int port = BP_PORT(bp);
5154 u8 ticks = usec / BNX2X_BTR;
5155
5156 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5157
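	/* HC is disabled either when explicitly requested or when a zero
	 * interval is passed (usec == 0 means no coalescing). */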
5158 disable = disable ? 1 : (usec ? 0 : 1);
5159 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5160 }
5161
5162 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5163 u32 verbose)
5164 {
5165 smp_mb__before_atomic();
5166 set_bit(flag, &bp->sp_rtnl_state);
5167 smp_mb__after_atomic();
5168 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5169 flag);
5170 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5171 }
5172 EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
5173