1 /* bnx2x_cmn.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/ip.h>
26 #include <linux/crash_dump.h>
27 #include <net/tcp.h>
28 #include <net/ipv6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
34 #include "bnx2x_sp.h"
35 
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
40 
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 {
43 	int i;
44 
45 	/* Add NAPI objects */
46 	for_each_rx_queue_cnic(bp, i) {
47 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 			       bnx2x_poll, NAPI_POLL_WEIGHT);
49 	}
50 }
51 
52 static void bnx2x_add_all_napi(struct bnx2x *bp)
53 {
54 	int i;
55 
56 	/* Add NAPI objects */
57 	for_each_eth_queue(bp, i) {
58 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59 			       bnx2x_poll, NAPI_POLL_WEIGHT);
60 	}
61 }
62 
63 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 {
65 	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66 
67 	/* Reduce memory usage in kdump environment by using only one queue */
68 	if (is_kdump_kernel())
69 		nq = 1;
70 
71 	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72 	return nq;
73 }
74 
75 /**
76  * bnx2x_move_fp - move content of the fastpath structure.
77  *
78  * @bp:		driver handle
79  * @from:	source FP index
80  * @to:		destination FP index
81  *
82  * Makes sure the contents of bp->fp[to].napi are kept
83  * intact. This is done by first copying the napi struct from
84  * the target to the source, and then mem copying the entire
85  * source onto the target. Update txdata pointers and related
86  * content.
87  */
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 {
90 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
91 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
92 	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93 	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94 	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96 	int old_max_eth_txqs, new_max_eth_txqs;
97 	int old_txdata_index = 0, new_txdata_index = 0;
98 	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99 
100 	/* Copy the NAPI object as it has been already initialized */
101 	from_fp->napi = to_fp->napi;
102 
103 	/* Move bnx2x_fastpath contents */
104 	memcpy(to_fp, from_fp, sizeof(*to_fp));
105 	to_fp->index = to;
106 
107 	/* Retain the tpa_info of the original `to' version as we don't want
108 	 * 2 FPs to contain the same tpa_info pointer.
109 	 */
110 	to_fp->tpa_info = old_tpa_info;
111 
112 	/* move sp_objs contents as well, as their indices match fp ones */
113 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114 
115 	/* move fp_stats contents as well, as their indices match fp ones */
116 	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117 
118 	/* Update txdata pointers in fp and move txdata content accordingly:
119 	 * Each fp consumes 'max_cos' txdata structures, so the index should be
120 	 * decremented by max_cos x delta.
121 	 */
122 
123 	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124 	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 				(bp)->max_cos;
126 	if (from == FCOE_IDX(bp)) {
127 		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129 	}
130 
131 	memcpy(&bp->bnx2x_txq[new_txdata_index],
132 	       &bp->bnx2x_txq[old_txdata_index],
133 	       sizeof(struct bnx2x_fp_txdata));
134 	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
135 }
136 
137 /**
138  * bnx2x_fill_fw_str - Fill buffer with FW version string.
139  *
140  * @bp:        driver handle
141  * @buf:       character buffer to fill with the fw name
142  * @buf_len:   length of the above buffer
143  *
144  */
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146 {
147 	if (IS_PF(bp)) {
148 		u8 phy_fw_ver[PHY_FW_VER_LEN];
149 
150 		phy_fw_ver[0] = '\0';
151 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
152 					     phy_fw_ver, PHY_FW_VER_LEN);
153 		strlcpy(buf, bp->fw_ver, buf_len);
154 		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 			 "bc %d.%d.%d%s%s",
156 			 (bp->common.bc_ver & 0xff0000) >> 16,
157 			 (bp->common.bc_ver & 0xff00) >> 8,
158 			 (bp->common.bc_ver & 0xff),
159 			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 	} else {
161 		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
162 	}
163 }
164 
165 /**
166  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167  *
168  * @bp:	driver handle
169  * @delta:	number of eth queues which were not allocated
170  */
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 {
173 	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174 
175 	/* Queue pointer cannot be re-set on a per-fp basis, as moving the
176 	 * pointer backward along the array could cause memory to be overwritten
177 	 */
178 	for (cos = 1; cos < bp->max_cos; cos++) {
179 		for (i = 0; i < old_eth_num - delta; i++) {
180 			struct bnx2x_fastpath *fp = &bp->fp[i];
181 			int new_idx = cos * (old_eth_num - delta) + i;
182 
183 			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184 			       sizeof(struct bnx2x_fp_txdata));
185 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186 		}
187 	}
188 }
189 
190 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191 
192 /* free skb in the packet ring at pos idx
193  * return idx of last bd freed
194  */
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196 			     u16 idx, unsigned int *pkts_compl,
197 			     unsigned int *bytes_compl)
198 {
199 	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200 	struct eth_tx_start_bd *tx_start_bd;
201 	struct eth_tx_bd *tx_data_bd;
202 	struct sk_buff *skb = tx_buf->skb;
203 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 	int nbd;
205 	u16 split_bd_len = 0;
206 
207 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
208 	prefetch(&skb->end);
209 
210 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
211 	   txdata->txq_index, idx, tx_buf, skb);
212 
213 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214 
215 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218 		BNX2X_ERR("BAD nbd!\n");
219 		bnx2x_panic();
220 	}
221 #endif
222 	new_cons = nbd + tx_buf->first_bd;
223 
224 	/* Get the next bd */
225 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226 
227 	/* Skip a parse bd... */
228 	--nbd;
229 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230 
231 	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232 		/* Skip second parse bd... */
233 		--nbd;
234 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235 	}
236 
237 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240 		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241 		--nbd;
242 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243 	}
244 
245 	/* unmap first bd */
246 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247 			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248 			 DMA_TO_DEVICE);
249 
250 	/* now free frags */
251 	while (nbd > 0) {
252 
253 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254 		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255 			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 		if (--nbd)
257 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258 	}
259 
260 	/* release skb */
261 	WARN_ON(!skb);
262 	if (likely(skb)) {
263 		(*pkts_compl)++;
264 		(*bytes_compl) += skb->len;
265 		dev_kfree_skb_any(skb);
266 	}
267 
268 	tx_buf->first_bd = 0;
269 	tx_buf->skb = NULL;
270 
271 	return new_cons;
272 }
273 
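/* Process TX completions for one txdata ring: walk from the SW consumer up
 * to the HW consumer taken from the status block, free the completed
 * packets, report them to BQL and re-wake the TX queue (under the tx lock)
 * if it was stopped and enough descriptors were released.
 */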
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 {
276 	struct netdev_queue *txq;
277 	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278 	unsigned int pkts_compl = 0, bytes_compl = 0;
279 
280 #ifdef BNX2X_STOP_ON_ERROR
281 	if (unlikely(bp->panic))
282 		return -1;
283 #endif
284 
285 	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286 	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287 	sw_cons = txdata->tx_pkt_cons;
288 
289 	/* Ensure subsequent loads occur after hw_cons */
290 	smp_rmb();
291 
292 	while (sw_cons != hw_cons) {
293 		u16 pkt_cons;
294 
295 		pkt_cons = TX_BD(sw_cons);
296 
297 		DP(NETIF_MSG_TX_DONE,
298 		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
299 		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
300 
301 		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
302 					    &pkts_compl, &bytes_compl);
303 
304 		sw_cons++;
305 	}
306 
307 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
308 
309 	txdata->tx_pkt_cons = sw_cons;
310 	txdata->tx_bd_cons = bd_cons;
311 
312 	/* Need to make the tx_bd_cons update visible to start_xmit()
313 	 * before checking for netif_tx_queue_stopped().  Without the
314 	 * memory barrier, there is a small possibility that
315 	 * start_xmit() will miss it and cause the queue to be stopped
316 	 * forever.
317 	 * On the other hand we need an rmb() here to ensure the proper
318 	 * ordering of bit testing in the following
319 	 * netif_tx_queue_stopped(txq) call.
320 	 */
321 	smp_mb();
322 
323 	if (unlikely(netif_tx_queue_stopped(txq))) {
324 		/* Taking tx_lock() is needed to prevent re-enabling the queue
325 		 * while it's empty. This could have happened if rx_action() gets
326 		 * suspended in bnx2x_tx_int() after the condition before
327 		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
328 		 *
329 		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
330 		 * sends some packets consuming the whole queue again->
331 		 * stops the queue
332 		 */
333 
334 		__netif_tx_lock(txq, smp_processor_id());
335 
336 		if ((netif_tx_queue_stopped(txq)) &&
337 		    (bp->state == BNX2X_STATE_OPEN) &&
338 		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
339 			netif_tx_wake_queue(txq);
340 
341 		__netif_tx_unlock(txq);
342 	}
343 	return 0;
344 }
345 
346 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
347 					     u16 idx)
348 {
349 	u16 last_max = fp->last_max_sge;
350 
351 	if (SUB_S16(idx, last_max) > 0)
352 		fp->last_max_sge = idx;
353 }
354 
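/* Mark the SGEs consumed by a completed aggregation in fp->sge_mask and
 * advance rx_sge_prod past every fully consumed 64-bit mask element so the
 * corresponding SGEs can be reused by the firmware.
 */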
355 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
356 					 u16 sge_len,
357 					 struct eth_end_agg_rx_cqe *cqe)
358 {
359 	struct bnx2x *bp = fp->bp;
360 	u16 last_max, last_elem, first_elem;
361 	u16 delta = 0;
362 	u16 i;
363 
364 	if (!sge_len)
365 		return;
366 
367 	/* First mark all used pages */
368 	for (i = 0; i < sge_len; i++)
369 		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
370 			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
371 
372 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
373 	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
374 
375 	/* Here we assume that the last SGE index is the biggest */
376 	prefetch((void *)(fp->sge_mask));
377 	bnx2x_update_last_max_sge(fp,
378 		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
379 
380 	last_max = RX_SGE(fp->last_max_sge);
381 	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
382 	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
383 
384 	/* If ring is not full */
385 	if (last_elem + 1 != first_elem)
386 		last_elem++;
387 
388 	/* Now update the prod */
389 	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
390 		if (likely(fp->sge_mask[i]))
391 			break;
392 
393 		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
394 		delta += BIT_VEC64_ELEM_SZ;
395 	}
396 
397 	if (delta > 0) {
398 		fp->rx_sge_prod += delta;
399 		/* clear page-end entries */
400 		bnx2x_clear_sge_mask_next_elems(fp);
401 	}
402 
403 	DP(NETIF_MSG_RX_STATUS,
404 	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
405 	   fp->last_max_sge, fp->rx_sge_prod);
406 }
407 
408 /* Get Toeplitz hash value in the skb using the value from the
409  * CQE (calculated by HW).
410  */
411 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
412 			    const struct eth_fast_path_rx_cqe *cqe,
413 			    enum pkt_hash_types *rxhash_type)
414 {
415 	/* Get Toeplitz hash from CQE */
416 	if ((bp->dev->features & NETIF_F_RXHASH) &&
417 	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
418 		enum eth_rss_hash_type htype;
419 
420 		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
421 		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
422 				(htype == TCP_IPV6_HASH_TYPE)) ?
423 			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
424 
425 		return le32_to_cpu(cqe->rss_hash_result);
426 	}
427 	*rxhash_type = PKT_HASH_TYPE_NONE;
428 	return 0;
429 }
430 
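/* Start a TPA aggregation: map the spare buffer kept in the per-queue bin
 * and publish it on the producer BD, park the consumer buffer (holding the
 * first segment) in the bin until TPA_STOP, and record the CQE parsing
 * flags, VLAN tag, hash and length needed to build the skb later.
 */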
431 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
432 			    u16 cons, u16 prod,
433 			    struct eth_fast_path_rx_cqe *cqe)
434 {
435 	struct bnx2x *bp = fp->bp;
436 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
437 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
438 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
439 	dma_addr_t mapping;
440 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
441 	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
442 
443 	/* print error if current state != stop */
444 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
445 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
446 
447 	/* Try to map an empty data buffer from the aggregation info  */
448 	mapping = dma_map_single(&bp->pdev->dev,
449 				 first_buf->data + NET_SKB_PAD,
450 				 fp->rx_buf_size, DMA_FROM_DEVICE);
451 	/*
452 	 *  ...if it fails - move the skb from the consumer to the producer
453 	 *  and set the current aggregation state as ERROR to drop it
454 	 *  when TPA_STOP arrives.
455 	 */
456 
457 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
458 		/* Move the BD from the consumer to the producer */
459 		bnx2x_reuse_rx_data(fp, cons, prod);
460 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
461 		return;
462 	}
463 
464 	/* move empty data from pool to prod */
465 	prod_rx_buf->data = first_buf->data;
466 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
467 	/* point prod_bd to new data */
468 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
469 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
470 
471 	/* move partial skb from cons to pool (don't unmap yet) */
472 	*first_buf = *cons_rx_buf;
473 
474 	/* mark bin state as START */
475 	tpa_info->parsing_flags =
476 		le16_to_cpu(cqe->pars_flags.flags);
477 	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
478 	tpa_info->tpa_state = BNX2X_TPA_START;
479 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
480 	tpa_info->placement_offset = cqe->placement_offset;
481 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
482 	if (fp->mode == TPA_MODE_GRO) {
483 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
484 		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
485 		tpa_info->gro_size = gro_size;
486 	}
487 
488 #ifdef BNX2X_STOP_ON_ERROR
489 	fp->tpa_queue_used |= (1 << queue);
490 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
491 	   fp->tpa_queue_used);
492 #endif
493 }
494 
495 /* Timestamp option length allowed for TPA aggregation:
496  *
497  *		nop nop kind length echo val
498  */
499 #define TPA_TSTAMP_OPT_LEN	12
500 /**
501  * bnx2x_set_gro_params - compute GRO values
502  *
503  * @skb:		packet skb
504  * @parsing_flags:	parsing flags from the START CQE
505  * @len_on_bd:		total length of the first packet for the
506  *			aggregation.
507  * @pkt_len:		length of all segments
508  *
509  * Approximates the MSS for this aggregation using its
510  * first packet.
511  * Computes the number of aggregated segments and the gso_type.
512  */
513 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
514 				 u16 len_on_bd, unsigned int pkt_len,
515 				 u16 num_of_coalesced_segs)
516 {
517 	/* TPA aggregation won't have either IP options or TCP options
518 	 * other than timestamp or IPv6 extension headers.
519 	 */
520 	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
521 
522 	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
523 	    PRS_FLAG_OVERETH_IPV6) {
524 		hdrs_len += sizeof(struct ipv6hdr);
525 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
526 	} else {
527 		hdrs_len += sizeof(struct iphdr);
528 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
529 	}
530 
531 	/* Check if there was a TCP timestamp; if there is, it will
532 	 * always be 12 bytes long: nop nop kind length echo val.
533 	 *
534 	 * Otherwise FW would close the aggregation.
535 	 */
536 	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
537 		hdrs_len += TPA_TSTAMP_OPT_LEN;
538 
539 	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
540 
541 	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
542 	 * to skb_shinfo(skb)->gso_segs
543 	 */
544 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
545 }
546 
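/* Attach an SGE_PAGE_SIZE chunk to the given SGE ring entry.  Chunks are
 * carved out of a shared page held in fp->page_pool; an extra page
 * reference is taken while more chunks remain, so the page is released only
 * once every chunk has been returned.
 */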
547 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
548 			      u16 index, gfp_t gfp_mask)
549 {
550 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
551 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
552 	struct bnx2x_alloc_pool *pool = &fp->page_pool;
553 	dma_addr_t mapping;
554 
555 	if (!pool->page) {
556 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
557 		if (unlikely(!pool->page))
558 			return -ENOMEM;
559 
560 		pool->offset = 0;
561 	}
562 
563 	mapping = dma_map_page(&bp->pdev->dev, pool->page,
564 			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
565 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
566 		BNX2X_ERR("Can't map sge\n");
567 		return -ENOMEM;
568 	}
569 
570 	sw_buf->page = pool->page;
571 	sw_buf->offset = pool->offset;
572 
573 	dma_unmap_addr_set(sw_buf, mapping, mapping);
574 
575 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
576 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
577 
578 	pool->offset += SGE_PAGE_SIZE;
579 	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
580 		get_page(pool->page);
581 	else
582 		pool->page = NULL;
583 	return 0;
584 }
585 
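/* Walk the SGL reported in the end-aggregation CQE and attach the
 * corresponding pages to the skb as fragments (split into gro_size pieces
 * in GRO mode), replacing each consumed SGE with a newly allocated one.
 */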
586 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
587 			       struct bnx2x_agg_info *tpa_info,
588 			       u16 pages,
589 			       struct sk_buff *skb,
590 			       struct eth_end_agg_rx_cqe *cqe,
591 			       u16 cqe_idx)
592 {
593 	struct sw_rx_page *rx_pg, old_rx_pg;
594 	u32 i, frag_len, frag_size;
595 	int err, j, frag_id = 0;
596 	u16 len_on_bd = tpa_info->len_on_bd;
597 	u16 full_page = 0, gro_size = 0;
598 
599 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
600 
601 	if (fp->mode == TPA_MODE_GRO) {
602 		gro_size = tpa_info->gro_size;
603 		full_page = tpa_info->full_page;
604 	}
605 
606 	/* This is needed in order to enable forwarding support */
607 	if (frag_size)
608 		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
609 				     le16_to_cpu(cqe->pkt_len),
610 				     le16_to_cpu(cqe->num_of_coalesced_segs));
611 
612 #ifdef BNX2X_STOP_ON_ERROR
613 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
614 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
615 			  pages, cqe_idx);
616 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
617 		bnx2x_panic();
618 		return -EINVAL;
619 	}
620 #endif
621 
622 	/* Run through the SGL and compose the fragmented skb */
623 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
624 		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
625 
626 		/* FW gives the indices of the SGE as if the ring is an array
627 		   (meaning that "next" element will consume 2 indices) */
628 		if (fp->mode == TPA_MODE_GRO)
629 			frag_len = min_t(u32, frag_size, (u32)full_page);
630 		else /* LRO */
631 			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
632 
633 		rx_pg = &fp->rx_page_ring[sge_idx];
634 		old_rx_pg = *rx_pg;
635 
636 		/* If we fail to allocate a substitute page, we simply stop
637 		   where we are and drop the whole packet */
638 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
639 		if (unlikely(err)) {
640 			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
641 			return err;
642 		}
643 
644 		dma_unmap_page(&bp->pdev->dev,
645 			       dma_unmap_addr(&old_rx_pg, mapping),
646 			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
647 		/* Add one frag and update the appropriate fields in the skb */
648 		if (fp->mode == TPA_MODE_LRO)
649 			skb_fill_page_desc(skb, j, old_rx_pg.page,
650 					   old_rx_pg.offset, frag_len);
651 		else { /* GRO */
652 			int rem;
653 			int offset = 0;
654 			for (rem = frag_len; rem > 0; rem -= gro_size) {
655 				int len = rem > gro_size ? gro_size : rem;
656 				skb_fill_page_desc(skb, frag_id++,
657 						   old_rx_pg.page,
658 						   old_rx_pg.offset + offset,
659 						   len);
660 				if (offset)
661 					get_page(old_rx_pg.page);
662 				offset += len;
663 			}
664 		}
665 
666 		skb->data_len += frag_len;
667 		skb->truesize += SGE_PAGES;
668 		skb->len += frag_len;
669 
670 		frag_size -= frag_len;
671 	}
672 
673 	return 0;
674 }
675 
676 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
677 {
678 	if (fp->rx_frag_size)
679 		skb_free_frag(data);
680 	else
681 		kfree(data);
682 }
683 
684 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
685 {
686 	if (fp->rx_frag_size) {
687 		/* GFP_KERNEL allocations are used only during initialization */
688 		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
689 			return (void *)__get_free_page(gfp_mask);
690 
691 		return netdev_alloc_frag(fp->rx_frag_size);
692 	}
693 
694 	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
695 }
696 
697 #ifdef CONFIG_INET
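/* Helpers for completing FW-aggregated (GRO) packets: set the transport
 * header and load th->check with the pseudo-header checksum that
 * tcp_gro_complete() expects.
 */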
698 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
699 {
700 	const struct iphdr *iph = ip_hdr(skb);
701 	struct tcphdr *th;
702 
703 	skb_set_transport_header(skb, sizeof(struct iphdr));
704 	th = tcp_hdr(skb);
705 
706 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
707 				  iph->saddr, iph->daddr, 0);
708 }
709 
710 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
711 {
712 	struct ipv6hdr *iph = ipv6_hdr(skb);
713 	struct tcphdr *th;
714 
715 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
716 	th = tcp_hdr(skb);
717 
718 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
719 				  &iph->saddr, &iph->daddr, 0);
720 }
721 
722 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
723 			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
724 {
725 	skb_reset_network_header(skb);
726 	gro_func(bp, skb);
727 	tcp_gro_complete(skb);
728 }
729 #endif
730 
731 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
732 			       struct sk_buff *skb)
733 {
734 #ifdef CONFIG_INET
735 	if (skb_shinfo(skb)->gso_size) {
736 		switch (be16_to_cpu(skb->protocol)) {
737 		case ETH_P_IP:
738 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
739 			break;
740 		case ETH_P_IPV6:
741 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
742 			break;
743 		default:
744 			WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
745 				  be16_to_cpu(skb->protocol));
746 		}
747 	}
748 #endif
749 	skb_record_rx_queue(skb, fp->rx_queue);
750 	napi_gro_receive(&fp->napi, skb);
751 }
752 
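/* Complete (or drop) a TPA aggregation: allocate a replacement buffer for
 * the bin, build an skb around the aggregated first buffer, attach the SGE
 * pages via bnx2x_fill_frag_skb() and hand the result to GRO.  Any
 * allocation failure drops the aggregated packet and bumps the stats.
 */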
753 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
754 			   struct bnx2x_agg_info *tpa_info,
755 			   u16 pages,
756 			   struct eth_end_agg_rx_cqe *cqe,
757 			   u16 cqe_idx)
758 {
759 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
760 	u8 pad = tpa_info->placement_offset;
761 	u16 len = tpa_info->len_on_bd;
762 	struct sk_buff *skb = NULL;
763 	u8 *new_data, *data = rx_buf->data;
764 	u8 old_tpa_state = tpa_info->tpa_state;
765 
766 	tpa_info->tpa_state = BNX2X_TPA_STOP;
767 
768 	/* If there was an error during the handling of the TPA_START -
769 	 * drop this aggregation.
770 	 */
771 	if (old_tpa_state == BNX2X_TPA_ERROR)
772 		goto drop;
773 
774 	/* Try to allocate the new data */
775 	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
776 	/* Unmap skb in the pool anyway, as we are going to change
777 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
778 	   fails. */
779 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
780 			 fp->rx_buf_size, DMA_FROM_DEVICE);
781 	if (likely(new_data))
782 		skb = build_skb(data, fp->rx_frag_size);
783 
784 	if (likely(skb)) {
785 #ifdef BNX2X_STOP_ON_ERROR
786 		if (pad + len > fp->rx_buf_size) {
787 			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
788 				  pad, len, fp->rx_buf_size);
789 			bnx2x_panic();
790 			return;
791 		}
792 #endif
793 
794 		skb_reserve(skb, pad + NET_SKB_PAD);
795 		skb_put(skb, len);
796 		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
797 
798 		skb->protocol = eth_type_trans(skb, bp->dev);
799 		skb->ip_summed = CHECKSUM_UNNECESSARY;
800 
801 		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
802 					 skb, cqe, cqe_idx)) {
803 			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
804 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
805 			bnx2x_gro_receive(bp, fp, skb);
806 		} else {
807 			DP(NETIF_MSG_RX_STATUS,
808 			   "Failed to allocate new pages - dropping packet!\n");
809 			dev_kfree_skb_any(skb);
810 		}
811 
812 		/* put new data in bin */
813 		rx_buf->data = new_data;
814 
815 		return;
816 	}
817 	if (new_data)
818 		bnx2x_frag_free(fp, new_data);
819 drop:
820 	/* drop the packet and keep the buffer in the bin */
821 	DP(NETIF_MSG_RX_STATUS,
822 	   "Failed to allocate or map a new skb - dropping packet!\n");
823 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
824 }
825 
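/* Allocate and DMA-map a new rx data buffer and program its address into
 * the rx BD at the given index.
 */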
826 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
827 			       u16 index, gfp_t gfp_mask)
828 {
829 	u8 *data;
830 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
831 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
832 	dma_addr_t mapping;
833 
834 	data = bnx2x_frag_alloc(fp, gfp_mask);
835 	if (unlikely(data == NULL))
836 		return -ENOMEM;
837 
838 	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
839 				 fp->rx_buf_size,
840 				 DMA_FROM_DEVICE);
841 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
842 		bnx2x_frag_free(fp, data);
843 		BNX2X_ERR("Can't map rx data\n");
844 		return -ENOMEM;
845 	}
846 
847 	rx_buf->data = data;
848 	dma_unmap_addr_set(rx_buf, mapping, mapping);
849 
850 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
851 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
852 
853 	return 0;
854 }
855 
856 static
857 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
858 				 struct bnx2x_fastpath *fp,
859 				 struct bnx2x_eth_q_stats *qstats)
860 {
861 	/* Do nothing if no L4 csum validation was done.
862 	 * We do not check whether IP csum was validated. For IPv4 we assume
863 	 * that if the card got as far as validating the L4 csum, it also
864 	 * validated the IP csum. IPv6 has no IP csum.
865 	 */
866 	if (cqe->fast_path_cqe.status_flags &
867 	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
868 		return;
869 
870 	/* If L4 validation was done, check if an error was found. */
871 
872 	if (cqe->fast_path_cqe.type_error_flags &
873 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
874 	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
875 		qstats->hw_csum_err++;
876 	else
877 		skb->ip_summed = CHECKSUM_UNNECESSARY;
878 }
879 
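/* Main RX polling loop: consume completion queue entries up to 'budget'
 * packets, dispatching slow-path events, TPA start/stop aggregations and
 * regular packets (small frames are copied, larger ones handed over via
 * build_skb() with a replacement buffer), then update the BD, CQ and SGE
 * producers for the firmware.
 */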
880 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
881 {
882 	struct bnx2x *bp = fp->bp;
883 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
884 	u16 sw_comp_cons, sw_comp_prod;
885 	int rx_pkt = 0;
886 	union eth_rx_cqe *cqe;
887 	struct eth_fast_path_rx_cqe *cqe_fp;
888 
889 #ifdef BNX2X_STOP_ON_ERROR
890 	if (unlikely(bp->panic))
891 		return 0;
892 #endif
893 	if (budget <= 0)
894 		return rx_pkt;
895 
896 	bd_cons = fp->rx_bd_cons;
897 	bd_prod = fp->rx_bd_prod;
898 	bd_prod_fw = bd_prod;
899 	sw_comp_cons = fp->rx_comp_cons;
900 	sw_comp_prod = fp->rx_comp_prod;
901 
902 	comp_ring_cons = RCQ_BD(sw_comp_cons);
903 	cqe = &fp->rx_comp_ring[comp_ring_cons];
904 	cqe_fp = &cqe->fast_path_cqe;
905 
906 	DP(NETIF_MSG_RX_STATUS,
907 	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
908 
909 	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
910 		struct sw_rx_bd *rx_buf = NULL;
911 		struct sk_buff *skb;
912 		u8 cqe_fp_flags;
913 		enum eth_rx_cqe_type cqe_fp_type;
914 		u16 len, pad, queue;
915 		u8 *data;
916 		u32 rxhash;
917 		enum pkt_hash_types rxhash_type;
918 
919 #ifdef BNX2X_STOP_ON_ERROR
920 		if (unlikely(bp->panic))
921 			return 0;
922 #endif
923 
924 		bd_prod = RX_BD(bd_prod);
925 		bd_cons = RX_BD(bd_cons);
926 
927 		/* A rmb() is required to ensure that the CQE is not read
928 		 * before it is written by the adapter DMA.  PCI ordering
929 		 * rules will make sure the other fields are written before
930 		 * the marker at the end of struct eth_fast_path_rx_cqe
931 		 * but without rmb() a weakly ordered processor can process
932 		 * stale data.  Without the barrier TPA state-machine might
933 		 * enter inconsistent state and kernel stack might be
934 		 * provided with incorrect packet description - these lead
935 		 * to various kernel crashes.
936 		 */
937 		rmb();
938 
939 		cqe_fp_flags = cqe_fp->type_error_flags;
940 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
941 
942 		DP(NETIF_MSG_RX_STATUS,
943 		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
944 		   CQE_TYPE(cqe_fp_flags),
945 		   cqe_fp_flags, cqe_fp->status_flags,
946 		   le32_to_cpu(cqe_fp->rss_hash_result),
947 		   le16_to_cpu(cqe_fp->vlan_tag),
948 		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
949 
950 		/* is this a slowpath msg? */
951 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
952 			bnx2x_sp_event(fp, cqe);
953 			goto next_cqe;
954 		}
955 
956 		rx_buf = &fp->rx_buf_ring[bd_cons];
957 		data = rx_buf->data;
958 
959 		if (!CQE_TYPE_FAST(cqe_fp_type)) {
960 			struct bnx2x_agg_info *tpa_info;
961 			u16 frag_size, pages;
962 #ifdef BNX2X_STOP_ON_ERROR
963 			/* sanity check */
964 			if (fp->mode == TPA_MODE_DISABLED &&
965 			    (CQE_TYPE_START(cqe_fp_type) ||
966 			     CQE_TYPE_STOP(cqe_fp_type)))
967 				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
968 					  CQE_TYPE(cqe_fp_type));
969 #endif
970 
971 			if (CQE_TYPE_START(cqe_fp_type)) {
972 				u16 queue = cqe_fp->queue_index;
973 				DP(NETIF_MSG_RX_STATUS,
974 				   "calling tpa_start on queue %d\n",
975 				   queue);
976 
977 				bnx2x_tpa_start(fp, queue,
978 						bd_cons, bd_prod,
979 						cqe_fp);
980 
981 				goto next_rx;
982 			}
983 			queue = cqe->end_agg_cqe.queue_index;
984 			tpa_info = &fp->tpa_info[queue];
985 			DP(NETIF_MSG_RX_STATUS,
986 			   "calling tpa_stop on queue %d\n",
987 			   queue);
988 
989 			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
990 				    tpa_info->len_on_bd;
991 
992 			if (fp->mode == TPA_MODE_GRO)
993 				pages = (frag_size + tpa_info->full_page - 1) /
994 					 tpa_info->full_page;
995 			else
996 				pages = SGE_PAGE_ALIGN(frag_size) >>
997 					SGE_PAGE_SHIFT;
998 
999 			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1000 				       &cqe->end_agg_cqe, comp_ring_cons);
1001 #ifdef BNX2X_STOP_ON_ERROR
1002 			if (bp->panic)
1003 				return 0;
1004 #endif
1005 
1006 			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1007 			goto next_cqe;
1008 		}
1009 		/* non TPA */
1010 		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1011 		pad = cqe_fp->placement_offset;
1012 		dma_sync_single_for_cpu(&bp->pdev->dev,
1013 					dma_unmap_addr(rx_buf, mapping),
1014 					pad + RX_COPY_THRESH,
1015 					DMA_FROM_DEVICE);
1016 		pad += NET_SKB_PAD;
1017 		prefetch(data + pad); /* speedup eth_type_trans() */
1018 		/* is this an error packet? */
1019 		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1020 			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1021 			   "ERROR  flags %x  rx packet %u\n",
1022 			   cqe_fp_flags, sw_comp_cons);
1023 			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1024 			goto reuse_rx;
1025 		}
1026 
1027 		/* Since we don't have a jumbo ring,
1028 		 * copy small packets if mtu > 1500
1029 		 */
1030 		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1031 		    (len <= RX_COPY_THRESH)) {
1032 			skb = napi_alloc_skb(&fp->napi, len);
1033 			if (skb == NULL) {
1034 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1035 				   "ERROR  packet dropped because of alloc failure\n");
1036 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1037 				goto reuse_rx;
1038 			}
1039 			memcpy(skb->data, data + pad, len);
1040 			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1041 		} else {
1042 			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1043 						       GFP_ATOMIC) == 0)) {
1044 				dma_unmap_single(&bp->pdev->dev,
1045 						 dma_unmap_addr(rx_buf, mapping),
1046 						 fp->rx_buf_size,
1047 						 DMA_FROM_DEVICE);
1048 				skb = build_skb(data, fp->rx_frag_size);
1049 				if (unlikely(!skb)) {
1050 					bnx2x_frag_free(fp, data);
1051 					bnx2x_fp_qstats(bp, fp)->
1052 							rx_skb_alloc_failed++;
1053 					goto next_rx;
1054 				}
1055 				skb_reserve(skb, pad);
1056 			} else {
1057 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1058 				   "ERROR  packet dropped because of alloc failure\n");
1059 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1060 reuse_rx:
1061 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1062 				goto next_rx;
1063 			}
1064 		}
1065 
1066 		skb_put(skb, len);
1067 		skb->protocol = eth_type_trans(skb, bp->dev);
1068 
1069 		/* Set Toeplitz hash for a non-LRO skb */
1070 		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1071 		skb_set_hash(skb, rxhash, rxhash_type);
1072 
1073 		skb_checksum_none_assert(skb);
1074 
1075 		if (bp->dev->features & NETIF_F_RXCSUM)
1076 			bnx2x_csum_validate(skb, cqe, fp,
1077 					    bnx2x_fp_qstats(bp, fp));
1078 
1079 		skb_record_rx_queue(skb, fp->rx_queue);
1080 
1081 		/* Check if this packet was timestamped */
1082 		if (unlikely(cqe->fast_path_cqe.type_error_flags &
1083 			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1084 			bnx2x_set_rx_ts(bp, skb);
1085 
1086 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1087 		    PARSING_FLAGS_VLAN)
1088 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1089 					       le16_to_cpu(cqe_fp->vlan_tag));
1090 
1091 		napi_gro_receive(&fp->napi, skb);
1092 next_rx:
1093 		rx_buf->data = NULL;
1094 
1095 		bd_cons = NEXT_RX_IDX(bd_cons);
1096 		bd_prod = NEXT_RX_IDX(bd_prod);
1097 		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1098 		rx_pkt++;
1099 next_cqe:
1100 		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1101 		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1102 
1103 		/* mark CQE as free */
1104 		BNX2X_SEED_CQE(cqe_fp);
1105 
1106 		if (rx_pkt == budget)
1107 			break;
1108 
1109 		comp_ring_cons = RCQ_BD(sw_comp_cons);
1110 		cqe = &fp->rx_comp_ring[comp_ring_cons];
1111 		cqe_fp = &cqe->fast_path_cqe;
1112 	} /* while */
1113 
1114 	fp->rx_bd_cons = bd_cons;
1115 	fp->rx_bd_prod = bd_prod_fw;
1116 	fp->rx_comp_cons = sw_comp_cons;
1117 	fp->rx_comp_prod = sw_comp_prod;
1118 
1119 	/* Update producers */
1120 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1121 			     fp->rx_sge_prod);
1122 
1123 	return rx_pkt;
1124 }
1125 
1126 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1127 {
1128 	struct bnx2x_fastpath *fp = fp_cookie;
1129 	struct bnx2x *bp = fp->bp;
1130 	u8 cos;
1131 
1132 	DP(NETIF_MSG_INTR,
1133 	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1134 	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
1135 
1136 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1137 
1138 #ifdef BNX2X_STOP_ON_ERROR
1139 	if (unlikely(bp->panic))
1140 		return IRQ_HANDLED;
1141 #endif
1142 
1143 	/* Handle Rx and Tx according to MSI-X vector */
1144 	for_each_cos_in_tx_queue(fp, cos)
1145 		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1146 
1147 	prefetch(&fp->sb_running_index[SM_RX_ID]);
1148 	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1149 
1150 	return IRQ_HANDLED;
1151 }
1152 
1153 /* HW Lock for shared dual port PHYs */
1154 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1155 {
1156 	mutex_lock(&bp->port.phy_mutex);
1157 
1158 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1159 }
1160 
1161 void bnx2x_release_phy_lock(struct bnx2x *bp)
1162 {
1163 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1164 
1165 	mutex_unlock(&bp->port.phy_mutex);
1166 }
1167 
1168 /* calculates MF speed according to current linespeed and MF configuration */
1169 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1170 {
1171 	u16 line_speed = bp->link_vars.line_speed;
1172 	if (IS_MF(bp)) {
1173 		u16 maxCfg = bnx2x_extract_max_cfg(bp,
1174 						   bp->mf_config[BP_VN(bp)]);
1175 
1176 		/* Calculate the current MAX line speed limit for the MF
1177 		 * devices
1178 		 */
1179 		if (IS_MF_PERCENT_BW(bp))
1180 			line_speed = (line_speed * maxCfg) / 100;
1181 		else { /* SD mode */
1182 			u16 vn_max_rate = maxCfg * 100;
1183 
1184 			if (vn_max_rate < line_speed)
1185 				line_speed = vn_max_rate;
1186 		}
1187 	}
1188 
1189 	return line_speed;
1190 }
1191 
1192 /**
1193  * bnx2x_fill_report_data - fill link report data to report
1194  *
1195  * @bp:		driver handle
1196  * @data:	link state to update
1197  *
1198  * It uses non-atomic bit operations because it is called under the mutex.
1199  */
1200 static void bnx2x_fill_report_data(struct bnx2x *bp,
1201 				   struct bnx2x_link_report_data *data)
1202 {
1203 	memset(data, 0, sizeof(*data));
1204 
1205 	if (IS_PF(bp)) {
1206 		/* Fill the report data: effective line speed */
1207 		data->line_speed = bnx2x_get_mf_speed(bp);
1208 
1209 		/* Link is down */
1210 		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1211 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1212 				  &data->link_report_flags);
1213 
1214 		if (!BNX2X_NUM_ETH_QUEUES(bp))
1215 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1216 				  &data->link_report_flags);
1217 
1218 		/* Full DUPLEX */
1219 		if (bp->link_vars.duplex == DUPLEX_FULL)
1220 			__set_bit(BNX2X_LINK_REPORT_FD,
1221 				  &data->link_report_flags);
1222 
1223 		/* Rx Flow Control is ON */
1224 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1225 			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1226 				  &data->link_report_flags);
1227 
1228 		/* Tx Flow Control is ON */
1229 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1230 			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1231 				  &data->link_report_flags);
1232 	} else { /* VF */
1233 		*data = bp->vf_link_vars;
1234 	}
1235 }
1236 
1237 /**
1238  * bnx2x_link_report - report link status to OS.
1239  *
1240  * @bp:		driver handle
1241  *
1242  * Calls the __bnx2x_link_report() under the same locking scheme
1243  * as the link/PHY state managing code to ensure consistent link
1244  * reporting.
1245  */
1246 
1247 void bnx2x_link_report(struct bnx2x *bp)
1248 {
1249 	bnx2x_acquire_phy_lock(bp);
1250 	__bnx2x_link_report(bp);
1251 	bnx2x_release_phy_lock(bp);
1252 }
1253 
1254 /**
1255  * __bnx2x_link_report - report link status to OS.
1256  *
1257  * @bp:		driver handle
1258  *
1259  * Non-atomic implementation.
1260  * Should be called under the phy_lock.
1261  */
1262 void __bnx2x_link_report(struct bnx2x *bp)
1263 {
1264 	struct bnx2x_link_report_data cur_data;
1265 
1266 	if (bp->force_link_down) {
1267 		bp->link_vars.link_up = 0;
1268 		return;
1269 	}
1270 
1271 	/* reread mf_cfg */
1272 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
1273 		bnx2x_read_mf_cfg(bp);
1274 
1275 	/* Read the current link report info */
1276 	bnx2x_fill_report_data(bp, &cur_data);
1277 
1278 	/* Don't report link down or exactly the same link status twice */
1279 	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1280 	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1281 		      &bp->last_reported_link.link_report_flags) &&
1282 	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1283 		      &cur_data.link_report_flags)))
1284 		return;
1285 
1286 	bp->link_cnt++;
1287 
1288 	/* We are going to report new link parameters now -
1289 	 * remember the current data for the next time.
1290 	 */
1291 	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1292 
1293 	/* propagate status to VFs */
1294 	if (IS_PF(bp))
1295 		bnx2x_iov_link_update(bp);
1296 
1297 	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1298 		     &cur_data.link_report_flags)) {
1299 		netif_carrier_off(bp->dev);
1300 		netdev_err(bp->dev, "NIC Link is Down\n");
1301 		return;
1302 	} else {
1303 		const char *duplex;
1304 		const char *flow;
1305 
1306 		netif_carrier_on(bp->dev);
1307 
1308 		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1309 				       &cur_data.link_report_flags))
1310 			duplex = "full";
1311 		else
1312 			duplex = "half";
1313 
1314 		/* Handle the FC at the end so that only these flags can
1315 		 * possibly be set. This way we can easily check whether FC is
1316 		 * enabled.
1317 		 */
1318 		if (cur_data.link_report_flags) {
1319 			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1320 				     &cur_data.link_report_flags)) {
1321 				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1322 				     &cur_data.link_report_flags))
1323 					flow = "ON - receive & transmit";
1324 				else
1325 					flow = "ON - receive";
1326 			} else {
1327 				flow = "ON - transmit";
1328 			}
1329 		} else {
1330 			flow = "none";
1331 		}
1332 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1333 			    cur_data.line_speed, duplex, flow);
1334 	}
1335 }
1336 
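/* Initialize the "next page" entry at the end of each RX SGE ring page so
 * the pages are chained into a circular ring.
 */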
1337 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1338 {
1339 	int i;
1340 
1341 	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1342 		struct eth_rx_sge *sge;
1343 
1344 		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1345 		sge->addr_hi =
1346 			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1347 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1348 
1349 		sge->addr_lo =
1350 			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1351 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1352 	}
1353 }
1354 
1355 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1356 				struct bnx2x_fastpath *fp, int last)
1357 {
1358 	int i;
1359 
1360 	for (i = 0; i < last; i++) {
1361 		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1362 		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1363 		u8 *data = first_buf->data;
1364 
1365 		if (data == NULL) {
1366 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1367 			continue;
1368 		}
1369 		if (tpa_info->tpa_state == BNX2X_TPA_START)
1370 			dma_unmap_single(&bp->pdev->dev,
1371 					 dma_unmap_addr(first_buf, mapping),
1372 					 fp->rx_buf_size, DMA_FROM_DEVICE);
1373 		bnx2x_frag_free(fp, data);
1374 		first_buf->data = NULL;
1375 	}
1376 }
1377 
1378 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1379 {
1380 	int j;
1381 
1382 	for_each_rx_queue_cnic(bp, j) {
1383 		struct bnx2x_fastpath *fp = &bp->fp[j];
1384 
1385 		fp->rx_bd_cons = 0;
1386 
1387 		/* Activate BD ring */
1388 		/* Warning!
1389 		 * this will generate an interrupt (to the TSTORM)
1390 		 * must only be done after chip is initialized
1391 		 */
1392 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1393 				     fp->rx_sge_prod);
1394 	}
1395 }
1396 
1397 void bnx2x_init_rx_rings(struct bnx2x *bp)
1398 {
1399 	int func = BP_FUNC(bp);
1400 	u16 ring_prod;
1401 	int i, j;
1402 
1403 	/* Allocate TPA resources */
1404 	for_each_eth_queue(bp, j) {
1405 		struct bnx2x_fastpath *fp = &bp->fp[j];
1406 
1407 		DP(NETIF_MSG_IFUP,
1408 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1409 
1410 		if (fp->mode != TPA_MODE_DISABLED) {
1411 			/* Fill the per-aggregation pool */
1412 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
1413 				struct bnx2x_agg_info *tpa_info =
1414 					&fp->tpa_info[i];
1415 				struct sw_rx_bd *first_buf =
1416 					&tpa_info->first_buf;
1417 
1418 				first_buf->data =
1419 					bnx2x_frag_alloc(fp, GFP_KERNEL);
1420 				if (!first_buf->data) {
1421 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1422 						  j);
1423 					bnx2x_free_tpa_pool(bp, fp, i);
1424 					fp->mode = TPA_MODE_DISABLED;
1425 					break;
1426 				}
1427 				dma_unmap_addr_set(first_buf, mapping, 0);
1428 				tpa_info->tpa_state = BNX2X_TPA_STOP;
1429 			}
1430 
1431 			/* "next page" elements initialization */
1432 			bnx2x_set_next_page_sgl(fp);
1433 
1434 			/* set SGEs bit mask */
1435 			bnx2x_init_sge_ring_bit_mask(fp);
1436 
1437 			/* Allocate SGEs and initialize the ring elements */
1438 			for (i = 0, ring_prod = 0;
1439 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1440 
1441 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1442 						       GFP_KERNEL) < 0) {
1443 					BNX2X_ERR("was only able to allocate %d rx sges\n",
1444 						  i);
1445 					BNX2X_ERR("disabling TPA for queue[%d]\n",
1446 						  j);
1447 					/* Cleanup already allocated elements */
1448 					bnx2x_free_rx_sge_range(bp, fp,
1449 								ring_prod);
1450 					bnx2x_free_tpa_pool(bp, fp,
1451 							    MAX_AGG_QS(bp));
1452 					fp->mode = TPA_MODE_DISABLED;
1453 					ring_prod = 0;
1454 					break;
1455 				}
1456 				ring_prod = NEXT_SGE_IDX(ring_prod);
1457 			}
1458 
1459 			fp->rx_sge_prod = ring_prod;
1460 		}
1461 	}
1462 
1463 	for_each_eth_queue(bp, j) {
1464 		struct bnx2x_fastpath *fp = &bp->fp[j];
1465 
1466 		fp->rx_bd_cons = 0;
1467 
1468 		/* Activate BD ring */
1469 		/* Warning!
1470 		 * this will generate an interrupt (to the TSTORM)
1471 		 * must only be done after chip is initialized
1472 		 */
1473 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1474 				     fp->rx_sge_prod);
1475 
1476 		if (j != 0)
1477 			continue;
1478 
1479 		if (CHIP_IS_E1(bp)) {
1480 			REG_WR(bp, BAR_USTRORM_INTMEM +
1481 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1482 			       U64_LO(fp->rx_comp_mapping));
1483 			REG_WR(bp, BAR_USTRORM_INTMEM +
1484 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1485 			       U64_HI(fp->rx_comp_mapping));
1486 		}
1487 	}
1488 }
1489 
1490 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1491 {
1492 	u8 cos;
1493 	struct bnx2x *bp = fp->bp;
1494 
1495 	for_each_cos_in_tx_queue(fp, cos) {
1496 		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1497 		unsigned pkts_compl = 0, bytes_compl = 0;
1498 
1499 		u16 sw_prod = txdata->tx_pkt_prod;
1500 		u16 sw_cons = txdata->tx_pkt_cons;
1501 
1502 		while (sw_cons != sw_prod) {
1503 			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1504 					  &pkts_compl, &bytes_compl);
1505 			sw_cons++;
1506 		}
1507 
1508 		netdev_tx_reset_queue(
1509 			netdev_get_tx_queue(bp->dev,
1510 					    txdata->txq_index));
1511 	}
1512 }
1513 
1514 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1515 {
1516 	int i;
1517 
1518 	for_each_tx_queue_cnic(bp, i) {
1519 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1520 	}
1521 }
1522 
1523 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1524 {
1525 	int i;
1526 
1527 	for_each_eth_queue(bp, i) {
1528 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529 	}
1530 }
1531 
1532 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1533 {
1534 	struct bnx2x *bp = fp->bp;
1535 	int i;
1536 
1537 	/* ring wasn't allocated */
1538 	if (fp->rx_buf_ring == NULL)
1539 		return;
1540 
1541 	for (i = 0; i < NUM_RX_BD; i++) {
1542 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1543 		u8 *data = rx_buf->data;
1544 
1545 		if (data == NULL)
1546 			continue;
1547 		dma_unmap_single(&bp->pdev->dev,
1548 				 dma_unmap_addr(rx_buf, mapping),
1549 				 fp->rx_buf_size, DMA_FROM_DEVICE);
1550 
1551 		rx_buf->data = NULL;
1552 		bnx2x_frag_free(fp, data);
1553 	}
1554 }
1555 
1556 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1557 {
1558 	int j;
1559 
1560 	for_each_rx_queue_cnic(bp, j) {
1561 		bnx2x_free_rx_bds(&bp->fp[j]);
1562 	}
1563 }
1564 
1565 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1566 {
1567 	int j;
1568 
1569 	for_each_eth_queue(bp, j) {
1570 		struct bnx2x_fastpath *fp = &bp->fp[j];
1571 
1572 		bnx2x_free_rx_bds(fp);
1573 
1574 		if (fp->mode != TPA_MODE_DISABLED)
1575 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1576 	}
1577 }
1578 
1579 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1580 {
1581 	bnx2x_free_tx_skbs_cnic(bp);
1582 	bnx2x_free_rx_skbs_cnic(bp);
1583 }
1584 
1585 void bnx2x_free_skbs(struct bnx2x *bp)
1586 {
1587 	bnx2x_free_tx_skbs(bp);
1588 	bnx2x_free_rx_skbs(bp);
1589 }
1590 
1591 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1592 {
1593 	/* load old values */
1594 	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1595 
1596 	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1597 		/* leave all but MAX value */
1598 		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1599 
1600 		/* set new MAX value */
1601 		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1602 				& FUNC_MF_CFG_MAX_BW_MASK;
1603 
1604 		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1605 	}
1606 }
1607 
1608 /**
1609  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1610  *
1611  * @bp:		driver handle
1612  * @nvecs:	number of vectors to be released
1613  */
1614 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1615 {
1616 	int i, offset = 0;
1617 
1618 	if (nvecs == offset)
1619 		return;
1620 
1621 	/* VFs don't have a default SB */
1622 	if (IS_PF(bp)) {
1623 		free_irq(bp->msix_table[offset].vector, bp->dev);
1624 		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1625 		   bp->msix_table[offset].vector);
1626 		offset++;
1627 	}
1628 
1629 	if (CNIC_SUPPORT(bp)) {
1630 		if (nvecs == offset)
1631 			return;
1632 		offset++;
1633 	}
1634 
1635 	for_each_eth_queue(bp, i) {
1636 		if (nvecs == offset)
1637 			return;
1638 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1639 		   i, bp->msix_table[offset].vector);
1640 
1641 		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1642 	}
1643 }
1644 
1645 void bnx2x_free_irq(struct bnx2x *bp)
1646 {
1647 	if (bp->flags & USING_MSIX_FLAG &&
1648 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1649 		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1650 
1651 		/* vfs don't have a default status block */
1652 		if (IS_PF(bp))
1653 			nvecs++;
1654 
1655 		bnx2x_free_msix_irqs(bp, nvecs);
1656 	} else {
1657 		free_irq(bp->dev->irq, bp->dev);
1658 	}
1659 }
1660 
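/* Build the MSI-X table (slowpath SB for PFs, one entry for CNIC when
 * supported, one per ETH queue) and enable MSI-X, shrinking the number of
 * queues or falling back to a single vector when fewer vectors are granted.
 */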
1661 int bnx2x_enable_msix(struct bnx2x *bp)
1662 {
1663 	int msix_vec = 0, i, rc;
1664 
1665 	/* VFs don't have a default status block */
1666 	if (IS_PF(bp)) {
1667 		bp->msix_table[msix_vec].entry = msix_vec;
1668 		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1669 			       bp->msix_table[0].entry);
1670 		msix_vec++;
1671 	}
1672 
1673 	/* Cnic requires an msix vector for itself */
1674 	if (CNIC_SUPPORT(bp)) {
1675 		bp->msix_table[msix_vec].entry = msix_vec;
1676 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1677 			       msix_vec, bp->msix_table[msix_vec].entry);
1678 		msix_vec++;
1679 	}
1680 
1681 	/* We need separate vectors for ETH queues only (not FCoE) */
1682 	for_each_eth_queue(bp, i) {
1683 		bp->msix_table[msix_vec].entry = msix_vec;
1684 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1685 			       msix_vec, msix_vec, i);
1686 		msix_vec++;
1687 	}
1688 
1689 	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1690 	   msix_vec);
1691 
1692 	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1693 				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1694 	/*
1695 	 * reconfigure number of tx/rx queues according to available
1696 	 * MSI-X vectors
1697 	 */
1698 	if (rc == -ENOSPC) {
1699 		/* Get by with single vector */
1700 		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1701 		if (rc < 0) {
1702 			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1703 				       rc);
1704 			goto no_msix;
1705 		}
1706 
1707 		BNX2X_DEV_INFO("Using single MSI-X vector\n");
1708 		bp->flags |= USING_SINGLE_MSIX_FLAG;
1709 
1710 		BNX2X_DEV_INFO("set number of queues to 1\n");
1711 		bp->num_ethernet_queues = 1;
1712 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1713 	} else if (rc < 0) {
1714 		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1715 		goto no_msix;
1716 	} else if (rc < msix_vec) {
1717 		/* how many fewer vectors will we have? */
1718 		int diff = msix_vec - rc;
1719 
1720 		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1721 
1722 		/*
1723 		 * decrease number of queues by number of unallocated entries
1724 		 */
1725 		bp->num_ethernet_queues -= diff;
1726 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1727 
1728 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
1729 			       bp->num_queues);
1730 	}
1731 
1732 	bp->flags |= USING_MSIX_FLAG;
1733 
1734 	return 0;
1735 
1736 no_msix:
1737 	/* fall back to INTx if not enough memory */
1738 	if (rc == -ENOMEM)
1739 		bp->flags |= DISABLE_MSI_FLAG;
1740 
1741 	return rc;
1742 }
1743 
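/* Editorial note (not part of the driver source): a sketch of the MSI-X
 * table layout that bnx2x_enable_msix() builds, assuming a PF with CNIC
 * support and 8 ETH queues:
 *
 *	msix_table[0]    - slowpath/default status block (PF only)
 *	msix_table[1]    - CNIC
 *	msix_table[2..9] - fastpath ETH queues 0..7
 *
 * so msix_vec == 10 is requested from pci_enable_msix_range(); on -ENOSPC
 * the driver retries with a single vector and sets USING_SINGLE_MSIX_FLAG,
 * and on a partial grant it shrinks num_ethernet_queues by the shortfall.
 */
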
1744 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1745 {
1746 	int i, rc, offset = 0;
1747 
1748 	/* no default status block for vf */
1749 	if (IS_PF(bp)) {
1750 		rc = request_irq(bp->msix_table[offset++].vector,
1751 				 bnx2x_msix_sp_int, 0,
1752 				 bp->dev->name, bp->dev);
1753 		if (rc) {
1754 			BNX2X_ERR("request sp irq failed\n");
1755 			return -EBUSY;
1756 		}
1757 	}
1758 
1759 	if (CNIC_SUPPORT(bp))
1760 		offset++;
1761 
1762 	for_each_eth_queue(bp, i) {
1763 		struct bnx2x_fastpath *fp = &bp->fp[i];
1764 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1765 			 bp->dev->name, i);
1766 
1767 		rc = request_irq(bp->msix_table[offset].vector,
1768 				 bnx2x_msix_fp_int, 0, fp->name, fp);
1769 		if (rc) {
1770 			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1771 			      bp->msix_table[offset].vector, rc);
1772 			bnx2x_free_msix_irqs(bp, offset);
1773 			return -EBUSY;
1774 		}
1775 
1776 		offset++;
1777 	}
1778 
1779 	i = BNX2X_NUM_ETH_QUEUES(bp);
1780 	if (IS_PF(bp)) {
1781 		offset = 1 + CNIC_SUPPORT(bp);
1782 		netdev_info(bp->dev,
1783 			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1784 			    bp->msix_table[0].vector,
1785 			    0, bp->msix_table[offset].vector,
1786 			    i - 1, bp->msix_table[offset + i - 1].vector);
1787 	} else {
1788 		offset = CNIC_SUPPORT(bp);
1789 		netdev_info(bp->dev,
1790 			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1791 			    0, bp->msix_table[offset].vector,
1792 			    i - 1, bp->msix_table[offset + i - 1].vector);
1793 	}
1794 	return 0;
1795 }
1796 
1797 int bnx2x_enable_msi(struct bnx2x *bp)
1798 {
1799 	int rc;
1800 
1801 	rc = pci_enable_msi(bp->pdev);
1802 	if (rc) {
1803 		BNX2X_DEV_INFO("MSI is not attainable\n");
1804 		return -1;
1805 	}
1806 	bp->flags |= USING_MSI_FLAG;
1807 
1808 	return 0;
1809 }
1810 
1811 static int bnx2x_req_irq(struct bnx2x *bp)
1812 {
1813 	unsigned long flags;
1814 	unsigned int irq;
1815 
1816 	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1817 		flags = 0;
1818 	else
1819 		flags = IRQF_SHARED;
1820 
1821 	if (bp->flags & USING_MSIX_FLAG)
1822 		irq = bp->msix_table[0].vector;
1823 	else
1824 		irq = bp->pdev->irq;
1825 
1826 	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1827 }
1828 
1829 static int bnx2x_setup_irqs(struct bnx2x *bp)
1830 {
1831 	int rc = 0;
1832 	if (bp->flags & USING_MSIX_FLAG &&
1833 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1834 		rc = bnx2x_req_msix_irqs(bp);
1835 		if (rc)
1836 			return rc;
1837 	} else {
1838 		rc = bnx2x_req_irq(bp);
1839 		if (rc) {
1840 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1841 			return rc;
1842 		}
1843 		if (bp->flags & USING_MSI_FLAG) {
1844 			bp->dev->irq = bp->pdev->irq;
1845 			netdev_info(bp->dev, "using MSI IRQ %d\n",
1846 				    bp->dev->irq);
1847 		}
1848 		if (bp->flags & USING_MSIX_FLAG) {
1849 			bp->dev->irq = bp->msix_table[0].vector;
1850 			netdev_info(bp->dev, "using MSIX IRQ %d\n",
1851 				    bp->dev->irq);
1852 		}
1853 	}
1854 
1855 	return 0;
1856 }
1857 
1858 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1859 {
1860 	int i;
1861 
1862 	for_each_rx_queue_cnic(bp, i) {
1863 		napi_enable(&bnx2x_fp(bp, i, napi));
1864 	}
1865 }
1866 
1867 static void bnx2x_napi_enable(struct bnx2x *bp)
1868 {
1869 	int i;
1870 
1871 	for_each_eth_queue(bp, i) {
1872 		napi_enable(&bnx2x_fp(bp, i, napi));
1873 	}
1874 }
1875 
1876 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1877 {
1878 	int i;
1879 
1880 	for_each_rx_queue_cnic(bp, i) {
1881 		napi_disable(&bnx2x_fp(bp, i, napi));
1882 	}
1883 }
1884 
1885 static void bnx2x_napi_disable(struct bnx2x *bp)
1886 {
1887 	int i;
1888 
1889 	for_each_eth_queue(bp, i) {
1890 		napi_disable(&bnx2x_fp(bp, i, napi));
1891 	}
1892 }
1893 
1894 void bnx2x_netif_start(struct bnx2x *bp)
1895 {
1896 	if (netif_running(bp->dev)) {
1897 		bnx2x_napi_enable(bp);
1898 		if (CNIC_LOADED(bp))
1899 			bnx2x_napi_enable_cnic(bp);
1900 		bnx2x_int_enable(bp);
1901 		if (bp->state == BNX2X_STATE_OPEN)
1902 			netif_tx_wake_all_queues(bp->dev);
1903 	}
1904 }
1905 
1906 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1907 {
1908 	bnx2x_int_disable_sync(bp, disable_hw);
1909 	bnx2x_napi_disable(bp);
1910 	if (CNIC_LOADED(bp))
1911 		bnx2x_napi_disable_cnic(bp);
1912 }
1913 
1914 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1915 		       void *accel_priv, select_queue_fallback_t fallback)
1916 {
1917 	struct bnx2x *bp = netdev_priv(dev);
1918 
1919 	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1920 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
1921 		u16 ether_type = ntohs(hdr->h_proto);
1922 
1923 		/* Skip VLAN tag if present */
1924 		if (ether_type == ETH_P_8021Q) {
1925 			struct vlan_ethhdr *vhdr =
1926 				(struct vlan_ethhdr *)skb->data;
1927 
1928 			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1929 		}
1930 
1931 		/* If ethertype is FCoE or FIP - use FCoE ring */
1932 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1933 			return bnx2x_fcoe_tx(bp, txq_index);
1934 	}
1935 
1936 	/* select a non-FCoE queue */
1937 	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
1938 }
1939 
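/* Editorial example (not part of the driver source): how the queue selection
 * above behaves, assuming FCoE is loaded and 8 ETH queues are configured.
 * An FCoE (ETH_P_FCOE) or FIP (ETH_P_FIP) frame - with or without a VLAN
 * tag - is steered to the dedicated FCoE Tx ring via bnx2x_fcoe_tx(), while
 * any other frame is hashed by the stack's fallback into queues 0..7:
 *
 *	txq = bnx2x_select_queue(dev, skb, NULL, fallback);
 */
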
1940 void bnx2x_set_num_queues(struct bnx2x *bp)
1941 {
1942 	/* RSS queues */
1943 	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1944 
1945 	/* override in STORAGE SD modes */
1946 	if (IS_MF_STORAGE_ONLY(bp))
1947 		bp->num_ethernet_queues = 1;
1948 
1949 	/* Add special queues */
1950 	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1951 	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1952 
1953 	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1954 }
1955 
1956 /**
1957  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1958  *
1959  * @bp:		Driver handle
1960  *
1961  * We currently support at most 16 Tx queues for each CoS, thus we will
1962  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1963  * bp->max_cos.
1964  *
1965  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1966  * index after all ETH L2 indices.
1967  *
1968  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1969  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1970  * 16..31,...) with indices that are not coupled with any real Tx queue.
1971  *
1972  * The proper configuration of skb->queue_mapping is handled by
1973  * bnx2x_select_queue() and __skb_tx_hash().
1974  *
1975  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1976  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1977  */
1978 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1979 {
1980 	int rc, tx, rx;
1981 
1982 	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1983 	rx = BNX2X_NUM_ETH_QUEUES(bp);
1984 
1985 /* account for fcoe queue */
1986 	if (include_cnic && !NO_FCOE(bp)) {
1987 		rx++;
1988 		tx++;
1989 	}
1990 
1991 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
1992 	if (rc) {
1993 		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1994 		return rc;
1995 	}
1996 	rc = netif_set_real_num_rx_queues(bp->dev, rx);
1997 	if (rc) {
1998 		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1999 		return rc;
2000 	}
2001 
2002 	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2003 			  tx, rx);
2004 
2005 	return rc;
2006 }
2007 
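/* Editorial worked example (not part of the driver source): with 8 ETH
 * queues, bp->max_cos == 3 and an FCoE L2 queue (include_cnic && !NO_FCOE),
 * bnx2x_set_real_num_queues() ends up with
 *
 *	tx = 8 * 3 + 1 = 25,	rx = 8 + 1 = 9
 *
 * which are handed to netif_set_real_num_tx_queues() and
 * netif_set_real_num_rx_queues() respectively.
 */
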
2008 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2009 {
2010 	int i;
2011 
2012 	for_each_queue(bp, i) {
2013 		struct bnx2x_fastpath *fp = &bp->fp[i];
2014 		u32 mtu;
2015 
2016 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
2017 		if (IS_FCOE_IDX(i))
2018 			/*
2019 			 * Although there are no IP frames expected to arrive to
2020 			 * this ring we still want to add an
2021 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2022 			 * overrun attack.
2023 			 */
2024 			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2025 		else
2026 			mtu = bp->dev->mtu;
2027 		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2028 				  IP_HEADER_ALIGNMENT_PADDING +
2029 				  ETH_OVERHEAD +
2030 				  mtu +
2031 				  BNX2X_FW_RX_ALIGN_END;
2032 		fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2033 		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2034 		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2035 			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2036 		else
2037 			fp->rx_frag_size = 0;
2038 	}
2039 }
2040 
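/* Editorial sketch (not part of the driver source): for a regular ETH ring
 * with MTU 1500 the Rx buffer size above works out to
 *
 *	rx_buf_size = SKB_DATA_ALIGN(BNX2X_FW_RX_ALIGN_START +
 *				     IP_HEADER_ALIGNMENT_PADDING +
 *				     ETH_OVERHEAD + 1500 +
 *				     BNX2X_FW_RX_ALIGN_END);
 *
 * and as long as rx_buf_size + NET_SKB_PAD fits in one page, rx_frag_size
 * is set so buffers can be carved out of page fragments; otherwise
 * rx_frag_size stays 0 and full buffers are allocated instead.
 */
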
2041 static int bnx2x_init_rss(struct bnx2x *bp)
2042 {
2043 	int i;
2044 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2045 
2046 	/* Prepare the initial contents for the indirection table if RSS is
2047 	 * enabled
2048 	 */
2049 	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2050 		bp->rss_conf_obj.ind_table[i] =
2051 			bp->fp->cl_id +
2052 			ethtool_rxfh_indir_default(i, num_eth_queues);
2053 
2054 	/*
2055 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2056 	 * per-port, so if explicit configuration is needed, do it only
2057 	 * for a PMF.
2058 	 *
2059 	 * For 57712 and newer on the other hand it's a per-function
2060 	 * configuration.
2061 	 */
2062 	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2063 }
2064 
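/* Editorial example (not part of the driver source): with 4 ETH queues and
 * a leading client id of, say, 10 (an illustrative value only), the default
 * indirection table above is filled round-robin, since
 * ethtool_rxfh_indir_default(i, 4) == i % 4:
 *
 *	ind_table[] = { 10, 11, 12, 13, 10, 11, 12, 13, ... }
 */
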
2065 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2066 	      bool config_hash, bool enable)
2067 {
2068 	struct bnx2x_config_rss_params params = {NULL};
2069 
2070 	/* Although RSS is meaningless when there is a single HW queue we
2071 	 * still need it enabled in order to have HW Rx hash generated.
2072 	 *
2073 	 * if (!is_eth_multi(bp))
2074 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2075 	 */
2076 
2077 	params.rss_obj = rss_obj;
2078 
2079 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2080 
2081 	if (enable) {
2082 		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2083 
2084 		/* RSS configuration */
2085 		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2086 		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2087 		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2088 		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2089 		if (rss_obj->udp_rss_v4)
2090 			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2091 		if (rss_obj->udp_rss_v6)
2092 			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2093 
2094 		if (!CHIP_IS_E1x(bp)) {
2095 			/* valid only for TUNN_MODE_VXLAN tunnel mode */
2096 			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2097 			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2098 
2099 			/* valid only for TUNN_MODE_GRE tunnel mode */
2100 			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2101 		}
2102 	} else {
2103 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2104 	}
2105 
2106 	/* Hash bits */
2107 	params.rss_result_mask = MULTI_MASK;
2108 
2109 	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2110 
2111 	if (config_hash) {
2112 		/* RSS keys */
2113 		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2114 		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2115 	}
2116 
2117 	if (IS_PF(bp))
2118 		return bnx2x_config_rss(bp, &params);
2119 	else
2120 		return bnx2x_vfpf_config_rss(bp, &params);
2121 }
2122 
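/* Editorial usage sketch (not part of the driver source): enabling regular
 * RSS on the default ETH RSS object while refreshing the hash key:
 *
 *	rc = bnx2x_rss(bp, &bp->rss_conf_obj, true, true);
 *
 * For a PF this sends the RSS configuration ramrod; for a VF the same
 * request is forwarded over the VF-PF channel (bnx2x_vfpf_config_rss()).
 */
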
2123 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2124 {
2125 	struct bnx2x_func_state_params func_params = {NULL};
2126 
2127 	/* Prepare parameters for function state transitions */
2128 	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2129 
2130 	func_params.f_obj = &bp->func_obj;
2131 	func_params.cmd = BNX2X_F_CMD_HW_INIT;
2132 
2133 	func_params.params.hw_init.load_phase = load_code;
2134 
2135 	return bnx2x_func_state_change(bp, &func_params);
2136 }
2137 
2138 /*
2139  * Cleans the objects that have internal lists without sending
2140  * ramrods. Should be run when interrupts are disabled.
2141  */
2142 void bnx2x_squeeze_objects(struct bnx2x *bp)
2143 {
2144 	int rc;
2145 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2146 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2147 	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2148 
2149 	/***************** Cleanup MACs' object first *************************/
2150 
2151 	/* Wait for completion of the requested commands */
2152 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2153 	/* Perform a dry cleanup */
2154 	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2155 
2156 	/* Clean ETH primary MAC */
2157 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2158 	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2159 				 &ramrod_flags);
2160 	if (rc != 0)
2161 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2162 
2163 	/* Cleanup UC list */
2164 	vlan_mac_flags = 0;
2165 	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2166 	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2167 				 &ramrod_flags);
2168 	if (rc != 0)
2169 		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2170 
2171 	/***************** Now clean mcast object *****************************/
2172 	rparam.mcast_obj = &bp->mcast_obj;
2173 	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2174 
2175 	/* Add a DEL command... - Since we're doing a driver cleanup only,
2176 	 * we take a lock surrounding both the initial send and the CONTs,
2177 	 * as we don't want a true completion to disrupt us in the middle.
2178 	 */
2179 	netif_addr_lock_bh(bp->dev);
2180 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2181 	if (rc < 0)
2182 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2183 			  rc);
2184 
2185 	/* ...and wait until all pending commands are cleared */
2186 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2187 	while (rc != 0) {
2188 		if (rc < 0) {
2189 			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2190 				  rc);
2191 			netif_addr_unlock_bh(bp->dev);
2192 			return;
2193 		}
2194 
2195 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2196 	}
2197 	netif_addr_unlock_bh(bp->dev);
2198 }
2199 
2200 #ifndef BNX2X_STOP_ON_ERROR
2201 #define LOAD_ERROR_EXIT(bp, label) \
2202 	do { \
2203 		(bp)->state = BNX2X_STATE_ERROR; \
2204 		goto label; \
2205 	} while (0)
2206 
2207 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2208 	do { \
2209 		bp->cnic_loaded = false; \
2210 		goto label; \
2211 	} while (0)
2212 #else /*BNX2X_STOP_ON_ERROR*/
2213 #define LOAD_ERROR_EXIT(bp, label) \
2214 	do { \
2215 		(bp)->state = BNX2X_STATE_ERROR; \
2216 		(bp)->panic = 1; \
2217 		return -EBUSY; \
2218 	} while (0)
2219 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2220 	do { \
2221 		bp->cnic_loaded = false; \
2222 		(bp)->panic = 1; \
2223 		return -EBUSY; \
2224 	} while (0)
2225 #endif /*BNX2X_STOP_ON_ERROR*/
2226 
2227 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2228 {
2229 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2230 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2231 	return;
2232 }
2233 
2234 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2235 {
2236 	int num_groups, vf_headroom = 0;
2237 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2238 
2239 	/* number of queues for statistics is number of eth queues + FCoE */
2240 	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2241 
2242 	/* Total number of FW statistics requests =
2243 	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2244 	 * and fcoe l2 queue) stats + num of queues (which includes another 1
2245 	 * for fcoe l2 queue if applicable)
2246 	 */
2247 	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2248 
2249 	/* vf stats appear in the request list, but their data is allocated by
2250 	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2251 	 * it is used to determine where to place the vf stats queries in the
2252 	 * request struct
2253 	 */
2254 	if (IS_SRIOV(bp))
2255 		vf_headroom = bnx2x_vf_headroom(bp);
2256 
2257 	/* Request is built from stats_query_header and an array of
2258 	 * stats_query_cmd_group each of which contains
2259 	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2260 	 * configured in the stats_query_header.
2261 	 */
2262 	num_groups =
2263 		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2264 		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2265 		 1 : 0));
2266 
2267 	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2268 	   bp->fw_stats_num, vf_headroom, num_groups);
2269 	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2270 		num_groups * sizeof(struct stats_query_cmd_group);
2271 
2272 	/* Data for statistics requests + stats_counter
2273 	 * stats_counter holds per-STORM counters that are incremented
2274 	 * when STORM has finished with the current request.
2275 	 * memory for FCoE offloaded statistics is counted anyway,
2276 	 * even if it will not be sent.
2277 	 * VF stats are not accounted for here as the data of VF stats is stored
2278 	 * in memory allocated by the VF, not here.
2279 	 */
2280 	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2281 		sizeof(struct per_pf_stats) +
2282 		sizeof(struct fcoe_statistics_params) +
2283 		sizeof(struct per_queue_stats) * num_queue_stats +
2284 		sizeof(struct stats_counter);
2285 
2286 	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2287 				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2288 	if (!bp->fw_stats)
2289 		goto alloc_mem_err;
2290 
2291 	/* Set shortcuts */
2292 	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2293 	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2294 	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2295 		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2296 	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2297 		bp->fw_stats_req_sz;
2298 
2299 	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2300 	   U64_HI(bp->fw_stats_req_mapping),
2301 	   U64_LO(bp->fw_stats_req_mapping));
2302 	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2303 	   U64_HI(bp->fw_stats_data_mapping),
2304 	   U64_LO(bp->fw_stats_data_mapping));
2305 	return 0;
2306 
2307 alloc_mem_err:
2308 	bnx2x_free_fw_stats_mem(bp);
2309 	BNX2X_ERR("Can't allocate FW stats memory\n");
2310 	return -ENOMEM;
2311 }
2312 
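/* Editorial worked example (not part of the driver source): with 8 ETH
 * queues, FCoE enabled and no SR-IOV headroom,
 *
 *	fw_stats_num = 2 + 1 + (8 + 1) = 12
 *
 * and, assuming for illustration that STATS_QUERY_CMD_COUNT == 16, the
 * request fits into a single stats_query_cmd_group, so
 *
 *	fw_stats_req_sz = sizeof(struct stats_query_header) +
 *			  1 * sizeof(struct stats_query_cmd_group);
 *
 * The value 16 is an assumption for the sake of the arithmetic only.
 */
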
2313 /* send load request to mcp and analyze response */
2314 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2315 {
2316 	u32 param;
2317 
2318 	/* init fw_seq */
2319 	bp->fw_seq =
2320 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2321 		 DRV_MSG_SEQ_NUMBER_MASK);
2322 	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2323 
2324 	/* Get current FW pulse sequence */
2325 	bp->fw_drv_pulse_wr_seq =
2326 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2327 		 DRV_PULSE_SEQ_MASK);
2328 	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2329 
2330 	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2331 
2332 	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2333 		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2334 
2335 	/* load request */
2336 	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2337 
2338 	/* if mcp fails to respond we must abort */
2339 	if (!(*load_code)) {
2340 		BNX2X_ERR("MCP response failure, aborting\n");
2341 		return -EBUSY;
2342 	}
2343 
2344 	/* If mcp refused (e.g. other port is in diagnostic mode) we
2345 	 * must abort
2346 	 */
2347 	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2348 		BNX2X_ERR("MCP refused load request, aborting\n");
2349 		return -EBUSY;
2350 	}
2351 	return 0;
2352 }
2353 
2354 /* check whether another PF has already loaded FW to the chip. In
2355  * virtualized environments a PF from another VM may have already
2356  * initialized the device, including loading FW
2357  */
2358 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2359 {
2360 	/* is another pf loaded on this engine? */
2361 	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2362 	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2363 		/* build my FW version dword */
2364 		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2365 			(BCM_5710_FW_MINOR_VERSION << 8) +
2366 			(BCM_5710_FW_REVISION_VERSION << 16) +
2367 			(BCM_5710_FW_ENGINEERING_VERSION << 24);
2368 
2369 		/* read loaded FW from chip */
2370 		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2371 
2372 		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2373 		   loaded_fw, my_fw);
2374 
2375 		/* abort nic load if version mismatch */
2376 		if (my_fw != loaded_fw) {
2377 			if (print_err)
2378 				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2379 					  loaded_fw, my_fw);
2380 			else
2381 				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2382 					       loaded_fw, my_fw);
2383 			return -EBUSY;
2384 		}
2385 	}
2386 	return 0;
2387 }
2388 
2389 /* returns the "mcp load_code" according to global load_count array */
2390 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2391 {
2392 	int path = BP_PATH(bp);
2393 
2394 	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2395 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2396 	   bnx2x_load_count[path][2]);
2397 	bnx2x_load_count[path][0]++;
2398 	bnx2x_load_count[path][1 + port]++;
2399 	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2400 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2401 	   bnx2x_load_count[path][2]);
2402 	if (bnx2x_load_count[path][0] == 1)
2403 		return FW_MSG_CODE_DRV_LOAD_COMMON;
2404 	else if (bnx2x_load_count[path][1 + port] == 1)
2405 		return FW_MSG_CODE_DRV_LOAD_PORT;
2406 	else
2407 		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2408 }
2409 
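/* Editorial example (not part of the driver source): bnx2x_load_count[path]
 * holds { engine total, port 0, port 1 }. Starting from { 1, 1, 0 } (one
 * function already up on port 0):
 *
 *	- loading another function on port 0 gives { 2, 2, 0 }
 *	  -> FW_MSG_CODE_DRV_LOAD_FUNCTION
 *	- loading a function on port 1 gives { 2, 1, 1 }
 *	  -> FW_MSG_CODE_DRV_LOAD_PORT
 *
 * and only the very first function on the path gets
 * FW_MSG_CODE_DRV_LOAD_COMMON.
 */
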
2410 /* mark PMF if applicable */
2411 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2412 {
2413 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2414 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2415 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2416 		bp->port.pmf = 1;
2417 		/* We need the barrier to ensure the ordering between the
2418 		 * writing to bp->port.pmf here and reading it from the
2419 		 * bnx2x_periodic_task().
2420 		 */
2421 		smp_mb();
2422 	} else {
2423 		bp->port.pmf = 0;
2424 	}
2425 
2426 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2427 }
2428 
2429 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2430 {
2431 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2432 	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2433 	    (bp->common.shmem2_base)) {
2434 		if (SHMEM2_HAS(bp, dcc_support))
2435 			SHMEM2_WR(bp, dcc_support,
2436 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2437 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2438 		if (SHMEM2_HAS(bp, afex_driver_support))
2439 			SHMEM2_WR(bp, afex_driver_support,
2440 				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2441 	}
2442 
2443 	/* Set AFEX default VLAN tag to an invalid value */
2444 	bp->afex_def_vlan_tag = -1;
2445 }
2446 
2447 /**
2448  * bnx2x_bz_fp - zero content of the fastpath structure.
2449  *
2450  * @bp:		driver handle
2451  * @index:	fastpath index to be zeroed
2452  *
2453  * Makes sure the contents of bp->fp[index].napi are kept
2454  * intact.
2455  */
2456 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2457 {
2458 	struct bnx2x_fastpath *fp = &bp->fp[index];
2459 	int cos;
2460 	struct napi_struct orig_napi = fp->napi;
2461 	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2462 
2463 	/* bzero bnx2x_fastpath contents */
2464 	if (fp->tpa_info)
2465 		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2466 		       sizeof(struct bnx2x_agg_info));
2467 	memset(fp, 0, sizeof(*fp));
2468 
2469 	/* Restore the NAPI object as it has been already initialized */
2470 	fp->napi = orig_napi;
2471 	fp->tpa_info = orig_tpa_info;
2472 	fp->bp = bp;
2473 	fp->index = index;
2474 	if (IS_ETH_FP(fp))
2475 		fp->max_cos = bp->max_cos;
2476 	else
2477 		/* Special queues support only one CoS */
2478 		fp->max_cos = 1;
2479 
2480 	/* Init txdata pointers */
2481 	if (IS_FCOE_FP(fp))
2482 		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2483 	if (IS_ETH_FP(fp))
2484 		for_each_cos_in_tx_queue(fp, cos)
2485 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2486 				BNX2X_NUM_ETH_QUEUES(bp) + index];
2487 
2488 	/* set the tpa flag for each queue. The tpa flag determines the queue
2489 	 * minimal size so it must be set prior to queue memory allocation
2490 	 */
2491 	if (bp->dev->features & NETIF_F_LRO)
2492 		fp->mode = TPA_MODE_LRO;
2493 	else if (bp->dev->features & NETIF_F_GRO &&
2494 		 bnx2x_mtu_allows_gro(bp->dev->mtu))
2495 		fp->mode = TPA_MODE_GRO;
2496 	else
2497 		fp->mode = TPA_MODE_DISABLED;
2498 
2499 	/* We don't want TPA if it's disabled in bp
2500 	 * or if this is an FCoE L2 ring.
2501 	 */
2502 	if (bp->disable_tpa || IS_FCOE_FP(fp))
2503 		fp->mode = TPA_MODE_DISABLED;
2504 }
2505 
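/* Editorial example (not part of the driver source): the txdata layout set
 * up above, assuming bp->max_cos == 3 and 8 ETH queues. ETH fastpath index
 * 5 gets
 *
 *	txdata_ptr[0] = &bp->bnx2x_txq[0 * 8 + 5] = &bp->bnx2x_txq[5]
 *	txdata_ptr[1] = &bp->bnx2x_txq[1 * 8 + 5] = &bp->bnx2x_txq[13]
 *	txdata_ptr[2] = &bp->bnx2x_txq[2 * 8 + 5] = &bp->bnx2x_txq[21]
 *
 * while the FCoE fastpath uses the single &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)].
 */
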
2506 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2507 {
2508 	u32 cur;
2509 
2510 	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2511 		return;
2512 
2513 	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2514 	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2515 	   cur, state);
2516 
2517 	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2518 }
2519 
2520 int bnx2x_load_cnic(struct bnx2x *bp)
2521 {
2522 	int i, rc, port = BP_PORT(bp);
2523 
2524 	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2525 
2526 	mutex_init(&bp->cnic_mutex);
2527 
2528 	if (IS_PF(bp)) {
2529 		rc = bnx2x_alloc_mem_cnic(bp);
2530 		if (rc) {
2531 			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2532 			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2533 		}
2534 	}
2535 
2536 	rc = bnx2x_alloc_fp_mem_cnic(bp);
2537 	if (rc) {
2538 		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2539 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2540 	}
2541 
2542 	/* Update the number of queues with the cnic queues */
2543 	rc = bnx2x_set_real_num_queues(bp, 1);
2544 	if (rc) {
2545 		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2546 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547 	}
2548 
2549 	/* Add all CNIC NAPI objects */
2550 	bnx2x_add_all_napi_cnic(bp);
2551 	DP(NETIF_MSG_IFUP, "cnic napi added\n");
2552 	bnx2x_napi_enable_cnic(bp);
2553 
2554 	rc = bnx2x_init_hw_func_cnic(bp);
2555 	if (rc)
2556 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2557 
2558 	bnx2x_nic_init_cnic(bp);
2559 
2560 	if (IS_PF(bp)) {
2561 		/* Enable Timer scan */
2562 		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2563 
2564 		/* setup cnic queues */
2565 		for_each_cnic_queue(bp, i) {
2566 			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2567 			if (rc) {
2568 				BNX2X_ERR("Queue setup failed\n");
2569 				LOAD_ERROR_EXIT(bp, load_error_cnic2);
2570 			}
2571 		}
2572 	}
2573 
2574 	/* Initialize Rx filter. */
2575 	bnx2x_set_rx_mode_inner(bp);
2576 
2577 	/* re-read iscsi info */
2578 	bnx2x_get_iscsi_info(bp);
2579 	bnx2x_setup_cnic_irq_info(bp);
2580 	bnx2x_setup_cnic_info(bp);
2581 	bp->cnic_loaded = true;
2582 	if (bp->state == BNX2X_STATE_OPEN)
2583 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2584 
2585 	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2586 
2587 	return 0;
2588 
2589 #ifndef BNX2X_STOP_ON_ERROR
2590 load_error_cnic2:
2591 	/* Disable Timer scan */
2592 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2593 
2594 load_error_cnic1:
2595 	bnx2x_napi_disable_cnic(bp);
2596 	/* Update the number of queues without the cnic queues */
2597 	if (bnx2x_set_real_num_queues(bp, 0))
2598 		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2599 load_error_cnic0:
2600 	BNX2X_ERR("CNIC-related load failed\n");
2601 	bnx2x_free_fp_mem_cnic(bp);
2602 	bnx2x_free_mem_cnic(bp);
2603 	return rc;
2604 #endif /* ! BNX2X_STOP_ON_ERROR */
2605 }
2606 
2607 /* must be called with rtnl_lock */
2608 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2609 {
2610 	int port = BP_PORT(bp);
2611 	int i, rc = 0, load_code = 0;
2612 
2613 	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2614 	DP(NETIF_MSG_IFUP,
2615 	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2616 
2617 #ifdef BNX2X_STOP_ON_ERROR
2618 	if (unlikely(bp->panic)) {
2619 		BNX2X_ERR("Can't load NIC when there is panic\n");
2620 		return -EPERM;
2621 	}
2622 #endif
2623 
2624 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2625 
2626 	/* zero the structure w/o any lock, before SP handler is initialized */
2627 	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2628 	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2629 		&bp->last_reported_link.link_report_flags);
2630 
2631 	if (IS_PF(bp))
2632 		/* must be called before memory allocation and HW init */
2633 		bnx2x_ilt_set_info(bp);
2634 
2635 	/*
2636 	 * Zero fastpath structures preserving invariants like napi, which are
2637 	 * allocated only once, fp index, max_cos, bp pointer.
2638 	 * Also set fp->mode and txdata_ptr.
2639 	 */
2640 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2641 	for_each_queue(bp, i)
2642 		bnx2x_bz_fp(bp, i);
2643 	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2644 				  bp->num_cnic_queues) *
2645 				  sizeof(struct bnx2x_fp_txdata));
2646 
2647 	bp->fcoe_init = false;
2648 
2649 	/* Set the receive queues buffer size */
2650 	bnx2x_set_rx_buf_size(bp);
2651 
2652 	if (IS_PF(bp)) {
2653 		rc = bnx2x_alloc_mem(bp);
2654 		if (rc) {
2655 			BNX2X_ERR("Unable to allocate bp memory\n");
2656 			return rc;
2657 		}
2658 	}
2659 
2660 	/* needs to be done after alloc mem, since it's self-adjusting to the amount
2661 	 * of memory available for RSS queues
2662 	 */
2663 	rc = bnx2x_alloc_fp_mem(bp);
2664 	if (rc) {
2665 		BNX2X_ERR("Unable to allocate memory for fps\n");
2666 		LOAD_ERROR_EXIT(bp, load_error0);
2667 	}
2668 
2669 	/* Allocate memory for FW statistics */
2670 	if (bnx2x_alloc_fw_stats_mem(bp))
2671 		LOAD_ERROR_EXIT(bp, load_error0);
2672 
2673 	/* request pf to initialize status blocks */
2674 	if (IS_VF(bp)) {
2675 		rc = bnx2x_vfpf_init(bp);
2676 		if (rc)
2677 			LOAD_ERROR_EXIT(bp, load_error0);
2678 	}
2679 
2680 	/* Since bnx2x_alloc_mem() may update
2681 	 * bp->num_queues, bnx2x_set_real_num_queues() should always
2682 	 * come after it. At this stage cnic queues are not counted.
2683 	 */
2684 	rc = bnx2x_set_real_num_queues(bp, 0);
2685 	if (rc) {
2686 		BNX2X_ERR("Unable to set real_num_queues\n");
2687 		LOAD_ERROR_EXIT(bp, load_error0);
2688 	}
2689 
2690 	/* configure multi cos mappings in kernel.
2691 	 * this configuration may be overridden by a multi class queue
2692 	 * discipline or by a dcbx negotiation result.
2693 	 */
2694 	bnx2x_setup_tc(bp->dev, bp->max_cos);
2695 
2696 	/* Add all NAPI objects */
2697 	bnx2x_add_all_napi(bp);
2698 	DP(NETIF_MSG_IFUP, "napi added\n");
2699 	bnx2x_napi_enable(bp);
2700 
2701 	if (IS_PF(bp)) {
2702 		/* set pf load just before approaching the MCP */
2703 		bnx2x_set_pf_load(bp);
2704 
2705 		/* if mcp exists send load request and analyze response */
2706 		if (!BP_NOMCP(bp)) {
2707 			/* attempt to load pf */
2708 			rc = bnx2x_nic_load_request(bp, &load_code);
2709 			if (rc)
2710 				LOAD_ERROR_EXIT(bp, load_error1);
2711 
2712 			/* what did mcp say? */
2713 			rc = bnx2x_compare_fw_ver(bp, load_code, true);
2714 			if (rc) {
2715 				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2716 				LOAD_ERROR_EXIT(bp, load_error2);
2717 			}
2718 		} else {
2719 			load_code = bnx2x_nic_load_no_mcp(bp, port);
2720 		}
2721 
2722 		/* mark pmf if applicable */
2723 		bnx2x_nic_load_pmf(bp, load_code);
2724 
2725 		/* Init Function state controlling object */
2726 		bnx2x__init_func_obj(bp);
2727 
2728 		/* Initialize HW */
2729 		rc = bnx2x_init_hw(bp, load_code);
2730 		if (rc) {
2731 			BNX2X_ERR("HW init failed, aborting\n");
2732 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2733 			LOAD_ERROR_EXIT(bp, load_error2);
2734 		}
2735 	}
2736 
2737 	bnx2x_pre_irq_nic_init(bp);
2738 
2739 	/* Connect to IRQs */
2740 	rc = bnx2x_setup_irqs(bp);
2741 	if (rc) {
2742 		BNX2X_ERR("setup irqs failed\n");
2743 		if (IS_PF(bp))
2744 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2745 		LOAD_ERROR_EXIT(bp, load_error2);
2746 	}
2747 
2748 	/* Init per-function objects */
2749 	if (IS_PF(bp)) {
2750 		/* Setup NIC internals and enable interrupts */
2751 		bnx2x_post_irq_nic_init(bp, load_code);
2752 
2753 		bnx2x_init_bp_objs(bp);
2754 		bnx2x_iov_nic_init(bp);
2755 
2756 		/* Set AFEX default VLAN tag to an invalid value */
2757 		bp->afex_def_vlan_tag = -1;
2758 		bnx2x_nic_load_afex_dcc(bp, load_code);
2759 		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2760 		rc = bnx2x_func_start(bp);
2761 		if (rc) {
2762 			BNX2X_ERR("Function start failed!\n");
2763 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2764 
2765 			LOAD_ERROR_EXIT(bp, load_error3);
2766 		}
2767 
2768 		/* Send LOAD_DONE command to MCP */
2769 		if (!BP_NOMCP(bp)) {
2770 			load_code = bnx2x_fw_command(bp,
2771 						     DRV_MSG_CODE_LOAD_DONE, 0);
2772 			if (!load_code) {
2773 				BNX2X_ERR("MCP response failure, aborting\n");
2774 				rc = -EBUSY;
2775 				LOAD_ERROR_EXIT(bp, load_error3);
2776 			}
2777 		}
2778 
2779 		/* initialize FW coalescing state machines in RAM */
2780 		bnx2x_update_coalesce(bp);
2781 	}
2782 
2783 	/* setup the leading queue */
2784 	rc = bnx2x_setup_leading(bp);
2785 	if (rc) {
2786 		BNX2X_ERR("Setup leading failed!\n");
2787 		LOAD_ERROR_EXIT(bp, load_error3);
2788 	}
2789 
2790 	/* set up the rest of the queues */
2791 	for_each_nondefault_eth_queue(bp, i) {
2792 		if (IS_PF(bp))
2793 			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2794 		else /* VF */
2795 			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2796 		if (rc) {
2797 			BNX2X_ERR("Queue %d setup failed\n", i);
2798 			LOAD_ERROR_EXIT(bp, load_error3);
2799 		}
2800 	}
2801 
2802 	/* setup rss */
2803 	rc = bnx2x_init_rss(bp);
2804 	if (rc) {
2805 		BNX2X_ERR("PF RSS init failed\n");
2806 		LOAD_ERROR_EXIT(bp, load_error3);
2807 	}
2808 
2809 	/* Now that clients are configured we are ready to work */
2810 	bp->state = BNX2X_STATE_OPEN;
2811 
2812 	/* Configure a ucast MAC */
2813 	if (IS_PF(bp))
2814 		rc = bnx2x_set_eth_mac(bp, true);
2815 	else /* vf */
2816 		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2817 					   true);
2818 	if (rc) {
2819 		BNX2X_ERR("Setting Ethernet MAC failed\n");
2820 		LOAD_ERROR_EXIT(bp, load_error3);
2821 	}
2822 
2823 	if (IS_PF(bp) && bp->pending_max) {
2824 		bnx2x_update_max_mf_config(bp, bp->pending_max);
2825 		bp->pending_max = 0;
2826 	}
2827 
2828 	bp->force_link_down = false;
2829 	if (bp->port.pmf) {
2830 		rc = bnx2x_initial_phy_init(bp, load_mode);
2831 		if (rc)
2832 			LOAD_ERROR_EXIT(bp, load_error3);
2833 	}
2834 	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2835 
2836 	/* Start fast path */
2837 
2838 	/* Re-configure vlan filters */
2839 	rc = bnx2x_vlan_reconfigure_vid(bp);
2840 	if (rc)
2841 		LOAD_ERROR_EXIT(bp, load_error3);
2842 
2843 	/* Initialize Rx filter. */
2844 	bnx2x_set_rx_mode_inner(bp);
2845 
2846 	if (bp->flags & PTP_SUPPORTED) {
2847 		bnx2x_init_ptp(bp);
2848 		bnx2x_configure_ptp_filters(bp);
2849 	}
2850 	/* Start Tx */
2851 	switch (load_mode) {
2852 	case LOAD_NORMAL:
2853 		/* Tx queues should only be re-enabled */
2854 		netif_tx_wake_all_queues(bp->dev);
2855 		break;
2856 
2857 	case LOAD_OPEN:
2858 		netif_tx_start_all_queues(bp->dev);
2859 		smp_mb__after_atomic();
2860 		break;
2861 
2862 	case LOAD_DIAG:
2863 	case LOAD_LOOPBACK_EXT:
2864 		bp->state = BNX2X_STATE_DIAG;
2865 		break;
2866 
2867 	default:
2868 		break;
2869 	}
2870 
2871 	if (bp->port.pmf)
2872 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2873 	else
2874 		bnx2x__link_status_update(bp);
2875 
2876 	/* start the timer */
2877 	mod_timer(&bp->timer, jiffies + bp->current_interval);
2878 
2879 	if (CNIC_ENABLED(bp))
2880 		bnx2x_load_cnic(bp);
2881 
2882 	if (IS_PF(bp))
2883 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2884 
2885 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2886 		/* mark driver is loaded in shmem2 */
2887 		u32 val;
2888 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2889 		val &= ~DRV_FLAGS_MTU_MASK;
2890 		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2891 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2892 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2893 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
2894 	}
2895 
2896 	/* Wait for all pending SP commands to complete */
2897 	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2898 		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2899 		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2900 		return -EBUSY;
2901 	}
2902 
2903 	/* Update driver data for On-Chip MFW dump. */
2904 	if (IS_PF(bp))
2905 		bnx2x_update_mfw_dump(bp);
2906 
2907 	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2908 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2909 		bnx2x_dcbx_init(bp, false);
2910 
2911 	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2912 		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2913 
2914 	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2915 
2916 	return 0;
2917 
2918 #ifndef BNX2X_STOP_ON_ERROR
2919 load_error3:
2920 	if (IS_PF(bp)) {
2921 		bnx2x_int_disable_sync(bp, 1);
2922 
2923 		/* Clean queueable objects */
2924 		bnx2x_squeeze_objects(bp);
2925 	}
2926 
2927 	/* Free SKBs, SGEs, TPA pool and driver internals */
2928 	bnx2x_free_skbs(bp);
2929 	for_each_rx_queue(bp, i)
2930 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2931 
2932 	/* Release IRQs */
2933 	bnx2x_free_irq(bp);
2934 load_error2:
2935 	if (IS_PF(bp) && !BP_NOMCP(bp)) {
2936 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2937 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2938 	}
2939 
2940 	bp->port.pmf = 0;
2941 load_error1:
2942 	bnx2x_napi_disable(bp);
2943 	bnx2x_del_all_napi(bp);
2944 
2945 	/* clear pf_load status, as it was already set */
2946 	if (IS_PF(bp))
2947 		bnx2x_clear_pf_load(bp);
2948 load_error0:
2949 	bnx2x_free_fw_stats_mem(bp);
2950 	bnx2x_free_fp_mem(bp);
2951 	bnx2x_free_mem(bp);
2952 
2953 	return rc;
2954 #endif /* ! BNX2X_STOP_ON_ERROR */
2955 }
2956 
2957 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2958 {
2959 	u8 rc = 0, cos, i;
2960 
2961 	/* Wait until tx fastpath tasks complete */
2962 	for_each_tx_queue(bp, i) {
2963 		struct bnx2x_fastpath *fp = &bp->fp[i];
2964 
2965 		for_each_cos_in_tx_queue(fp, cos)
2966 			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2967 		if (rc)
2968 			return rc;
2969 	}
2970 	return 0;
2971 }
2972 
2973 /* must be called with rtnl_lock */
2974 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2975 {
2976 	int i;
2977 	bool global = false;
2978 
2979 	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2980 
2981 	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2982 		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2983 
2984 	/* mark driver is unloaded in shmem2 */
2985 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2986 		u32 val;
2987 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2988 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2989 			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2990 	}
2991 
2992 	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2993 	    (bp->state == BNX2X_STATE_CLOSED ||
2994 	     bp->state == BNX2X_STATE_ERROR)) {
2995 		/* We can get here if the driver has been unloaded
2996 		 * during parity error recovery and is either waiting for a
2997 		 * leader to complete or for other functions to unload and
2998 		 * then ifdown has been issued. In this case we want to
2999 		 * unload and let other functions to complete a recovery
3000 		 * unload and let other functions complete the recovery
3001 		 */
3002 		bp->recovery_state = BNX2X_RECOVERY_DONE;
3003 		bp->is_leader = 0;
3004 		bnx2x_release_leader_lock(bp);
3005 		smp_mb();
3006 
3007 		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3008 		BNX2X_ERR("Can't unload in closed or error state\n");
3009 		return -EINVAL;
3010 	}
3011 
3012 	/* Nothing to do during unload if previous bnx2x_nic_load()
3013 	 * has not completed successfully - all resources are released.
3014 	 *
3015 	 * We can get here only after an unsuccessful ndo_* callback, during which
3016 	 * dev->IFF_UP flag is still on.
3017 	 */
3018 	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3019 		return 0;
3020 
3021 	/* It's important to set bp->state to a value different from
3022 	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3023 	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3024 	 */
3025 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3026 	smp_mb();
3027 
3028 	/* indicate to VFs that the PF is going down */
3029 	bnx2x_iov_channel_down(bp);
3030 
3031 	if (CNIC_LOADED(bp))
3032 		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3033 
3034 	/* Stop Tx */
3035 	bnx2x_tx_disable(bp);
3036 	netdev_reset_tc(bp->dev);
3037 
3038 	bp->rx_mode = BNX2X_RX_MODE_NONE;
3039 
3040 	del_timer_sync(&bp->timer);
3041 
3042 	if (IS_PF(bp) && !BP_NOMCP(bp)) {
3043 		/* Set ALWAYS_ALIVE bit in shmem */
3044 		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3045 		bnx2x_drv_pulse(bp);
3046 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3047 		bnx2x_save_statistics(bp);
3048 	}
3049 
3050 	/* wait till consumers catch up with producers in all queues.
3051 	 * If we're recovering, FW can't write to host so no reason
3052 	 * to wait for the queues to complete all Tx.
3053 	 */
3054 	if (unload_mode != UNLOAD_RECOVERY)
3055 		bnx2x_drain_tx_queues(bp);
3056 
3057 	/* If VF, indicate to the PF that this function is going down (the PF
3058 	 * will delete SP elements and clear initializations).
3059 	 */
3060 	if (IS_VF(bp)) {
3061 		bnx2x_clear_vlan_info(bp);
3062 		bnx2x_vfpf_close_vf(bp);
3063 	} else if (unload_mode != UNLOAD_RECOVERY) {
3064 		/* if this is a normal/close unload need to clean up chip*/
3065 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3066 	} else {
3067 		/* Send the UNLOAD_REQUEST to the MCP */
3068 		bnx2x_send_unload_req(bp, unload_mode);
3069 
3070 		/* Prevent transactions to host from the functions on the
3071 		 * engine that doesn't reset global blocks in case of global
3072 		 * attention once global blocks are reset and gates are opened
3073 		 * (the engine whose leader will perform the recovery
3074 		 * last).
3075 		 */
3076 		if (!CHIP_IS_E1x(bp))
3077 			bnx2x_pf_disable(bp);
3078 
3079 		/* Disable HW interrupts, NAPI */
3080 		bnx2x_netif_stop(bp, 1);
3081 		/* Delete all NAPI objects */
3082 		bnx2x_del_all_napi(bp);
3083 		if (CNIC_LOADED(bp))
3084 			bnx2x_del_all_napi_cnic(bp);
3085 		/* Release IRQs */
3086 		bnx2x_free_irq(bp);
3087 
3088 		/* Report UNLOAD_DONE to MCP */
3089 		bnx2x_send_unload_done(bp, false);
3090 	}
3091 
3092 	/*
3093 	 * At this stage no more interrupts will arrive so we may safely clean
3094 	 * the queueable objects here in case they failed to get cleaned so far.
3095 	 */
3096 	if (IS_PF(bp))
3097 		bnx2x_squeeze_objects(bp);
3098 
3099 	/* There should be no more pending SP commands at this stage */
3100 	bp->sp_state = 0;
3101 
3102 	bp->port.pmf = 0;
3103 
3104 	/* clear pending work in rtnl task */
3105 	bp->sp_rtnl_state = 0;
3106 	smp_mb();
3107 
3108 	/* Free SKBs, SGEs, TPA pool and driver internals */
3109 	bnx2x_free_skbs(bp);
3110 	if (CNIC_LOADED(bp))
3111 		bnx2x_free_skbs_cnic(bp);
3112 	for_each_rx_queue(bp, i)
3113 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3114 
3115 	bnx2x_free_fp_mem(bp);
3116 	if (CNIC_LOADED(bp))
3117 		bnx2x_free_fp_mem_cnic(bp);
3118 
3119 	if (IS_PF(bp)) {
3120 		if (CNIC_LOADED(bp))
3121 			bnx2x_free_mem_cnic(bp);
3122 	}
3123 	bnx2x_free_mem(bp);
3124 
3125 	bp->state = BNX2X_STATE_CLOSED;
3126 	bp->cnic_loaded = false;
3127 
3128 	/* Clear driver version indication in shmem */
3129 	if (IS_PF(bp) && !BP_NOMCP(bp))
3130 		bnx2x_update_mng_version(bp);
3131 
3132 	/* Check if there are pending parity attentions. If there are - set
3133 	 * RECOVERY_IN_PROGRESS.
3134 	 */
3135 	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3136 		bnx2x_set_reset_in_progress(bp);
3137 
3138 		/* Set RESET_IS_GLOBAL if needed */
3139 		if (global)
3140 			bnx2x_set_reset_global(bp);
3141 	}
3142 
3143 	/* The last driver must disable a "close the gate" if there is no
3144 	 * parity attention or "process kill" pending.
3145 	 */
3146 	if (IS_PF(bp) &&
3147 	    !bnx2x_clear_pf_load(bp) &&
3148 	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
3149 		bnx2x_disable_close_the_gate(bp);
3150 
3151 	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3152 
3153 	return 0;
3154 }
3155 
3156 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3157 {
3158 	u16 pmcsr;
3159 
3160 	/* If there is no power capability, silently succeed */
3161 	if (!bp->pdev->pm_cap) {
3162 		BNX2X_DEV_INFO("No power capability. Breaking.\n");
3163 		return 0;
3164 	}
3165 
3166 	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3167 
3168 	switch (state) {
3169 	case PCI_D0:
3170 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3171 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3172 				       PCI_PM_CTRL_PME_STATUS));
3173 
3174 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3175 			/* delay required during transition out of D3hot */
3176 			msleep(20);
3177 		break;
3178 
3179 	case PCI_D3hot:
3180 		/* If there are other clients above, don't
3181 		   shut down the power */
3182 		if (atomic_read(&bp->pdev->enable_cnt) != 1)
3183 			return 0;
3184 		/* Don't shut down the power for emulation and FPGA */
3185 		if (CHIP_REV_IS_SLOW(bp))
3186 			return 0;
3187 
3188 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3189 		pmcsr |= 3;
3190 
3191 		if (bp->wol)
3192 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3193 
3194 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3195 				      pmcsr);
3196 
3197 		/* No more memory access after this point until
3198 		 * device is brought back to D0.
3199 		 */
3200 		break;
3201 
3202 	default:
3203 		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3204 		return -EINVAL;
3205 	}
3206 	return 0;
3207 }
3208 
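/* Editorial usage sketch (not part of the driver source): a suspend/resume
 * path would use the helper above roughly as
 *
 *	bnx2x_set_power_state(bp, PCI_D3hot);	// PM state bits set to 3,
 *						// plus PME_ENABLE when WoL is on
 *	...
 *	bnx2x_set_power_state(bp, PCI_D0);	// clears the state bits and
 *						// waits 20ms if leaving D3hot
 *
 * It silently succeeds when the device exposes no PM capability.
 */
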
3209 /*
3210  * net_device service functions
3211  */
3212 static int bnx2x_poll(struct napi_struct *napi, int budget)
3213 {
3214 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3215 						 napi);
3216 	struct bnx2x *bp = fp->bp;
3217 	int rx_work_done;
3218 	u8 cos;
3219 
3220 #ifdef BNX2X_STOP_ON_ERROR
3221 	if (unlikely(bp->panic)) {
3222 		napi_complete(napi);
3223 		return 0;
3224 	}
3225 #endif
3226 	for_each_cos_in_tx_queue(fp, cos)
3227 		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3228 			bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3229 
3230 	rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3231 
3232 	if (rx_work_done < budget) {
3233 		/* No need to update SB for FCoE L2 ring as long as
3234 		 * it's connected to the default SB and the SB
3235 		 * has been updated when NAPI was scheduled.
3236 		 */
3237 		if (IS_FCOE_FP(fp)) {
3238 			napi_complete_done(napi, rx_work_done);
3239 		} else {
3240 			bnx2x_update_fpsb_idx(fp);
3241 			/* bnx2x_has_rx_work() reads the status block,
3242 			 * thus we need to ensure that status block indices
3243 			 * have been actually read (bnx2x_update_fpsb_idx)
3244 			 * prior to this check (bnx2x_has_rx_work) so that
3245 			 * we won't write the "newer" value of the status block
3246 			 * to IGU (if there was a DMA right after
3247 			 * bnx2x_has_rx_work and if there is no rmb, the memory
3248 			 * reading (bnx2x_update_fpsb_idx) may be postponed
3249 			 * to right before bnx2x_ack_sb). In this case there
3250 			 * will never be another interrupt until there is
3251 			 * another update of the status block, while there
3252 			 * is still unhandled work.
3253 			 */
3254 			rmb();
3255 
3256 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3257 				if (napi_complete_done(napi, rx_work_done)) {
3258 					/* Re-enable interrupts */
3259 					DP(NETIF_MSG_RX_STATUS,
3260 					   "Update index to %d\n", fp->fp_hc_idx);
3261 					bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3262 						     le16_to_cpu(fp->fp_hc_idx),
3263 						     IGU_INT_ENABLE, 1);
3264 				}
3265 			} else {
3266 				rx_work_done = budget;
3267 			}
3268 		}
3269 	}
3270 
3271 	return rx_work_done;
3272 }
3273 
3274 /* we split the first BD into headers and data BDs
3275  * to ease the pain of our fellow microcode engineers;
3276  * we use one mapping for both BDs.
3277  */
3278 static u16 bnx2x_tx_split(struct bnx2x *bp,
3279 			  struct bnx2x_fp_txdata *txdata,
3280 			  struct sw_tx_bd *tx_buf,
3281 			  struct eth_tx_start_bd **tx_bd, u16 hlen,
3282 			  u16 bd_prod)
3283 {
3284 	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3285 	struct eth_tx_bd *d_tx_bd;
3286 	dma_addr_t mapping;
3287 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
3288 
3289 	/* first fix first BD */
3290 	h_tx_bd->nbytes = cpu_to_le16(hlen);
3291 
3292 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x)\n",
3293 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3294 
3295 	/* now get a new data BD
3296 	 * (after the pbd) and fill it */
3297 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3298 	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3299 
3300 	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3301 			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3302 
3303 	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3304 	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3305 	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3306 
3307 	/* this marks the BD as one that has no individual mapping */
3308 	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3309 
3310 	DP(NETIF_MSG_TX_QUEUED,
3311 	   "TSO split data size is %d (%x:%x)\n",
3312 	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3313 
3314 	/* update tx_bd */
3315 	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3316 
3317 	return bd_prod;
3318 }
3319 
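/* Editorial worked example (not part of the driver source): say the first
 * BD covers a 1514-byte linear part and hlen == 66 (ETH + IP + TCP headers;
 * the 66 is illustrative). bnx2x_tx_split() shrinks the start BD to 66
 * bytes and appends a data BD of 1514 - 66 = 1448 bytes at mapping + 66.
 * Both BDs reuse the original DMA mapping, which is why the sw_tx_bd is
 * flagged with BNX2X_TSO_SPLIT_BD (no separate unmap for the second BD).
 */
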
3320 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3321 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3322 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3323 {
3324 	__sum16 tsum = (__force __sum16) csum;
3325 
3326 	if (fix > 0)
3327 		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3328 				  csum_partial(t_header - fix, fix, 0)));
3329 
3330 	else if (fix < 0)
3331 		tsum = ~csum_fold(csum_add((__force __wsum) csum,
3332 				  csum_partial(t_header, -fix, 0)));
3333 
3334 	return bswab16(tsum);
3335 }
3336 
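/* Editorial note (not part of the driver source), one reading of the fixup
 * above: 'csum' is a partial checksum that starts 'fix' bytes away from the
 * real transport header. For fix > 0 the checksum of the 'fix' bytes before
 * t_header is subtracted out, for fix < 0 the missing leading bytes are
 * added in; the result is folded, complemented and byte-swapped into the
 * layout the FW descriptors expect.
 */
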
3337 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3338 {
3339 	u32 rc;
3340 	__u8 prot = 0;
3341 	__be16 protocol;
3342 
3343 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3344 		return XMIT_PLAIN;
3345 
3346 	protocol = vlan_get_protocol(skb);
3347 	if (protocol == htons(ETH_P_IPV6)) {
3348 		rc = XMIT_CSUM_V6;
3349 		prot = ipv6_hdr(skb)->nexthdr;
3350 	} else {
3351 		rc = XMIT_CSUM_V4;
3352 		prot = ip_hdr(skb)->protocol;
3353 	}
3354 
3355 	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3356 		if (inner_ip_hdr(skb)->version == 6) {
3357 			rc |= XMIT_CSUM_ENC_V6;
3358 			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3359 				rc |= XMIT_CSUM_TCP;
3360 		} else {
3361 			rc |= XMIT_CSUM_ENC_V4;
3362 			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3363 				rc |= XMIT_CSUM_TCP;
3364 		}
3365 	}
3366 	if (prot == IPPROTO_TCP)
3367 		rc |= XMIT_CSUM_TCP;
3368 
3369 	if (skb_is_gso(skb)) {
3370 		if (skb_is_gso_v6(skb)) {
3371 			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3372 			if (rc & XMIT_CSUM_ENC)
3373 				rc |= XMIT_GSO_ENC_V6;
3374 		} else {
3375 			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3376 			if (rc & XMIT_CSUM_ENC)
3377 				rc |= XMIT_GSO_ENC_V4;
3378 		}
3379 	}
3380 
3381 	return rc;
3382 }
3383 
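/* Editorial example (not part of the driver source): typical flag
 * combinations returned by bnx2x_xmit_type() above:
 *
 *	- TCPv4, CHECKSUM_PARTIAL, GSO:
 *		XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
 *	- inner TCPv4 GSO in an IPv4 tunnel on a non-E1x chip:
 *		the flags above plus XMIT_CSUM_ENC_V4 | XMIT_GSO_ENC_V4
 *	- anything without CHECKSUM_PARTIAL:
 *		XMIT_PLAIN
 */
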
3384 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3385 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3386 
3387 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3388 #define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3389 
3390 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3391 /* check if packet requires linearization (packet is too fragmented)
3392    no need to check fragmentation if page size > 8K (there will be no
3393    violation of FW restrictions) */
3394 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3395 			     u32 xmit_type)
3396 {
3397 	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3398 	int to_copy = 0, hlen = 0;
3399 
3400 	if (xmit_type & XMIT_GSO_ENC)
3401 		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3402 
3403 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3404 		if (xmit_type & XMIT_GSO) {
3405 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3406 			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3407 			/* Number of windows to check */
3408 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3409 			int wnd_idx = 0;
3410 			int frag_idx = 0;
3411 			u32 wnd_sum = 0;
3412 
3413 			/* Headers length */
3414 			if (xmit_type & XMIT_GSO_ENC)
3415 				hlen = (int)(skb_inner_transport_header(skb) -
3416 					     skb->data) +
3417 					     inner_tcp_hdrlen(skb);
3418 			else
3419 				hlen = (int)(skb_transport_header(skb) -
3420 					     skb->data) + tcp_hdrlen(skb);
3421 
3422 			/* Amount of data (w/o headers) on linear part of SKB*/
3423 			first_bd_sz = skb_headlen(skb) - hlen;
3424 
3425 			wnd_sum  = first_bd_sz;
3426 
3427 			/* Calculate the first sum - it's special */
3428 			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3429 				wnd_sum +=
3430 					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3431 
3432 			/* If there was data on linear skb data - check it */
3433 			if (first_bd_sz > 0) {
3434 				if (unlikely(wnd_sum < lso_mss)) {
3435 					to_copy = 1;
3436 					goto exit_lbl;
3437 				}
3438 
3439 				wnd_sum -= first_bd_sz;
3440 			}
3441 
3442 			/* Others are easier: run through the frag list and
3443 			   check all windows */
3444 			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3445 				wnd_sum +=
3446 			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3447 
3448 				if (unlikely(wnd_sum < lso_mss)) {
3449 					to_copy = 1;
3450 					break;
3451 				}
3452 				wnd_sum -=
3453 					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3454 			}
3455 		} else {
3456 			/* in non-LSO too fragmented packet should always
3457 			/* in the non-LSO case a packet that is too fragmented
3458 			   should always be linearized */
3459 		}
3460 	}
3461 
3462 exit_lbl:
3463 	if (unlikely(to_copy))
3464 		DP(NETIF_MSG_TX_QUEUED,
3465 		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3466 		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3467 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3468 
3469 	return to_copy;
3470 }
3471 #endif
3472 
3473 /**
3474  * bnx2x_set_pbd_gso - update PBD in GSO case.
3475  *
3476  * @skb:	packet skb
3477  * @pbd:	parse BD
3478  * @xmit_type:	xmit flags
3479  */
3480 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3481 			      struct eth_tx_parse_bd_e1x *pbd,
3482 			      u32 xmit_type)
3483 {
3484 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3485 	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3486 	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3487 
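	/* Note: the pseudo-header checksums below are computed with a zero
	 * length field; the PSEUDO_CS_WITHOUT_LEN flag set at the end marks
	 * that the length is excluded.
	 */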
3488 	if (xmit_type & XMIT_GSO_V4) {
3489 		pbd->ip_id = bswab16(ip_hdr(skb)->id);
3490 		pbd->tcp_pseudo_csum =
3491 			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3492 						   ip_hdr(skb)->daddr,
3493 						   0, IPPROTO_TCP, 0));
3494 	} else {
3495 		pbd->tcp_pseudo_csum =
3496 			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3497 						 &ipv6_hdr(skb)->daddr,
3498 						 0, IPPROTO_TCP, 0));
3499 	}
3500 
3501 	pbd->global_data |=
3502 		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3503 }
3504 
3505 /**
3506  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3507  *
3508  * @bp:			driver handle
3509  * @skb:		packet skb
3510  * @parsing_data:	data to be updated
3511  * @xmit_type:		xmit flags
3512  *
3513  * 57712/578xx related, when skb has encapsulation
3514  */
3515 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3516 				 u32 *parsing_data, u32 xmit_type)
3517 {
3518 	*parsing_data |=
3519 		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3520 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3521 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3522 
3523 	if (xmit_type & XMIT_CSUM_TCP) {
3524 		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3525 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3526 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3527 
3528 		return skb_inner_transport_header(skb) +
3529 			inner_tcp_hdrlen(skb) - skb->data;
3530 	}
3531 
3532 	/* We support checksum offload for TCP and UDP only.
3533 	 * No need to pass the UDP header length - it's a constant.
3534 	 */
3535 	return skb_inner_transport_header(skb) +
3536 		sizeof(struct udphdr) - skb->data;
3537 }
3538 
3539 /**
3540  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3541  *
3542  * @bp:			driver handle
3543  * @skb:		packet skb
3544  * @parsing_data:	data to be updated
3545  * @xmit_type:		xmit flags
3546  *
3547  * 57712/578xx related
3548  */
3549 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3550 				u32 *parsing_data, u32 xmit_type)
3551 {
3552 	*parsing_data |=
3553 		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3554 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3555 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3556 
3557 	if (xmit_type & XMIT_CSUM_TCP) {
3558 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3559 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3560 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3561 
3562 		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3563 	}
3564 	/* We support checksum offload for TCP and UDP only.
3565 	 * No need to pass the UDP header length - it's a constant.
3566 	 */
3567 	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3568 }
3569 
3570 /* set FW indication according to inner or outer protocols if tunneled */
3571 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3572 			       struct eth_tx_start_bd *tx_start_bd,
3573 			       u32 xmit_type)
3574 {
3575 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3576 
3577 	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3578 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3579 
3580 	if (!(xmit_type & XMIT_CSUM_TCP))
3581 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3582 }
3583 
3584 /**
3585  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3586  *
3587  * @bp:		driver handle
3588  * @skb:	packet skb
3589  * @pbd:	parse BD to be updated
3590  * @xmit_type:	xmit flags
3591  */
3592 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3593 			     struct eth_tx_parse_bd_e1x *pbd,
3594 			     u32 xmit_type)
3595 {
3596 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3597 
3598 	/* for now NS flag is not used in Linux */
3599 	pbd->global_data =
3600 		cpu_to_le16(hlen |
3601 			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3602 			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3603 
3604 	pbd->ip_hlen_w = (skb_transport_header(skb) -
3605 			skb_network_header(skb)) >> 1;
3606 
3607 	hlen += pbd->ip_hlen_w;
3608 
3609 	/* We support checksum offload for TCP and UDP only */
3610 	if (xmit_type & XMIT_CSUM_TCP)
3611 		hlen += tcp_hdrlen(skb) / 2;
3612 	else
3613 		hlen += sizeof(struct udphdr) / 2;
3614 
3615 	pbd->total_hlen_w = cpu_to_le16(hlen);
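	/* hlen has been accumulated in 16-bit words; convert back to bytes
	 * for the return value
	 */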
3616 	hlen = hlen * 2;
3617 
3618 	if (xmit_type & XMIT_CSUM_TCP) {
3619 		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3620 
3621 	} else {
3622 		s8 fix = SKB_CS_OFF(skb); /* signed! */
3623 
3624 		DP(NETIF_MSG_TX_QUEUED,
3625 		   "hlen %d  fix %d  csum before fix %x\n",
3626 		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3627 
3628 		/* HW bug: fixup the CSUM */
3629 		pbd->tcp_pseudo_csum =
3630 			bnx2x_csum_fix(skb_transport_header(skb),
3631 				       SKB_CS(skb), fix);
3632 
3633 		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3634 		   pbd->tcp_pseudo_csum);
3635 	}
3636 
3637 	return hlen;
3638 }
3639 
3640 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3641 				      struct eth_tx_parse_bd_e2 *pbd_e2,
3642 				      struct eth_tx_parse_2nd_bd *pbd2,
3643 				      u16 *global_data,
3644 				      u32 xmit_type)
3645 {
3646 	u16 hlen_w = 0;
3647 	u8 outerip_off, outerip_len = 0;
3648 
3649 	/* from outer IP to transport */
3650 	hlen_w = (skb_inner_transport_header(skb) -
3651 		  skb_network_header(skb)) >> 1;
3652 
3653 	/* transport len */
3654 	hlen_w += inner_tcp_hdrlen(skb) >> 1;
3655 
3656 	pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3657 
3658 	/* outer IP header info */
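	/* fw_ip_csum_wo_len_flags_frag: the outer IPv4 header checksum with
	 * the tot_len and frag_off contributions removed (hence the
	 * subtractions below).
	 */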
3659 	if (xmit_type & XMIT_CSUM_V4) {
3660 		struct iphdr *iph = ip_hdr(skb);
3661 		u32 csum = (__force u32)(~iph->check) -
3662 			   (__force u32)iph->tot_len -
3663 			   (__force u32)iph->frag_off;
3664 
3665 		outerip_len = iph->ihl << 1;
3666 
3667 		pbd2->fw_ip_csum_wo_len_flags_frag =
3668 			bswab16(csum_fold((__force __wsum)csum));
3669 	} else {
3670 		pbd2->fw_ip_hdr_to_payload_w =
3671 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3672 		pbd_e2->data.tunnel_data.flags |=
3673 			ETH_TUNNEL_DATA_IPV6_OUTER;
3674 	}
3675 
3676 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3677 
3678 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3679 
3680 	/* inner IP header info */
3681 	if (xmit_type & XMIT_CSUM_ENC_V4) {
3682 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3683 
3684 		pbd_e2->data.tunnel_data.pseudo_csum =
3685 			bswab16(~csum_tcpudp_magic(
3686 					inner_ip_hdr(skb)->saddr,
3687 					inner_ip_hdr(skb)->daddr,
3688 					0, IPPROTO_TCP, 0));
3689 	} else {
3690 		pbd_e2->data.tunnel_data.pseudo_csum =
3691 			bswab16(~csum_ipv6_magic(
3692 					&inner_ipv6_hdr(skb)->saddr,
3693 					&inner_ipv6_hdr(skb)->daddr,
3694 					0, IPPROTO_TCP, 0));
3695 	}
3696 
3697 	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3698 
3699 	*global_data |=
3700 		outerip_off |
3701 		(outerip_len <<
3702 			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3703 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3704 			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3705 
3706 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3707 		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3708 		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3709 	}
3710 }
3711 
3712 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3713 					 u32 xmit_type)
3714 {
3715 	struct ipv6hdr *ipv6;
3716 
3717 	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3718 		return;
3719 
3720 	if (xmit_type & XMIT_GSO_ENC_V6)
3721 		ipv6 = inner_ipv6_hdr(skb);
3722 	else /* XMIT_GSO_V6 */
3723 		ipv6 = ipv6_hdr(skb);
3724 
3725 	if (ipv6->nexthdr == NEXTHDR_IPV6)
3726 		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3727 }
3728 
3729 /* called with netif_tx_lock
3730  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3731  * netif_wake_queue()
3732  */
3733 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3734 {
3735 	struct bnx2x *bp = netdev_priv(dev);
3736 
3737 	struct netdev_queue *txq;
3738 	struct bnx2x_fp_txdata *txdata;
3739 	struct sw_tx_bd *tx_buf;
3740 	struct eth_tx_start_bd *tx_start_bd, *first_bd;
3741 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3742 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3743 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3744 	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3745 	u32 pbd_e2_parsing_data = 0;
3746 	u16 pkt_prod, bd_prod;
3747 	int nbd, txq_index;
3748 	dma_addr_t mapping;
3749 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
3750 	int i;
3751 	u8 hlen = 0;
3752 	__le16 pkt_size = 0;
3753 	struct ethhdr *eth;
3754 	u8 mac_type = UNICAST_ADDRESS;
3755 
3756 #ifdef BNX2X_STOP_ON_ERROR
3757 	if (unlikely(bp->panic))
3758 		return NETDEV_TX_BUSY;
3759 #endif
3760 
3761 	txq_index = skb_get_queue_mapping(skb);
3762 	txq = netdev_get_tx_queue(dev, txq_index);
3763 
3764 	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3765 
3766 	txdata = &bp->bnx2x_txq[txq_index];
3767 
3768 	/* enable this debug print to view the transmission queue being used
3769 	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3770 	   txq_index, fp_index, txdata_index); */
3771 
3772 	/* enable this debug print to view the transmission details
3773 	DP(NETIF_MSG_TX_QUEUED,
3774 	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3775 	   txdata->cid, fp_index, txdata_index, txdata, fp); */
3776 
3777 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
3778 			skb_shinfo(skb)->nr_frags +
3779 			BDS_PER_TX_PKT +
3780 			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3781 		/* Handle special storage cases separately */
3782 		if (txdata->tx_ring_size == 0) {
3783 			struct bnx2x_eth_q_stats *q_stats =
3784 				bnx2x_fp_qstats(bp, txdata->parent_fp);
3785 			q_stats->driver_filtered_tx_pkt++;
3786 			dev_kfree_skb(skb);
3787 			return NETDEV_TX_OK;
3788 		}
3789 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3790 		netif_tx_stop_queue(txq);
3791 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3792 
3793 		return NETDEV_TX_BUSY;
3794 	}
3795 
3796 	DP(NETIF_MSG_TX_QUEUED,
3797 	   "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3798 	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3799 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3800 	   skb->len);
3801 
3802 	eth = (struct ethhdr *)skb->data;
3803 
3804 	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
3805 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3806 		if (is_broadcast_ether_addr(eth->h_dest))
3807 			mac_type = BROADCAST_ADDRESS;
3808 		else
3809 			mac_type = MULTICAST_ADDRESS;
3810 	}
3811 
3812 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3813 	/* First, check if we need to linearize the skb (due to FW
3814 	   restrictions). No need to check fragmentation if page size > 8K
3815 	   (there will be no violation to FW restrictions) */
3816 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3817 		/* Statistics of linearization */
3818 		bp->lin_cnt++;
3819 		if (skb_linearize(skb) != 0) {
3820 			DP(NETIF_MSG_TX_QUEUED,
3821 			   "SKB linearization failed - silently dropping this SKB\n");
3822 			dev_kfree_skb_any(skb);
3823 			return NETDEV_TX_OK;
3824 		}
3825 	}
3826 #endif
3827 	/* Map skb linear data for DMA */
3828 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
3829 				 skb_headlen(skb), DMA_TO_DEVICE);
3830 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3831 		DP(NETIF_MSG_TX_QUEUED,
3832 		   "SKB mapping failed - silently dropping this SKB\n");
3833 		dev_kfree_skb_any(skb);
3834 		return NETDEV_TX_OK;
3835 	}
3836 	/*
3837 	Please read carefully. First we use one BD which we mark as start,
3838 	then we have a parsing info BD (used for TSO or xsum),
3839 	and only then we have the rest of the TSO BDs.
3840 	(don't forget to mark the last one as last,
3841 	and to unmap only AFTER you write to the BD ...)
3842 	And above all, all PBD sizes are in words - NOT DWORDS!
3843 	*/
3844 
3845 	/* get current pkt produced now - advance it just before sending packet
3846 	 * since mapping of pages may fail and cause packet to be dropped
3847 	 */
3848 	pkt_prod = txdata->tx_pkt_prod;
3849 	bd_prod = TX_BD(txdata->tx_bd_prod);
3850 
3851 	/* get a tx_buf and first BD
3852 	 * tx_start_bd may be changed during SPLIT,
3853 	 * but first_bd will always stay first
3854 	 */
3855 	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3856 	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3857 	first_bd = tx_start_bd;
3858 
3859 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3860 
3861 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3862 		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3863 			bp->eth_stats.ptp_skip_tx_ts++;
3864 			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3865 		} else if (bp->ptp_tx_skb) {
3866 			bp->eth_stats.ptp_skip_tx_ts++;
3867 			dev_err_once(&bp->dev->dev,
3868 					"Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3869 		} else {
3870 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3871 			/* schedule check for Tx timestamp */
3872 			bp->ptp_tx_skb = skb_get(skb);
3873 			bp->ptp_tx_start = jiffies;
3874 			schedule_work(&bp->ptp_task);
3875 		}
3876 	}
3877 
3878 	/* header nbd: indirectly zero other flags! */
3879 	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3880 
3881 	/* remember the first BD of the packet */
3882 	tx_buf->first_bd = txdata->tx_bd_prod;
3883 	tx_buf->skb = skb;
3884 	tx_buf->flags = 0;
3885 
3886 	DP(NETIF_MSG_TX_QUEUED,
3887 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3888 	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3889 
3890 	if (skb_vlan_tag_present(skb)) {
3891 		tx_start_bd->vlan_or_ethertype =
3892 		    cpu_to_le16(skb_vlan_tag_get(skb));
3893 		tx_start_bd->bd_flags.as_bitfield |=
3894 		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3895 	} else {
3896 		/* when transmitting from a VF, the start bd must hold the
3897 		 * ethertype for the FW to enforce it
3898 		 */
3899 		u16 vlan_tci = 0;
3900 #ifndef BNX2X_STOP_ON_ERROR
3901 		if (IS_VF(bp)) {
3902 #endif
3903 			/* Still need to consider an inband vlan for enforcement */
3904 			if (__vlan_get_tag(skb, &vlan_tci)) {
3905 				tx_start_bd->vlan_or_ethertype =
3906 					cpu_to_le16(ntohs(eth->h_proto));
3907 			} else {
3908 				tx_start_bd->bd_flags.as_bitfield |=
3909 					(X_ETH_INBAND_VLAN <<
3910 					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3911 				tx_start_bd->vlan_or_ethertype =
3912 					cpu_to_le16(vlan_tci);
3913 			}
3914 #ifndef BNX2X_STOP_ON_ERROR
3915 		} else {
3916 			/* used by FW for packet accounting */
3917 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3918 		}
3919 #endif
3920 	}
3921 
3922 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3923 
3924 	/* turn on parsing and get a BD */
3925 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3926 
3927 	if (xmit_type & XMIT_CSUM)
3928 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3929 
3930 	if (!CHIP_IS_E1x(bp)) {
3931 		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3932 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3933 
3934 		if (xmit_type & XMIT_CSUM_ENC) {
3935 			u16 global_data = 0;
3936 
3937 			/* Set PBD in enc checksum offload case */
3938 			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3939 						      &pbd_e2_parsing_data,
3940 						      xmit_type);
3941 
3942 			/* turn on 2nd parsing and get a BD */
3943 			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3944 
3945 			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3946 
3947 			memset(pbd2, 0, sizeof(*pbd2));
3948 
3949 			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3950 				(skb_inner_network_header(skb) -
3951 				 skb->data) >> 1;
3952 
3953 			if (xmit_type & XMIT_GSO_ENC)
3954 				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3955 							  &global_data,
3956 							  xmit_type);
3957 
3958 			pbd2->global_data = cpu_to_le16(global_data);
3959 
3960 			/* add the additional parse BD indication to the start BD */
3961 			SET_FLAG(tx_start_bd->general_data,
3962 				 ETH_TX_START_BD_PARSE_NBDS, 1);
3963 			/* set encapsulation flag in start BD */
3964 			SET_FLAG(tx_start_bd->general_data,
3965 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3966 
3967 			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3968 
3969 			nbd++;
3970 		} else if (xmit_type & XMIT_CSUM) {
3971 			/* Set PBD in checksum offload case w/o encapsulation */
3972 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3973 						     &pbd_e2_parsing_data,
3974 						     xmit_type);
3975 		}
3976 
3977 		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3978 		/* Add the macs to the parsing BD if this is a vf or if
3979 		 * Tx Switching is enabled.
3980 		 */
3981 		if (IS_VF(bp)) {
3982 			/* override GRE parameters in BD */
3983 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3984 					      &pbd_e2->data.mac_addr.src_mid,
3985 					      &pbd_e2->data.mac_addr.src_lo,
3986 					      eth->h_source);
3987 
3988 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3989 					      &pbd_e2->data.mac_addr.dst_mid,
3990 					      &pbd_e2->data.mac_addr.dst_lo,
3991 					      eth->h_dest);
3992 		} else {
3993 			if (bp->flags & TX_SWITCHING)
3994 				bnx2x_set_fw_mac_addr(
3995 						&pbd_e2->data.mac_addr.dst_hi,
3996 						&pbd_e2->data.mac_addr.dst_mid,
3997 						&pbd_e2->data.mac_addr.dst_lo,
3998 						eth->h_dest);
3999 #ifdef BNX2X_STOP_ON_ERROR
4000 			/* Enforce security is always set in Stop on Error -
4001 			 * source mac should be present in the parsing BD
4002 			 */
4003 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4004 					      &pbd_e2->data.mac_addr.src_mid,
4005 					      &pbd_e2->data.mac_addr.src_lo,
4006 					      eth->h_source);
4007 #endif
4008 		}
4009 
4010 		SET_FLAG(pbd_e2_parsing_data,
4011 			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4012 	} else {
4013 		u16 global_data = 0;
4014 		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4015 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4016 		/* Set PBD in checksum offload case */
4017 		if (xmit_type & XMIT_CSUM)
4018 			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4019 
4020 		SET_FLAG(global_data,
4021 			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4022 		pbd_e1x->global_data |= cpu_to_le16(global_data);
4023 	}
4024 
4025 	/* Setup the data pointer of the first BD of the packet */
4026 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4027 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4028 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4029 	pkt_size = tx_start_bd->nbytes;
4030 
4031 	DP(NETIF_MSG_TX_QUEUED,
4032 	   "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4033 	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4034 	   le16_to_cpu(tx_start_bd->nbytes),
4035 	   tx_start_bd->bd_flags.as_bitfield,
4036 	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4037 
4038 	if (xmit_type & XMIT_GSO) {
4039 
4040 		DP(NETIF_MSG_TX_QUEUED,
4041 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4042 		   skb->len, hlen, skb_headlen(skb),
4043 		   skb_shinfo(skb)->gso_size);
4044 
4045 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4046 
4047 		if (unlikely(skb_headlen(skb) > hlen)) {
4048 			nbd++;
4049 			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4050 						 &tx_start_bd, hlen,
4051 						 bd_prod);
4052 		}
4053 		if (!CHIP_IS_E1x(bp))
4054 			pbd_e2_parsing_data |=
4055 				(skb_shinfo(skb)->gso_size <<
4056 				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4057 				 ETH_TX_PARSE_BD_E2_LSO_MSS;
4058 		else
4059 			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4060 	}
4061 
4062 	/* Set the PBD's parsing_data field if not zero
4063 	 * (for the chips newer than 57711).
4064 	 */
4065 	if (pbd_e2_parsing_data)
4066 		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4067 
4068 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4069 
4070 	/* Handle fragmented skb */
4071 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4072 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4073 
4074 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4075 					   skb_frag_size(frag), DMA_TO_DEVICE);
4076 		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4077 			unsigned int pkts_compl = 0, bytes_compl = 0;
4078 
4079 			DP(NETIF_MSG_TX_QUEUED,
4080 			   "Unable to map page - dropping packet...\n");
4081 
4082 			/* we need to unmap all buffers already mapped
4083 			 * for this SKB;
4084 			 * first_bd->nbd needs to be properly updated
4085 			 * before the call to bnx2x_free_tx_pkt
4086 			 */
4087 			first_bd->nbd = cpu_to_le16(nbd);
4088 			bnx2x_free_tx_pkt(bp, txdata,
4089 					  TX_BD(txdata->tx_pkt_prod),
4090 					  &pkts_compl, &bytes_compl);
4091 			return NETDEV_TX_OK;
4092 		}
4093 
4094 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4095 		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4096 		if (total_pkt_bd == NULL)
4097 			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4098 
4099 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4100 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4101 		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4102 		le16_add_cpu(&pkt_size, skb_frag_size(frag));
4103 		nbd++;
4104 
4105 		DP(NETIF_MSG_TX_QUEUED,
4106 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4107 		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4108 		   le16_to_cpu(tx_data_bd->nbytes));
4109 	}
4110 
4111 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4112 
4113 	/* update with actual num BDs */
4114 	first_bd->nbd = cpu_to_le16(nbd);
4115 
4116 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4117 
4118 	/* now send a tx doorbell, counting the next BD
4119 	 * if the packet contains or ends with it
4120 	 */
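	/* The last BD of each ring page is a "next page" pointer BD; if the
	 * packet wraps onto (or ends at) a page boundary, that BD is consumed
	 * as well and must be counted.
	 */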
4121 	if (TX_BD_POFF(bd_prod) < nbd)
4122 		nbd++;
4123 
4124 	/* total_pkt_bytes should be set on the first data BD if
4125 	 * it's not an LSO packet and there is more than one
4126 	 * data BD. In this case pkt_size is limited by the MTU value.
4127 	 * However, we prefer to set it for an LSO packet as well (even though
4128 	 * we don't have to) in order to save some CPU cycles in the non-LSO
4129 	 * case, where we care much more about them.
4130 	 */
4131 	if (total_pkt_bd != NULL)
4132 		total_pkt_bd->total_pkt_bytes = pkt_size;
4133 
4134 	if (pbd_e1x)
4135 		DP(NETIF_MSG_TX_QUEUED,
4136 		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4137 		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4138 		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4139 		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4140 		    le16_to_cpu(pbd_e1x->total_hlen_w));
4141 	if (pbd_e2)
4142 		DP(NETIF_MSG_TX_QUEUED,
4143 		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4144 		   pbd_e2,
4145 		   pbd_e2->data.mac_addr.dst_hi,
4146 		   pbd_e2->data.mac_addr.dst_mid,
4147 		   pbd_e2->data.mac_addr.dst_lo,
4148 		   pbd_e2->data.mac_addr.src_hi,
4149 		   pbd_e2->data.mac_addr.src_mid,
4150 		   pbd_e2->data.mac_addr.src_lo,
4151 		   pbd_e2->parsing_data);
4152 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4153 
4154 	netdev_tx_sent_queue(txq, skb->len);
4155 
4156 	skb_tx_timestamp(skb);
4157 
4158 	txdata->tx_pkt_prod++;
4159 	/*
4160 	 * Make sure that the BD data is updated before updating the producer
4161 	 * since FW might read the BD right after the producer is updated.
4162 	 * This is only applicable for weak-ordered memory model archs such
4163 	 * as IA-64. The following barrier is also mandatory since the FW
4164 	 * assumes packets must have BDs.
4165 	 */
4166 	wmb();
4167 
4168 	txdata->tx_db.data.prod += nbd;
4169 	barrier();
4170 
4171 	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4172 
4173 	mmiowb();
4174 
4175 	txdata->tx_bd_prod += nbd;
4176 
4177 	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4178 		netif_tx_stop_queue(txq);
4179 
4180 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
4181 		 * ordering of set_bit() in netif_tx_stop_queue() and read of
4182 		 * fp->bd_tx_cons */
4183 		smp_mb();
4184 
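		/* Re-check ring availability after the barrier: bnx2x_tx_int()
		 * may have freed BDs in the meantime, and waking the queue
		 * here avoids a lost wakeup.
		 */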
4185 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4186 		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4187 			netif_tx_wake_queue(txq);
4188 	}
4189 	txdata->tx_pkt++;
4190 
4191 	return NETDEV_TX_OK;
4192 }
4193 
4194 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4195 {
4196 	int mfw_vn = BP_FW_MB_IDX(bp);
4197 	u32 tmp;
4198 
4199 	/* If the shmem shouldn't affect the configuration, fall back to an identity mapping */
4200 	if (!IS_MF_BD(bp)) {
4201 		int i;
4202 
4203 		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4204 			c2s_map[i] = i;
4205 		*c2s_default = 0;
4206 
4207 		return;
4208 	}
4209 
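	/* The maps read from shmem are byte-swapped to host order before the
	 * per-priority bytes are unpacked.
	 */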
4210 	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4211 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4212 	c2s_map[0] = tmp & 0xff;
4213 	c2s_map[1] = (tmp >> 8) & 0xff;
4214 	c2s_map[2] = (tmp >> 16) & 0xff;
4215 	c2s_map[3] = (tmp >> 24) & 0xff;
4216 
4217 	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4218 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4219 	c2s_map[4] = tmp & 0xff;
4220 	c2s_map[5] = (tmp >> 8) & 0xff;
4221 	c2s_map[6] = (tmp >> 16) & 0xff;
4222 	c2s_map[7] = (tmp >> 24) & 0xff;
4223 
4224 	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4225 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4226 	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4227 }
4228 
4229 /**
4230  * bnx2x_setup_tc - routine to configure net_device for multi tc
4231  *
4232  * @netdev: net device to configure
4233  * @tc: number of traffic classes to enable
4234  *
4235  * callback connected to the ndo_setup_tc function pointer
4236  */
4237 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4238 {
4239 	struct bnx2x *bp = netdev_priv(dev);
4240 	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4241 	int cos, prio, count, offset;
4242 
4243 	/* setup tc must be called under rtnl lock */
4244 	ASSERT_RTNL();
4245 
4246 	/* no traffic classes requested. Aborting */
4247 	if (!num_tc) {
4248 		netdev_reset_tc(dev);
4249 		return 0;
4250 	}
4251 
4252 	/* requested to support too many traffic classes */
4253 	if (num_tc > bp->max_cos) {
4254 		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4255 			  num_tc, bp->max_cos);
4256 		return -EINVAL;
4257 	}
4258 
4259 	/* declare amount of supported traffic classes */
4260 	if (netdev_set_num_tc(dev, num_tc)) {
4261 		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4262 		return -EINVAL;
4263 	}
4264 
4265 	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4266 
4267 	/* configure priority to traffic class mapping */
4268 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4269 		int outer_prio = c2s_map[prio];
4270 
4271 		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4272 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4273 		   "mapping priority %d to tc %d\n",
4274 		   outer_prio, bp->prio_to_cos[outer_prio]);
4275 	}
4276 
4277 	/* Use this configuration to differentiate tc0 from other COSes
4278 	   This can be used for ETS or PFC, and saves the effort of setting
4279 	   up a multi-class queue disc or negotiating DCBX with a switch
4280 	netdev_set_prio_tc_map(dev, 0, 0);
4281 	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4282 	for (prio = 1; prio < 16; prio++) {
4283 		netdev_set_prio_tc_map(dev, prio, 1);
4284 		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4285 	} */
4286 
4287 	/* configure traffic class to transmission queue mapping */
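	/* Each traffic class gets its own contiguous block of tx queues;
	 * e.g. with 4 ETH queues, tc0 maps to queues 0-3, tc1 to queues 4-7,
	 * and so on (illustrative example).
	 */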
4288 	for (cos = 0; cos < bp->max_cos; cos++) {
4289 		count = BNX2X_NUM_ETH_QUEUES(bp);
4290 		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4291 		netdev_set_tc_queue(dev, cos, count, offset);
4292 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4293 		   "mapping tc %d to offset %d count %d\n",
4294 		   cos, offset, count);
4295 	}
4296 
4297 	return 0;
4298 }
4299 
4300 int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4301 		     void *type_data)
4302 {
4303 	struct tc_mqprio_qopt *mqprio = type_data;
4304 
4305 	if (type != TC_SETUP_MQPRIO)
4306 		return -EOPNOTSUPP;
4307 
4308 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4309 
4310 	return bnx2x_setup_tc(dev, mqprio->num_tc);
4311 }
4312 
4313 /* called with rtnl_lock */
4314 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4315 {
4316 	struct sockaddr *addr = p;
4317 	struct bnx2x *bp = netdev_priv(dev);
4318 	int rc = 0;
4319 
4320 	if (!is_valid_ether_addr(addr->sa_data)) {
4321 		BNX2X_ERR("Requested MAC address is not valid\n");
4322 		return -EINVAL;
4323 	}
4324 
4325 	if (IS_MF_STORAGE_ONLY(bp)) {
4326 		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4327 		return -EINVAL;
4328 	}
4329 
4330 	if (netif_running(dev))  {
4331 		rc = bnx2x_set_eth_mac(bp, false);
4332 		if (rc)
4333 			return rc;
4334 	}
4335 
4336 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4337 
4338 	if (netif_running(dev))
4339 		rc = bnx2x_set_eth_mac(bp, true);
4340 
4341 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4342 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4343 
4344 	return rc;
4345 }
4346 
4347 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4348 {
4349 	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4350 	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4351 	u8 cos;
4352 
4353 	/* Common */
4354 
4355 	if (IS_FCOE_IDX(fp_index)) {
4356 		memset(sb, 0, sizeof(union host_hc_status_block));
4357 		fp->status_blk_mapping = 0;
4358 	} else {
4359 		/* status blocks */
4360 		if (!CHIP_IS_E1x(bp))
4361 			BNX2X_PCI_FREE(sb->e2_sb,
4362 				       bnx2x_fp(bp, fp_index,
4363 						status_blk_mapping),
4364 				       sizeof(struct host_hc_status_block_e2));
4365 		else
4366 			BNX2X_PCI_FREE(sb->e1x_sb,
4367 				       bnx2x_fp(bp, fp_index,
4368 						status_blk_mapping),
4369 				       sizeof(struct host_hc_status_block_e1x));
4370 	}
4371 
4372 	/* Rx */
4373 	if (!skip_rx_queue(bp, fp_index)) {
4374 		bnx2x_free_rx_bds(fp);
4375 
4376 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4377 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4378 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4379 			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
4380 			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
4381 
4382 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4383 			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
4384 			       sizeof(struct eth_fast_path_rx_cqe) *
4385 			       NUM_RCQ_BD);
4386 
4387 		/* SGE ring */
4388 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4389 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4390 			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
4391 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4392 	}
4393 
4394 	/* Tx */
4395 	if (!skip_tx_queue(bp, fp_index)) {
4396 		/* fastpath tx rings: tx_buf tx_desc */
4397 		for_each_cos_in_tx_queue(fp, cos) {
4398 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4399 
4400 			DP(NETIF_MSG_IFDOWN,
4401 			   "freeing tx memory of fp %d cos %d cid %d\n",
4402 			   fp_index, cos, txdata->cid);
4403 
4404 			BNX2X_FREE(txdata->tx_buf_ring);
4405 			BNX2X_PCI_FREE(txdata->tx_desc_ring,
4406 				txdata->tx_desc_mapping,
4407 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4408 		}
4409 	}
4410 	/* end of fastpath */
4411 }
4412 
4413 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4414 {
4415 	int i;
4416 	for_each_cnic_queue(bp, i)
4417 		bnx2x_free_fp_mem_at(bp, i);
4418 }
4419 
4420 void bnx2x_free_fp_mem(struct bnx2x *bp)
4421 {
4422 	int i;
4423 	for_each_eth_queue(bp, i)
4424 		bnx2x_free_fp_mem_at(bp, i);
4425 }
4426 
4427 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4428 {
4429 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4430 	if (!CHIP_IS_E1x(bp)) {
4431 		bnx2x_fp(bp, index, sb_index_values) =
4432 			(__le16 *)status_blk.e2_sb->sb.index_values;
4433 		bnx2x_fp(bp, index, sb_running_index) =
4434 			(__le16 *)status_blk.e2_sb->sb.running_index;
4435 	} else {
4436 		bnx2x_fp(bp, index, sb_index_values) =
4437 			(__le16 *)status_blk.e1x_sb->sb.index_values;
4438 		bnx2x_fp(bp, index, sb_running_index) =
4439 			(__le16 *)status_blk.e1x_sb->sb.running_index;
4440 	}
4441 }
4442 
4443 /* Returns the number of actually allocated BDs */
4444 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4445 			      int rx_ring_size)
4446 {
4447 	struct bnx2x *bp = fp->bp;
4448 	u16 ring_prod, cqe_ring_prod;
4449 	int i, failure_cnt = 0;
4450 
4451 	fp->rx_comp_cons = 0;
4452 	cqe_ring_prod = ring_prod = 0;
4453 
4454 	/* This routine is called only during fp init, so
4455 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4456 	 */
4457 	for (i = 0; i < rx_ring_size; i++) {
4458 		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4459 			failure_cnt++;
4460 			continue;
4461 		}
4462 		ring_prod = NEXT_RX_IDX(ring_prod);
4463 		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4464 		WARN_ON(ring_prod <= (i - failure_cnt));
4465 	}
4466 
4467 	if (failure_cnt)
4468 		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4469 			  i - failure_cnt, fp->index);
4470 
4471 	fp->rx_bd_prod = ring_prod;
4472 	/* Limit the CQE producer by the CQE ring size */
4473 	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4474 			       cqe_ring_prod);
4475 
4476 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4477 
4478 	return i - failure_cnt;
4479 }
4480 
4481 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4482 {
4483 	int i;
4484 
4485 	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4486 		struct eth_rx_cqe_next_page *nextpg;
4487 
4488 		nextpg = (struct eth_rx_cqe_next_page *)
4489 			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4490 		nextpg->addr_hi =
4491 			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4492 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4493 		nextpg->addr_lo =
4494 			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4495 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4496 	}
4497 }
4498 
4499 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4500 {
4501 	union host_hc_status_block *sb;
4502 	struct bnx2x_fastpath *fp = &bp->fp[index];
4503 	int ring_size = 0;
4504 	u8 cos;
4505 	int rx_ring_size = 0;
4506 
4507 	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4508 		rx_ring_size = MIN_RX_SIZE_NONTPA;
4509 		bp->rx_ring_size = rx_ring_size;
4510 	} else if (!bp->rx_ring_size) {
4511 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4512 
4513 		if (CHIP_IS_E3(bp)) {
4514 			u32 cfg = SHMEM_RD(bp,
4515 					   dev_info.port_hw_config[BP_PORT(bp)].
4516 					   default_cfg);
4517 
4518 			/* Decrease ring size for 1G functions */
4519 			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4520 			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
4521 				rx_ring_size /= 10;
4522 		}
4523 
4524 		/* allocate at least number of buffers required by FW */
4525 		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4526 				     MIN_RX_SIZE_TPA, rx_ring_size);
4527 
4528 		bp->rx_ring_size = rx_ring_size;
4529 	} else /* if rx_ring_size specified - use it */
4530 		rx_ring_size = bp->rx_ring_size;
4531 
4532 	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4533 
4534 	/* Common */
4535 	sb = &bnx2x_fp(bp, index, status_blk);
4536 
4537 	if (!IS_FCOE_IDX(index)) {
4538 		/* status blocks */
4539 		if (!CHIP_IS_E1x(bp)) {
4540 			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4541 						    sizeof(struct host_hc_status_block_e2));
4542 			if (!sb->e2_sb)
4543 				goto alloc_mem_err;
4544 		} else {
4545 			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4546 						     sizeof(struct host_hc_status_block_e1x));
4547 			if (!sb->e1x_sb)
4548 				goto alloc_mem_err;
4549 		}
4550 	}
4551 
4552 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4553 	 * set shortcuts for it.
4554 	 */
4555 	if (!IS_FCOE_IDX(index))
4556 		set_sb_shortcuts(bp, index);
4557 
4558 	/* Tx */
4559 	if (!skip_tx_queue(bp, index)) {
4560 		/* fastpath tx rings: tx_buf tx_desc */
4561 		for_each_cos_in_tx_queue(fp, cos) {
4562 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4563 
4564 			DP(NETIF_MSG_IFUP,
4565 			   "allocating tx memory of fp %d cos %d\n",
4566 			   index, cos);
4567 
4568 			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4569 						      sizeof(struct sw_tx_bd),
4570 						      GFP_KERNEL);
4571 			if (!txdata->tx_buf_ring)
4572 				goto alloc_mem_err;
4573 			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4574 							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4575 			if (!txdata->tx_desc_ring)
4576 				goto alloc_mem_err;
4577 		}
4578 	}
4579 
4580 	/* Rx */
4581 	if (!skip_rx_queue(bp, index)) {
4582 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4583 		bnx2x_fp(bp, index, rx_buf_ring) =
4584 			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4585 		if (!bnx2x_fp(bp, index, rx_buf_ring))
4586 			goto alloc_mem_err;
4587 		bnx2x_fp(bp, index, rx_desc_ring) =
4588 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4589 					sizeof(struct eth_rx_bd) * NUM_RX_BD);
4590 		if (!bnx2x_fp(bp, index, rx_desc_ring))
4591 			goto alloc_mem_err;
4592 
4593 		/* Seed all CQEs by 1s */
4594 		bnx2x_fp(bp, index, rx_comp_ring) =
4595 			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4596 					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4597 		if (!bnx2x_fp(bp, index, rx_comp_ring))
4598 			goto alloc_mem_err;
4599 
4600 		/* SGE ring */
4601 		bnx2x_fp(bp, index, rx_page_ring) =
4602 			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4603 				GFP_KERNEL);
4604 		if (!bnx2x_fp(bp, index, rx_page_ring))
4605 			goto alloc_mem_err;
4606 		bnx2x_fp(bp, index, rx_sge_ring) =
4607 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4608 					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4609 		if (!bnx2x_fp(bp, index, rx_sge_ring))
4610 			goto alloc_mem_err;
4611 		/* RX BD ring */
4612 		bnx2x_set_next_page_rx_bd(fp);
4613 
4614 		/* CQ ring */
4615 		bnx2x_set_next_page_rx_cq(fp);
4616 
4617 		/* BDs */
4618 		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4619 		if (ring_size < rx_ring_size)
4620 			goto alloc_mem_err;
4621 	}
4622 
4623 	return 0;
4624 
4625 /* handles low memory cases */
4626 alloc_mem_err:
4627 	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4628 						index, ring_size);
4629 	/* FW will drop all packets if the queue is not big enough;
4630 	 * in these cases we disable the queue.
4631 	 * The minimum size is different for OOO, TPA and non-TPA queues.
4632 	 */
4633 	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4634 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4635 			/* release memory allocated for this queue */
4636 			bnx2x_free_fp_mem_at(bp, index);
4637 			return -ENOMEM;
4638 	}
4639 	return 0;
4640 }
4641 
4642 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4643 {
4644 	if (!NO_FCOE(bp))
4645 		/* FCoE */
4646 		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4647 			/* we will fail the load process instead of marking
4648 			 * NO_FCOE_FLAG
4649 			 */
4650 			return -ENOMEM;
4651 
4652 	return 0;
4653 }
4654 
4655 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4656 {
4657 	int i;
4658 
4659 	/* 1. Allocate FP for leading - fatal if error
4660 	 * 2. Allocate RSS - fix number of queues if error
4661 	 */
4662 
4663 	/* leading */
4664 	if (bnx2x_alloc_fp_mem_at(bp, 0))
4665 		return -ENOMEM;
4666 
4667 	/* RSS */
4668 	for_each_nondefault_eth_queue(bp, i)
4669 		if (bnx2x_alloc_fp_mem_at(bp, i))
4670 			break;
4671 
4672 	/* handle memory failures */
4673 	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4674 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4675 
4676 		WARN_ON(delta < 0);
4677 		bnx2x_shrink_eth_fp(bp, delta);
4678 		if (CNIC_SUPPORT(bp))
4679 			/* move non eth FPs next to last eth FP
4680 			 * must be done in that order
4681 			 * FCOE_IDX < FWD_IDX < OOO_IDX
4682 			 */
4683 
4684 			/* move the FCoE fp even if NO_FCOE_FLAG is on */
4685 			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4686 		bp->num_ethernet_queues -= delta;
4687 		bp->num_queues = bp->num_ethernet_queues +
4688 				 bp->num_cnic_queues;
4689 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4690 			  bp->num_queues + delta, bp->num_queues);
4691 	}
4692 
4693 	return 0;
4694 }
4695 
4696 void bnx2x_free_mem_bp(struct bnx2x *bp)
4697 {
4698 	int i;
4699 
4700 	for (i = 0; i < bp->fp_array_size; i++)
4701 		kfree(bp->fp[i].tpa_info);
4702 	kfree(bp->fp);
4703 	kfree(bp->sp_objs);
4704 	kfree(bp->fp_stats);
4705 	kfree(bp->bnx2x_txq);
4706 	kfree(bp->msix_table);
4707 	kfree(bp->ilt);
4708 }
4709 
4710 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4711 {
4712 	struct bnx2x_fastpath *fp;
4713 	struct msix_entry *tbl;
4714 	struct bnx2x_ilt *ilt;
4715 	int msix_table_size = 0;
4716 	int fp_array_size, txq_array_size;
4717 	int i;
4718 
4719 	/*
4720 	 * The biggest MSI-X table we might need is as a maximum number of fast
4721 	 * path IGU SBs plus default SB (for PF only).
4722 	 */
4723 	msix_table_size = bp->igu_sb_cnt;
4724 	if (IS_PF(bp))
4725 		msix_table_size++;
4726 	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4727 
4728 	/* fp array: RSS plus CNIC related L2 queues */
4729 	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4730 	bp->fp_array_size = fp_array_size;
4731 	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4732 
4733 	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4734 	if (!fp)
4735 		goto alloc_err;
4736 	for (i = 0; i < bp->fp_array_size; i++) {
4737 		fp[i].tpa_info =
4738 			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4739 				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4740 		if (!(fp[i].tpa_info))
4741 			goto alloc_err;
4742 	}
4743 
4744 	bp->fp = fp;
4745 
4746 	/* allocate sp objs */
4747 	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4748 			      GFP_KERNEL);
4749 	if (!bp->sp_objs)
4750 		goto alloc_err;
4751 
4752 	/* allocate fp_stats */
4753 	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4754 			       GFP_KERNEL);
4755 	if (!bp->fp_stats)
4756 		goto alloc_err;
4757 
4758 	/* Allocate memory for the transmission queues array */
4759 	txq_array_size =
4760 		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4761 	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4762 
4763 	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4764 				GFP_KERNEL);
4765 	if (!bp->bnx2x_txq)
4766 		goto alloc_err;
4767 
4768 	/* msix table */
4769 	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4770 	if (!tbl)
4771 		goto alloc_err;
4772 	bp->msix_table = tbl;
4773 
4774 	/* ilt */
4775 	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4776 	if (!ilt)
4777 		goto alloc_err;
4778 	bp->ilt = ilt;
4779 
4780 	return 0;
4781 alloc_err:
4782 	bnx2x_free_mem_bp(bp);
4783 	return -ENOMEM;
4784 }
4785 
4786 int bnx2x_reload_if_running(struct net_device *dev)
4787 {
4788 	struct bnx2x *bp = netdev_priv(dev);
4789 
4790 	if (unlikely(!netif_running(dev)))
4791 		return 0;
4792 
4793 	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4794 	return bnx2x_nic_load(bp, LOAD_NORMAL);
4795 }
4796 
4797 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4798 {
4799 	u32 sel_phy_idx = 0;
4800 	if (bp->link_params.num_phys <= 1)
4801 		return INT_PHY;
4802 
4803 	if (bp->link_vars.link_up) {
4804 		sel_phy_idx = EXT_PHY1;
4805 		/* In case link is SERDES, check if the EXT_PHY2 is the one */
4806 		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4807 		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4808 			sel_phy_idx = EXT_PHY2;
4809 	} else {
4810 
4811 		switch (bnx2x_phy_selection(&bp->link_params)) {
4812 		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4813 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4814 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4815 		       sel_phy_idx = EXT_PHY1;
4816 		       break;
4817 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4818 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4819 		       sel_phy_idx = EXT_PHY2;
4820 		       break;
4821 		}
4822 	}
4823 
4824 	return sel_phy_idx;
4825 }
4826 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4827 {
4828 	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4829 	/*
4830 	 * The selected active PHY index is always the post-swap index (when
4831 	 * PHY swapping is enabled). So when swapping is enabled, we need to
4832 	 * reverse the configuration.
4833 	 */
4834 
4835 	if (bp->link_params.multi_phy_config &
4836 	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4837 		if (sel_phy_idx == EXT_PHY1)
4838 			sel_phy_idx = EXT_PHY2;
4839 		else if (sel_phy_idx == EXT_PHY2)
4840 			sel_phy_idx = EXT_PHY1;
4841 	}
4842 	return LINK_CONFIG_IDX(sel_phy_idx);
4843 }
4844 
4845 #ifdef NETDEV_FCOE_WWNN
4846 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4847 {
4848 	struct bnx2x *bp = netdev_priv(dev);
4849 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4850 
4851 	switch (type) {
4852 	case NETDEV_FCOE_WWNN:
4853 		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4854 				cp->fcoe_wwn_node_name_lo);
4855 		break;
4856 	case NETDEV_FCOE_WWPN:
4857 		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4858 				cp->fcoe_wwn_port_name_lo);
4859 		break;
4860 	default:
4861 		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4862 		return -EINVAL;
4863 	}
4864 
4865 	return 0;
4866 }
4867 #endif
4868 
4869 /* called with rtnl_lock */
4870 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4871 {
4872 	struct bnx2x *bp = netdev_priv(dev);
4873 
4874 	if (pci_num_vf(bp->pdev)) {
4875 		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4876 		return -EPERM;
4877 	}
4878 
4879 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4880 		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4881 		return -EAGAIN;
4882 	}
4883 
4884 	/* This does not race with packet allocation
4885 	 * because the actual alloc size is
4886 	 * only updated as part of load
4887 	 */
4888 	dev->mtu = new_mtu;
4889 
4890 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4891 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4892 
4893 	return bnx2x_reload_if_running(dev);
4894 }
4895 
4896 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4897 				     netdev_features_t features)
4898 {
4899 	struct bnx2x *bp = netdev_priv(dev);
4900 
4901 	if (pci_num_vf(bp->pdev)) {
4902 		netdev_features_t changed = dev->features ^ features;
4903 
4904 		/* Revert the requested changes in features if they
4905 		 * would require internal reload of PF in bnx2x_set_features().
4906 		 */
4907 		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4908 			features &= ~NETIF_F_RXCSUM;
4909 			features |= dev->features & NETIF_F_RXCSUM;
4910 		}
4911 
4912 		if (changed & NETIF_F_LOOPBACK) {
4913 			features &= ~NETIF_F_LOOPBACK;
4914 			features |= dev->features & NETIF_F_LOOPBACK;
4915 		}
4916 	}
4917 
4918 	/* TPA requires Rx CSUM offloading */
4919 	if (!(features & NETIF_F_RXCSUM)) {
4920 		features &= ~NETIF_F_LRO;
4921 		features &= ~NETIF_F_GRO;
4922 	}
4923 
4924 	return features;
4925 }
4926 
4927 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4928 {
4929 	struct bnx2x *bp = netdev_priv(dev);
4930 	netdev_features_t changes = features ^ dev->features;
4931 	bool bnx2x_reload = false;
4932 	int rc;
4933 
4934 	/* VFs or non-SRIOV PFs should be able to change the loopback feature */
4935 	if (!pci_num_vf(bp->pdev)) {
4936 		if (features & NETIF_F_LOOPBACK) {
4937 			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4938 				bp->link_params.loopback_mode = LOOPBACK_BMAC;
4939 				bnx2x_reload = true;
4940 			}
4941 		} else {
4942 			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4943 				bp->link_params.loopback_mode = LOOPBACK_NONE;
4944 				bnx2x_reload = true;
4945 			}
4946 		}
4947 	}
4948 
4949 	/* if GRO is changed while LRO is enabled, don't force a reload */
4950 	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4951 		changes &= ~NETIF_F_GRO;
4952 
4953 	/* if GRO is changed while HW TPA is off, don't force a reload */
4954 	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4955 		changes &= ~NETIF_F_GRO;
4956 
4957 	if (changes)
4958 		bnx2x_reload = true;
4959 
4960 	if (bnx2x_reload) {
4961 		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4962 			dev->features = features;
4963 			rc = bnx2x_reload_if_running(dev);
4964 			return rc ? rc : 1;
4965 		}
4966 		/* else: bnx2x_nic_load() will be called at end of recovery */
4967 	}
4968 
4969 	return 0;
4970 }
4971 
4972 void bnx2x_tx_timeout(struct net_device *dev)
4973 {
4974 	struct bnx2x *bp = netdev_priv(dev);
4975 
4976 #ifdef BNX2X_STOP_ON_ERROR
4977 	if (!bp->panic)
4978 		bnx2x_panic();
4979 #endif
4980 
4981 	/* This allows the netif to be shut down gracefully before resetting */
4982 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4983 }
4984 
4985 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4986 {
4987 	struct net_device *dev = pci_get_drvdata(pdev);
4988 	struct bnx2x *bp;
4989 
4990 	if (!dev) {
4991 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4992 		return -ENODEV;
4993 	}
4994 	bp = netdev_priv(dev);
4995 
4996 	rtnl_lock();
4997 
4998 	pci_save_state(pdev);
4999 
5000 	if (!netif_running(dev)) {
5001 		rtnl_unlock();
5002 		return 0;
5003 	}
5004 
5005 	netif_device_detach(dev);
5006 
5007 	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5008 
5009 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5010 
5011 	rtnl_unlock();
5012 
5013 	return 0;
5014 }
5015 
5016 int bnx2x_resume(struct pci_dev *pdev)
5017 {
5018 	struct net_device *dev = pci_get_drvdata(pdev);
5019 	struct bnx2x *bp;
5020 	int rc;
5021 
5022 	if (!dev) {
5023 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5024 		return -ENODEV;
5025 	}
5026 	bp = netdev_priv(dev);
5027 
5028 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5029 		BNX2X_ERR("Handling parity error recovery. Try again later\n");
5030 		return -EAGAIN;
5031 	}
5032 
5033 	rtnl_lock();
5034 
5035 	pci_restore_state(pdev);
5036 
5037 	if (!netif_running(dev)) {
5038 		rtnl_unlock();
5039 		return 0;
5040 	}
5041 
5042 	bnx2x_set_power_state(bp, PCI_D0);
5043 	netif_device_attach(dev);
5044 
5045 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
5046 
5047 	rtnl_unlock();
5048 
5049 	return rc;
5050 }
5051 
5052 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5053 			      u32 cid)
5054 {
5055 	if (!cxt) {
5056 		BNX2X_ERR("bad context pointer %p\n", cxt);
5057 		return;
5058 	}
5059 
5060 	/* ustorm cxt validation */
5061 	cxt->ustorm_ag_context.cdu_usage =
5062 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5063 			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5064 	/* xcontext validation */
5065 	cxt->xstorm_ag_context.cdu_reserved =
5066 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5067 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5068 }
5069 
5070 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5071 				    u8 fw_sb_id, u8 sb_index,
5072 				    u8 ticks)
5073 {
5074 	u32 addr = BAR_CSTRORM_INTMEM +
5075 		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5076 	REG_WR8(bp, addr, ticks);
5077 	DP(NETIF_MSG_IFUP,
5078 	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
5079 	   port, fw_sb_id, sb_index, ticks);
5080 }
5081 
5082 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5083 				    u16 fw_sb_id, u8 sb_index,
5084 				    u8 disable)
5085 {
5086 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5087 	u32 addr = BAR_CSTRORM_INTMEM +
5088 		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5089 	u8 flags = REG_RD8(bp, addr);
5090 	/* clear and set */
5091 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
5092 	flags |= enable_flag;
5093 	REG_WR8(bp, addr, flags);
5094 	DP(NETIF_MSG_IFUP,
5095 	   "port %x fw_sb_id %d sb_index %d disable %d\n",
5096 	   port, fw_sb_id, sb_index, disable);
5097 }
5098 
5099 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5100 				    u8 sb_index, u8 disable, u16 usec)
5101 {
5102 	int port = BP_PORT(bp);
5103 	u8 ticks = usec / BNX2X_BTR;
5104 
5105 	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5106 
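	/* A zero coalescing value disables this index as well */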
5107 	disable = disable ? 1 : (usec ? 0 : 1);
5108 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5109 }
5110 
5111 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5112 			    u32 verbose)
5113 {
5114 	smp_mb__before_atomic();
5115 	set_bit(flag, &bp->sp_rtnl_state);
5116 	smp_mb__after_atomic();
5117 	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5118 	   flag);
5119 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
5120 }
5121