/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);

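/* Fill TX BDs for one XDP packet.  The first BD points at the linear
 * data described by @mapping/@len; any frags attached to @xdp are DMA
 * mapped and chained into the following BDs.  Returns the software
 * tx_buf of the first BD, or NULL if mapping a frag fails.
 */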
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len,
				   struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	int num_frags = 0;
	u32 flags;
	u16 prod;
	int i;

	if (xdp && xdp_buff_has_frags(xdp)) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		num_frags = sinfo->nr_frags;
	}

	/* fill up the first buffer */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->nr_frags = num_frags;
	if (xdp)
		tx_buf->page = virt_to_head_page(xdp->data);

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) |
		((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
		bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	/* now let us fill up the frags into the next buffers */
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];
		struct bnxt_sw_tx_bd *frag_tx_buf;
		struct pci_dev *pdev = bp->pdev;
		dma_addr_t frag_mapping;
		int frag_len;

		prod = NEXT_TX(prod);
		txr->tx_prod = prod;

		/* fill up the next buffer with this frag */
		frag_tx_buf = &txr->tx_buf_ring[prod];
		frag_tx_buf->page = skb_frag_page(frag);

		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		frag_len = skb_frag_size(frag);
		frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
						frag_len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
			return NULL;

		dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);

		flags = frag_len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);

		len = frag_len;
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
			TX_BD_FLAGS_PACKET_END);
	/* Sync TX BD */
	wmb();
	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	return tx_buf;
}

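/* Queue an XDP_TX packet on the XDP TX ring.  The current RX producer
 * index is stashed in the tx_buf so the RX doorbell can be written once
 * the transmit completes (see bnxt_tx_int_xdp()).
 */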
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod,
			    struct xdp_buff *xdp)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}

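/* Queue a frame submitted through ndo_xdp_xmit (XDP_REDIRECT from
 * another device).  The xdp_frame is saved so it can be returned on TX
 * completion, along with the DMA mapping to unmap.
 */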
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, 0);
}

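/* TX completion handler for the XDP TX ring.  Redirected frames are
 * unmapped and returned with xdp_return_frame(); for XDP_TX packets the
 * frag pages are recycled to the page pool and the RX doorbell is
 * written to repost the reused RX buffers.
 */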
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i, j, frags;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;

			frags = tx_buf->nr_frags;
			for (j = 0; j < frags; j++) {
				tx_cons = NEXT_TX(tx_cons);
				tx_buf = &txr->tx_buf_ring[tx_cons];
				page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
			}
		}
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}

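/* Return true if an XDP program is attached to this RX ring. */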
bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);

	return !!xdp_prog;
}

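/* Initialize an xdp_buff for a received packet: sync the RX buffer for
 * CPU access and point the buff at the packet data with the driver's
 * RX headroom.
 */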
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			u16 cons, u8 *data_ptr, unsigned int len,
			struct xdp_buff *xdp)
{
	u32 buflen = BNXT_RX_PAGE_SIZE;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 offset;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);

	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
}

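/* Return any frag pages attached to the xdp_buff to the page pool and
 * reset the frag count.
 */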
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
			      struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	int i;

	if (!xdp || !xdp_buff_has_frags(xdp))
		return;
	shinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < shinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&shinfo->frags[i]);

		page_pool_recycle_direct(rxr->page_pool, page);
	}
	shinfo->nr_frags = 0;
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
		 unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 tx_needed = 1;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	offset = bp->rx_offset;

	txr = rxr->bnapi->tx_ring;
	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}

	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		*event = 0;

		if (unlikely(xdp_buff_has_frags(&xdp))) {
			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);

			tx_needed += sinfo->nr_frags;
			*event = BNXT_AGG_EVENT;
		}

		if (tx_avail < tx_needed) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, &xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);

		*event |= BNXT_TX_EVENT;
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod), &xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		dma_unmap_page_attrs(&pdev->dev, mapping,
				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, &xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		bnxt_xdp_buff_frags_free(rxr, &xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}

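/* ndo_xdp_xmit handler.  Frames are DMA mapped and queued on an XDP TX
 * ring selected by CPU ID; the TX doorbell is only written when
 * XDP_XMIT_FLUSH is set.  Returns the number of frames queued.
 */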
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int nxmit = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
		return -EINVAL;

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_lock(&txr->xdp_tx_lock);

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!bnxt_tx_avail(bp, txr))
			break;

		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping))
			break;

		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_unlock(&txr->xdp_tx_lock);

	return nxmit;
}

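/* Install or remove the XDP program.  This switches the RX rings to or
 * from page mode, reserves dedicated XDP TX rings, and restarts the NIC
 * if it is running.
 */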
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && !prog->aux->xdp_has_frags &&
	    bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

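/* ndo_bpf entry point; only XDP_SETUP_PROG is supported. */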
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

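/* Finish an skb built from a multi-buffer xdp_buff: set the checksum
 * state from the RX completion and attach the xdp frags as skb
 * fragments.
 */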
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
		   struct page_pool *pool, struct xdp_buff *xdp,
		   struct rx_cmp_ext *rxcmp1)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!skb)
		return NULL;
	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (bp->dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	}
	xdp_update_skb_shared_info(skb, num_frags,
				   sinfo->xdp_frags_size,
				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
				   xdp_buff_is_frag_pfmemalloc(xdp));
	return skb;
}