// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc */
/* Copyright (C) 2021 Corigine, Inc */

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/xdp_sock_drv.h>
#include <trace/events/xdp.h>

#include "nfp_app.h"
#include "nfp_net.h"
#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"

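/* Stash an allocated XSK buffer in the ring's bookkeeping: reset the
 * descriptor's meta/done fields and record both the xdp_buff and its
 * frame DMA address, offset by the buffer pool's headroom.
 */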
static void
nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
			  struct xdp_buff *xdp)
{
	unsigned int headroom;

	headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);

	rx_ring->rxds[idx].fld.reserved = 0;
	rx_ring->rxds[idx].fld.meta_len_dd = 0;

	rx_ring->xsk_rxbufs[idx].xdp = xdp;
	rx_ring->xsk_rxbufs[idx].dma_addr =
		xsk_buff_xdp_get_frame_dma(xdp) + headroom;
}

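/* Clear a stashed entry without returning the xdp_buff to its pool,
 * for when ownership of the buffer has moved elsewhere.
 */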
void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
{
	rxbuf->dma_addr = 0;
	rxbuf->xdp = NULL;
}

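/* Return the stashed xdp_buff (if any) to its pool and clear the entry. */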
void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
{
	if (rxbuf->xdp)
		xsk_buff_free(rxbuf->xdp);

	nfp_net_xsk_rx_unstash(rxbuf);
}

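/* Free all stashed RX buffers. Only cnt - 1 slots are ever in use;
 * nfp_net_rx_space() keeps one descriptor free, the usual trick for
 * telling a full ring from an empty one.
 */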
void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (!rx_ring->cnt)
		return;

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]);
}

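/* Refill the RX free list from the XSK buffer pool: allocate and stash
 * buffers, write their DMA addresses into freelist descriptors, and
 * publish them to the device with a single write-pointer update.
 */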
void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct xsk_buff_pool *pool = r_vec->xsk_pool;
	unsigned int wr_idx, wr_ptr_add = 0;
	struct xdp_buff *xdp;

	while (nfp_net_rx_space(rx_ring)) {
		wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

		xdp = xsk_buff_alloc(pool);
		if (!xdp)
			break;

		nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);

		/* The DMA address field in the freelist descriptor is 48 bits
		 * wide on NFP3800, so the *_48b macro is used accordingly;
		 * it is also fine to write a 40-bit address, since the top
		 * 8 bits are set to 0.
		 */
		nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
					  rx_ring->xsk_rxbufs[wr_idx].dma_addr);

		rx_ring->wr_p++;
		wr_ptr_add++;
	}

	/* Ensure all records are visible before incrementing write counter. */
	wmb();
	nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add);
}

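/* Account a dropped RX frame and release its buffer back to the pool. */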
void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
			 struct nfp_net_xsk_rx_buf *xrxbuf)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_xsk_rx_free(xrxbuf);
}

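/* Thin wrappers around the xsk_buff_pool DMA helpers; the attrs argument
 * is 0 in both directions, and @dev is unused on the unmap side.
 */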
static void nfp_net_xsk_pool_unmap(struct device *dev,
				   struct xsk_buff_pool *pool)
{
	xsk_pool_dma_unmap(pool, 0);
}

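/* Map the pool's buffers for DMA towards @dev; returns 0 on success or
 * a negative errno from xsk_pool_dma_map().
 */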
static int nfp_net_xsk_pool_map(struct device *dev, struct xsk_buff_pool *pool)
{
	return xsk_pool_dma_map(pool, dev, 0);
}

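/* Install (pool != NULL) or remove (pool == NULL) an AF_XDP buffer pool
 * on @queue_id. The new pool is DMA-mapped before being swapped in via a
 * data path clone and ring reconfig, and the previous pool is unmapped
 * only afterwards, so an active queue never sees an unmapped pool.
 */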
int nfp_net_xsk_setup_pool(struct net_device *netdev,
			   struct xsk_buff_pool *pool, u16 queue_id)
{
	struct nfp_net *nn = netdev_priv(netdev);

	struct xsk_buff_pool *prev_pool;
	struct nfp_net_dp *dp;
	int err;

	/* NFDK doesn't implement xsk yet. */
	if (nn->dp.ops->version == NFP_NFD_VER_NFDK)
		return -EOPNOTSUPP;

	/* Reject on old FWs so we can drop some checks in the datapath. */
	if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		return -EOPNOTSUPP;
	if (!nn->dp.chained_metadata_format)
		return -EOPNOTSUPP;

	/* Install */
	if (pool) {
		err = nfp_net_xsk_pool_map(nn->dp.dev, pool);
		if (err)
			return err;
	}

	/* Reconfig/swap */
	dp = nfp_net_clone_dp(nn);
	if (!dp) {
		err = -ENOMEM;
		goto err_unmap;
	}

	prev_pool = dp->xsk_pools[queue_id];
	dp->xsk_pools[queue_id] = pool;

	err = nfp_net_ring_reconfig(nn, dp, NULL);
	if (err)
		goto err_unmap;

	/* Uninstall */
	if (prev_pool)
		nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool);

	return 0;
err_unmap:
	if (pool)
		nfp_net_xsk_pool_unmap(nn->dp.dev, pool);

	return err;
}

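/* Presumably wired up as the driver's .ndo_xsk_wakeup callback: kick the
 * queue's NAPI context so pending AF_XDP work gets processed.
 */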
int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* queue_id comes from a zero-copy socket installed with
	 * XDP_SETUP_XSK_POOL, so it must be within our vector range.
	 * Moreover, our napi structs are statically allocated, so we can
	 * always kick them without worrying whether a reconfig is in
	 * progress or the interface is down.
	 */
	napi_schedule(&nn->r_vecs[queue_id].napi);

	return 0;
}