// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/mm.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>

#include "mana.h"

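/* Transmit an XDP_TX skb on the TX queue it is mapped to: push the Ethernet
 * header back onto the skb, send it under the netdev TX queue lock, and count
 * the packet as dropped if transmission does not complete.
 */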
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
{
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct netdev_queue *ndevtxq;
	int rc;

	__skb_push(skb, ETH_HLEN);

	ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
	__netif_tx_lock(ndevtxq, smp_processor_id());

	rc = mana_start_xmit(skb, ndev);

	__netif_tx_unlock(ndevtxq);

	if (dev_xmit_complete(rc))
		return;

	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

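/* Build an skb from a single xdp_frame, map it to the given TX queue and hand
 * it to mana_xdp_tx(); returns -ENOMEM if the skb cannot be built.
 */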
static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
			    u16 q_idx)
{
	struct sk_buff *skb;

	skb = xdp_build_skb_from_frame(frame, ndev);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_set_queue_mapping(skb, q_idx);

	mana_xdp_tx(skb, ndev);

	return 0;
}

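/* Transmit a batch of XDP frames on the TX queue selected by the current CPU.
 * Returns the number of frames sent and updates the per-queue xdp_xmit
 * counter; returns 0 if the port is not up.
 */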
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct mana_stats_tx *tx_stats;
	int i, count = 0;
	u16 q_idx;

	if (unlikely(!apc->port_is_up))
		return 0;

	q_idx = smp_processor_id() % ndev->real_num_tx_queues;

	for (i = 0; i < n; i++) {
		if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
			break;

		count++;
	}

	tx_stats = &apc->tx_qp[q_idx].txq.stats;

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->xdp_xmit += count;
	u64_stats_update_end(&tx_stats->syncp);

	return count;
}

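/* Run the RX queue's XDP program (if any) on a received buffer under RCU
 * protection and return the verdict. XDP_REDIRECT is handled here, with the
 * flush deferred via rxq->xdp_flush; the remaining actions are left to the
 * caller. Returns XDP_PASS when no program is attached.
 */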
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
	struct mana_stats_rx *rx_stats;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	rcu_read_lock();
	prog = rcu_dereference(rxq->bpf_prog);

	if (!prog)
		goto out;

	xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
	xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);

	act = bpf_prog_run_xdp(prog, xdp);

	rx_stats = &rxq->stats;

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_DROP:
		break;

	case XDP_REDIRECT:
		rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
		if (!rxq->xdp_rc) {
			rxq->xdp_flush = true;

			u64_stats_update_begin(&rx_stats->syncp);
			rx_stats->packets++;
			rx_stats->bytes += pkt_len;
			rx_stats->xdp_redirect++;
			u64_stats_update_end(&rx_stats->syncp);

			break;
		}

		fallthrough;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
	}

out:
	rcu_read_unlock();

	return act;
}

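/* Buffer space needed for a fragment of the given length, including the
 * aligned skb_shared_info that follows the packet data.
 */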
static unsigned int mana_xdp_fraglen(unsigned int len)
{
	return SKB_DATA_ALIGN(len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

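/* Return the XDP program attached to the port; caller must hold RTNL. */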
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
{
	ASSERT_RTNL();

	return apc->bpf_prog;
}

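/* Return the XDP program currently installed on the RX queues. */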
static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
{
	return rtnl_dereference(apc->rxqs[0]->bpf_prog);
}

/* Set xdp program on channels */
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
	unsigned int num_queues = apc->num_queues;
	int i;

	ASSERT_RTNL();

	if (old_prog == prog)
		return;

	if (prog)
		bpf_prog_add(prog, num_queues);

	for (i = 0; i < num_queues; i++)
		rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);

	if (old_prog)
		for (i = 0; i < num_queues; i++)
			bpf_prog_put(old_prog);
}

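/* Attach (or detach, if prog is NULL) an XDP program to the port. The program
 * is rejected if an MTU-sized frame plus headroom and skb_shared_info would
 * not fit in one page; otherwise it is propagated to the channels when the
 * port is up.
 */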
static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct bpf_prog *old_prog;
	int buf_max;

	old_prog = mana_xdp_get(apc);

	if (!old_prog && !prog)
		return 0;

	buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
	if (prog && buf_max > PAGE_SIZE) {
		netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
			   ndev->mtu, buf_max);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	/* One refcnt of the prog is held by the caller already, so
	 * don't increase refcnt for this one.
	 */
	apc->bpf_prog = prog;

	if (old_prog)
		bpf_prog_put(old_prog);

	if (apc->port_is_up)
		mana_chn_setxdp(apc, prog);

	return 0;
}

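/* Handle netdev_bpf commands; only XDP_SETUP_PROG is supported. */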
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct netlink_ext_ack *extack = bpf->extack;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return mana_xdp_set(ndev, bpf->prog, extack);

	default:
		return -EOPNOTSUPP;
	}
}