// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

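/* Count one received packet and its bytes in this device's per-CPU
 * statistics. The u64_stats sequence counter lets readers see a
 * consistent 64-bit snapshot on 32-bit systems.
 */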
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

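/* Same as the RX fixup, but for the transmit counters. */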
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

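/* Transmit entry point: hand the skb to the rmnet egress handler when a
 * real device is attached, otherwise count a TX drop and free the skb.
 */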
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

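/* Bytes of headroom every egress packet needs on this port: the MAP
 * header, plus the UL checksum header when checksum offload is enabled.
 */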
static int rmnet_vnd_headroom(struct rmnet_port *port)
{
	u32 headroom;

	headroom = sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		headroom += sizeof(struct rmnet_map_ul_csum_header);

	return headroom;
}

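/* An rmnet device's MTU may not exceed RMNET_MAX_PACKET_SIZE, nor the
 * real device's MTU minus the MAP headroom added on egress.
 */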
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct rmnet_port *port;
	u32 headroom;

	port = rmnet_get_port_rtnl(priv->real_dev);

	headroom = rmnet_vnd_headroom(port);

	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
	    new_mtu > (priv->real_dev->mtu - headroom))
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

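/* Allocate the per-CPU statistics and the GRO cells used on the receive
 * path; unwind the stats allocation if gro_cells_init() fails.
 */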
static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

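/* Fold the per-CPU counters into one rtnl_link_stats64, re-reading a
 * CPU's snapshot whenever a writer updated it mid-read.
 */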
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats = { };
	struct rmnet_pcpu_stats *pcpu_ptr;
	struct rmnet_vnd_stats snapshot;
	unsigned int cpu, start;

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			snapshot = pcpu_ptr->stats;	/* struct assignment */
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.rx_pkts += snapshot.rx_pkts;
		total_stats.rx_bytes += snapshot.rx_bytes;
		total_stats.tx_pkts += snapshot.tx_pkts;
		total_stats.tx_bytes += snapshot.tx_bytes;
		total_stats.tx_drops += snapshot.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave  = rmnet_add_bridge,
	.ndo_del_slave  = rmnet_del_bridge,
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
};

static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		break;
	}
}

static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

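/* Note: this memcpy relies on struct rmnet_priv_stats holding one u64
 * per entry of rmnet_gstrings_stats, in the same order.
 */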
static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;

	if (!data)
		return;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets
 * the MTU, flags, ARP type, needed headroom, etc.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	eth_random_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;

	rmnet_dev->features |= NETIF_F_LLTX;

	/* This perm addr will be used as the interface identifier by IPv6 */
	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(rmnet_dev->perm_addr);
}

/* Exposed API */

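/* Create an rmnet device with the given MUX ID on top of real_dev. From
 * userspace this is typically reached via the rtnetlink ops, e.g.
 * (device names here are illustrative only):
 *
 *   ip link add link rmnet_ipa0 name rmnet0 type rmnet mux_id 1
 */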
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep,
		      struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	u32 headroom;
	int rc;

	if (rmnet_get_endpoint(port, id)) {
		NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
		return -EBUSY;
	}

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	priv->real_dev = real_dev;

	headroom = rmnet_vnd_headroom(port);

	if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
		return -EINVAL;
	}

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv->mux_id = id;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

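/* Pause or resume the device's TX queue, typically in response to MAP
 * flow-control commands from the modem.
 */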
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect similar numbers of enable and disable
	 * commands, optimize for the disable, which is more latency
	 * sensitive than the enable.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}

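/* Returns -1 if any muxed rmnet device's MTU no longer fits under the
 * real device's MTU minus the MAP headroom.
 */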
int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
{
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	u32 headroom;

	port = rmnet_get_port_rtnl(real_dev);

	headroom = rmnet_vnd_headroom(port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
			return -1;
	}

	return 0;
}

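/* Shrink the MTU of any muxed rmnet device that no longer fits under
 * the real device's MTU minus the MAP headroom.
 */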
int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
			     struct net_device *real_dev)
{
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	unsigned long bkt_ep;
	u32 headroom;

	headroom = rmnet_vnd_headroom(port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
			continue;

		if (rmnet_vnd_change_mtu(ep->egress_dev,
					 real_dev->mtu - headroom))
			return -1;
	}

	return 0;
}