/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

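/* The rx/tx fixup handlers update the per-cpu counters under the
 * u64_stats syncp so that the 64-bit counters can be read without
 * tearing on 32-bit systems.
 */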
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

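/* Transmit handler. When a real device is attached, the packet is handed
 * to the rmnet egress handler for transmission on that device; otherwise
 * it is counted as a TX drop and freed.
 */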
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

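/* ndo_init/ndo_uninit pair: allocate the per-cpu stats and GRO cells when
 * the device is registered and release them again on unregistration.
 */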
static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

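/* Aggregate the per-cpu counters. The rx/tx counters are read inside the
 * u64_stats retry loop for a consistent snapshot; tx_drops is updated
 * with this_cpu_inc() outside the syncp, so it is summed outside the
 * loop.
 */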
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats;
	struct rmnet_pcpu_stats *pcpu_ptr;
	unsigned int cpu, start;

	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave  = rmnet_add_bridge,
	.ndo_del_slave  = rmnet_del_bridge,
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

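/* ethtool checksum offload statistic names. The order must match the
 * layout of struct rmnet_priv_stats, since rmnet_get_ethtool_stats()
 * copies that structure out verbatim.
 */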
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
};

static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		break;
	}
}

static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;

	if (!data)
		return;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	eth_random_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;

	/* This perm addr will be used as interface identifier by IPv6 */
	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(rmnet_dev->perm_addr);
}

/* Exposed API */

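/* Create a new rmnet virtual device on top of @real_dev with mux ID @id,
 * register it with the core, and bind it to endpoint @ep. Fails if the
 * endpoint already has an egress device or the mux ID is already in use
 * on the port.
 */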
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	int rc;

	if (ep->egress_dev)
		return -EINVAL;

	if (rmnet_get_endpoint(port, id))
		return -EBUSY;

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	priv->real_dev = real_dev;

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv->mux_id = id;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

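/* Pause or restart the virtual device's TX queue in response to flow
 * control commands from the underlying transport.
 */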
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect a similar number of enable and disable
	 * commands, optimize for the disable path: it is more latency
	 * sensitive than enable.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}