/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * RMNET Data virtual network driver
 *
 */
16
17 #include <linux/etherdevice.h>
18 #include <linux/if_arp.h>
19 #include <net/pkt_sched.h>
20 #include "rmnet_config.h"
21 #include "rmnet_handlers.h"
22 #include "rmnet_private.h"
23 #include "rmnet_map.h"
24 #include "rmnet_vnd.h"
25
26 /* RX/TX Fixup */
27
rmnet_vnd_rx_fixup(struct sk_buff * skb,struct net_device * dev)28 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
29 {
30 dev->stats.rx_packets++;
31 dev->stats.rx_bytes += skb->len;
32 }
33
rmnet_vnd_tx_fixup(struct sk_buff * skb,struct net_device * dev)34 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
35 {
36 dev->stats.tx_packets++;
37 dev->stats.tx_bytes += skb->len;
38 }
39
40 /* Network Device Operations */
41
rmnet_vnd_start_xmit(struct sk_buff * skb,struct net_device * dev)42 static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
43 struct net_device *dev)
44 {
45 struct rmnet_priv *priv;
46
47 priv = netdev_priv(dev);
48 if (priv->local_ep.egress_dev) {
49 rmnet_egress_handler(skb, &priv->local_ep);
50 } else {
51 dev->stats.tx_dropped++;
52 kfree_skb(skb);
53 }
54 return NETDEV_TX_OK;
55 }
56
rmnet_vnd_change_mtu(struct net_device * rmnet_dev,int new_mtu)57 static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
58 {
59 if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
60 return -EINVAL;
61
62 rmnet_dev->mtu = new_mtu;
63 return 0;
64 }
65
rmnet_vnd_get_iflink(const struct net_device * dev)66 static int rmnet_vnd_get_iflink(const struct net_device *dev)
67 {
68 struct rmnet_priv *priv = netdev_priv(dev);
69
70 return priv->real_dev->ifindex;
71 }
72
73 static const struct net_device_ops rmnet_vnd_ops = {
74 .ndo_start_xmit = rmnet_vnd_start_xmit,
75 .ndo_change_mtu = rmnet_vnd_change_mtu,
76 .ndo_get_iflink = rmnet_vnd_get_iflink,
77 };
78
79 /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
80 * flags, ARP type, needed headroom, etc...
81 */
rmnet_vnd_setup(struct net_device * rmnet_dev)82 void rmnet_vnd_setup(struct net_device *rmnet_dev)
83 {
84 rmnet_dev->netdev_ops = &rmnet_vnd_ops;
85 rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
86 rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
87 random_ether_addr(rmnet_dev->dev_addr);
88 rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
89
90 /* Raw IP mode */
91 rmnet_dev->header_ops = NULL; /* No header */
92 rmnet_dev->type = ARPHRD_RAWIP;
93 rmnet_dev->hard_header_len = 0;
94 rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
95
96 rmnet_dev->needs_free_netdev = true;
97 }
98
99 /* Exposed API */
100
rmnet_vnd_newlink(u8 id,struct net_device * rmnet_dev,struct rmnet_port * port,struct net_device * real_dev)101 int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
102 struct rmnet_port *port,
103 struct net_device *real_dev)
104 {
105 struct rmnet_priv *priv = netdev_priv(rmnet_dev);
106 int rc;
107
108 if (port->rmnet_devices[id])
109 return -EINVAL;
110
111 priv->real_dev = real_dev;
112
113 rc = register_netdevice(rmnet_dev);
114 if (!rc) {
115 port->rmnet_devices[id] = rmnet_dev;
116 port->nr_rmnet_devs++;
117
118 rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
119
120 priv->mux_id = id;
121
122 netdev_dbg(rmnet_dev, "rmnet dev created\n");
123 }
124
125 return rc;
126 }
127
/* Drops the bookkeeping for the VND at mux id @id. Returns -EINVAL when
 * @id is out of range or no device is registered in that slot.
 */
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port)
{
	if (id < RMNET_MAX_LOGICAL_EP && port->rmnet_devices[id]) {
		port->rmnet_devices[id] = NULL;
		port->nr_rmnet_devs--;
		return 0;
	}

	return -EINVAL;
}
137
rmnet_vnd_get_mux(struct net_device * rmnet_dev)138 u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
139 {
140 struct rmnet_priv *priv;
141
142 priv = netdev_priv(rmnet_dev);
143 return priv->mux_id;
144 }
145
146 /* Gets the logical endpoint configuration for a RmNet virtual network device
147 * node. Caller should confirm that devices is a RmNet VND before calling.
148 */
rmnet_vnd_get_endpoint(struct net_device * rmnet_dev)149 struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *rmnet_dev)
150 {
151 struct rmnet_priv *priv;
152
153 if (!rmnet_dev)
154 return NULL;
155
156 priv = netdev_priv(rmnet_dev);
157
158 return &priv->local_ep;
159 }
160
/* Pauses (@enable == 0) or resumes (@enable != 0) the VND's TX queue.
 * Always returns 0.
 */
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);

	/* Although we expect similar number of enable/disable
	 * commands, optimize for the disable. That is more
	 * latency sensitive than enable
	 */
	if (likely(!enable))
		netif_stop_queue(rmnet_dev);
	else
		netif_wake_queue(rmnet_dev);

	return 0;
}
175