/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

/* Helper Functions */

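/* Set skb->protocol from the IP version nibble of the first payload byte;
 * anything that is not IPv4 or IPv6 is left as a raw MAP frame.
 */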
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}

/* Generic handler */

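/* Hand a de-encapsulated packet to the stack on its virtual device: update
 * rx statistics, then pass the packet to GRO cells for batched receive.
 */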
static void
rmnet_deliver_skb(struct sk_buff *skb)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb, skb->dev);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	gro_cells_receive(&priv->gro_cells, skb);
}

/* MAP handler */

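/* Deliver a single MAP frame to its logical endpoint. Command frames go to
 * the MAP command handler (or are dropped if command handling is not
 * enabled); data frames have the MAP header stripped, the checksum trailer
 * validated when checksum offload is enabled, padding trimmed, and are then
 * delivered on the endpoint's virtual device.
 */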
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	pad = RMNET_MAP_GET_PAD(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	rmnet_set_skb_proto(skb);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}

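/* Ingress entry for MAP traffic: push the previously pulled Ethernet header
 * back onto the packet when the real device is an Ethernet device, then
 * either walk and deliver each deaggregated MAP frame or hand the single
 * frame off directly.
 */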
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;

	if (skb->dev->type == ARPHRD_ETHER) {
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return;
		}

		skb_push(skb, ETH_HLEN);
	}

	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
}

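/* Add MAP framing on egress: ensure headroom for the MAP header (plus the
 * uplink checksum header when checksum offload is enabled), generate the
 * checksum header if configured, then prepend the MAP header carrying the
 * mux id of the transmitting virtual device.
 */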
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		required_headroom += additional_header_len;
	}

	if (skb_headroom(skb) < required_headroom) {
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		rmnet_map_checksum_uplink_packet(skb, orig_dev);

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	if (!map_header)
		return -ENOMEM;

	map_header->mux_id = mux_id;

	skb->protocol = htons(ETH_P_MAP);

	return 0;
}

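/* Bridge mode: re-expose the MAC header if one was set and forward the
 * packet unmodified to the bridged device.
 */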
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (skb_mac_header_was_set(skb))
		skb_push(skb, skb->mac_len);

	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct rmnet_port *port;
	struct net_device *dev;

	if (!skb)
		goto done;

	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;

	dev = skb->dev;
	port = rmnet_get_port_rcu(dev);

	switch (port->rmnet_mode) {
	case RMNET_EPMODE_VND:
		rmnet_map_ingress_handler(skb, port);
		break;
	case RMNET_EPMODE_BRIDGE:
		rmnet_bridge_handler(skb, port->bridge_ep);
		break;
	}

done:
	return RX_HANDLER_CONSUMED;
}

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb)
{
	struct net_device *orig_dev;
	struct rmnet_port *port;
	struct rmnet_priv *priv;
	u8 mux_id;

	sk_pacing_shift_update(skb->sk, 8);

	orig_dev = skb->dev;
	priv = netdev_priv(orig_dev);
	skb->dev = priv->real_dev;
	mux_id = priv->mux_id;

	port = rmnet_get_port_rcu(skb->dev);
	if (!port)
		goto drop;

	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
		goto drop;

	rmnet_vnd_tx_fixup(skb, orig_dev);

	dev_queue_xmit(skb);
	return;

drop:
	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
	kfree_skb(skb);
}