// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>
#include <net/gro.h>

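/* GSO entry point for GRE-encapsulated skbs: strip the outer headers,
 * segment the inner packet, then rebuild the tunnel headers (and the
 * optional GRE checksum) on every resulting segment.
 */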
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool need_csum, offload_csum, gso_partial, need_ipsec;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	features &= skb->dev->hw_enc_features;
	if (need_csum)
		features &= ~NETIF_F_SCTP_CRC;

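	/* The GRE checksum cannot be left to the NIC when an xfrm transform
	 * (e.g. IPsec) is still going to rewrite the packet, so with a dst
	 * xfrm attached the checksum is finished in software below.
	 */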
	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum && !need_ipsec &&
			  (skb->dev->features & NETIF_F_HW_CSUM));

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
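	/* Walk the segment list: each segment gets the outer MAC/IP/GRE
	 * headers pushed back on and, when requested, a fresh GRE checksum.
	 */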
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (gso_partial && skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

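		/* Clear the 16-bit reserved field that follows the GRE
		 * checksum, then either compute the checksum in software or
		 * leave CHECKSUM_PARTIAL state for the device to finish it.
		 */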
		*(pcsum + 1) = 0;
		if (skb->encapsulation || !offload_csum) {
			*pcsum = gso_make_checksum(skb, 0);
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = sizeof(*greh);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

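/* GRO receive handler for GRE: validate the base header and optional
 * checksum, match candidate packets against the same tunnel (flags,
 * protocol, key), then hand the inner packet to the next-layer handler.
 */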
static struct sk_buff *gre_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header(skb, hlen, off);
	if (unlikely(!greh))
		goto out;

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although the support for the S (seq#) flag can be added easily
	 * for GRO, this is problematic for GSO hence can not be enabled
	 * here because a GRO pkt may end up in the forwarding path, thus
	 * requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header.  In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
					     null_compute_pseudo);
	}

	list_for_each_entry(p, head, list) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

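	/* Pull the GRE header so the inner protocol's gro_receive callback
	 * starts at its own header.
	 */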
	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

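/* GRO completion for GRE: mark the skb as GRE-encapsulated GSO, record
 * where the inner headers start, and let the inner protocol finish its
 * own gro_complete work.
 */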
static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

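/* Offload callbacks for IPPROTO_GRE; the same set serves IPv4 and IPv6
 * since the GRE header itself is identical over both.
 */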
static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

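/* Register the GRE offload handlers with IPv4 and, when IPv6 is built,
 * with IPv6 as well; the IPv4 registration is undone if the IPv6 one fails.
 */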
static int __init gre_offload_init(void)
{
	int err;

	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
#if IS_ENABLED(CONFIG_IPV6)
	if (err)
		return err;

	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
	if (err)
		inet_del_offload(&gre_offload, IPPROTO_GRE);
#endif

	return err;
}
device_initcall(gre_offload_init);