// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include <net/gro.h>
#include "vlan.h"

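/*
 * Deliver an skb carrying a hardware-accelerated VLAN tag to the matching
 * vlan device.  Returns true once the skb has been retargeted at the vlan
 * device; returns false when no vlan device matches (*skbp is left alone)
 * or when the skb was dropped (*skbp is then NULL).
 */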
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_inner_tag() expects skb->data to point to the
		 * mac header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	__vlan_hwaccel_clear_tag(skb);

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					    __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves. The grp is assigned to the
		 * upper device instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

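/* Walk down a chain of stacked vlan devices to the underlying real device. */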
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

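/*
 * One (protocol, vid) pair registered on a real device.  The refcount
 * counts every user of the pair, so the hardware filter entry is only
 * removed once the last user calls vlan_vid_del().
 */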
struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

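/* True if the device can filter the given VLAN protocol in hardware. */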
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

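/*
 * Program or remove one (proto, vid) entry in the device's hardware RX
 * filter.  Devices that cannot filter the protocol in hardware succeed
 * as a no-op; absent devices return -ENODEV.
 */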
static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
	else
		return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
	else
		return -ENODEV;
}

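/*
 * Iterate over every vlan device on top of @dev, calling @action with
 * the vlan device and its vid.  Iteration stops at the first non-zero
 * return value, which is propagated to the caller.  Requires RTNL.
 *
 * A minimal caller sketch (the callback name is hypothetical):
 *
 *	static int dump_vid(struct net_device *vdev, int vid, void *arg)
 *	{
 *		netdev_info(vdev, "vid %d\n", vid);
 *		return 0;
 *	}
 *
 *	err = vlan_for_each(dev, dump_vid, NULL);
 */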
int vlan_for_each(struct net_device *dev,
		  int (*action)(struct net_device *dev, int vid, void *arg),
		  void *arg)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	struct net_device *vdev;
	int ret;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
					     vid_info->vid);
		ret = action(vdev, vid_info->vid, arg);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(vlan_for_each);

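/*
 * Push the whole vid list for @proto into the real device's hardware
 * filter, or drop it again.  A partial push is unwound before returning
 * the error.
 */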
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *vlan_vid_info;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(vlan_info->real_dev,
						 vlan_vid_info->proto,
						 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

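/*
 * Allocate a vid_info for (proto, vid), program the hardware filter and
 * link the entry into the vid list.  The caller takes the first
 * reference.
 */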
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err) {
		kfree(vid_info);
		return err;
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

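/*
 * Take a reference on the (proto, vid) filter entry of @dev, creating
 * the vlan_info and vid_info on first use.  Each successful call must
 * be balanced by vlan_vid_del().  A sketch of typical use from a
 * stacking driver (device and vid are illustrative):
 *
 *	err = vlan_vid_add(lower_dev, htons(ETH_P_8021Q), 100);
 *	if (err)
 *		return err;
 *	...
 *	vlan_vid_del(lower_dev, htons(ETH_P_8021Q), 100);
 */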
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

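/*
 * Drop a reference on a (proto, vid) entry.  The hardware filter entry
 * goes away with the last reference, and the vlan_info itself is freed
 * after an RCU grace period once no vids remain.
 */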
static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err && dev->reg_state != NETREG_UNREGISTERING)
		netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

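/*
 * Replicate onto @dev every vid registered on @by_dev (and drop them
 * again), skipping protocols that @by_dev cannot filter in hardware.
 * Used by stacking drivers such as bonding when enslaving a device.
 */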
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
			continue;
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
			continue;
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
			continue;
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

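/* True if at least one vlan device is configured on top of @dev. */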
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

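/*
 * GRO receive handler for VLAN-tagged packets whose tag was not stripped
 * by hardware.  Packets are batched only when the whole vlan header
 * matches; everything past the tag is left to the encapsulated
 * protocol's handler.
 */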
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));

	pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
					    ipv6_gro_receive, inet_gro_receive,
					    head, skb);

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

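/*
 * Finish GRO for a merged VLAN packet by handing off to the encapsulated
 * protocol's gro_complete handler, with the offset advanced past the
 * vlan header.
 */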
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}

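/*
 * GRO offload handlers for both supported VLAN ethertypes, registered at
 * boot by vlan_offload_init() below.
 */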
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

static int __init vlan_offload_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	return 0;
}

fs_initcall(vlan_offload_init);