// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - A packet socket receives packets with the LL header already pulled,
     so SOCK_RAW must push it back on.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However, drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

In summary:
  If dev_has_header(dev) == false, we are unable to restore the ll header,
    because it is invisible to us.


On transmit:
------------

dev->header_ops != NULL
   mac_header -> ll header
   data       -> ll header

dev->header_ops == NULL (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set network_header on output to the correct position;
   the packet classifier depends on it.
 */
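
/* Illustrative sketch (not part of this file): the visibility rules above
 * are what a user observes when picking the socket type. Assuming an
 * Ethernet device with header_ops, a hypothetical userspace snippet is
 *
 *	int raw   = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 * recvfrom() on 'raw' yields frames starting at the LL (Ethernet) header,
 * while 'dgram' yields frames starting at the network payload, with the
 * link-level details reported out of band via sockaddr_ll.
 */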

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
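
/* Worked example (illustrative): with V3_ALIGNMENT == 8, a user-requested
 * tp_sizeof_priv of 13 bytes rounds up to 16, so BLK_PLUS_PRIV(13) equals
 * BLK_HDR_LEN + 16. Every block thus reserves an 8-byte-aligned private
 * area directly behind the block descriptor.
 */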

#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};
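
/* Layout note (illustrative): sockaddr_ll starts with sll_family and
 * sll_protocol, two unsigned shorts, so the 4-byte 'origlen' overlays
 * exactly those two fields. They can be reconstructed at recvmsg time
 * (sll_family is always AF_PACKET, sll_protocol is skb->protocol), which
 * frees those four bytes to carry the original skb length while queued.
 */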

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket is not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
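
/* Ordering note (illustrative): tp_status is the handoff word between the
 * kernel and the user-mapped ring, so accesses to it are fenced on both
 * sides: __packet_get_status() issues smp_rmb() before reading the flag,
 * and __packet_set_status() issues smp_wmb() after writing it, pairing
 * with the corresponding barriers on the other side of the handoff.
 */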

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is that slow, you don't really
	 * need to worry about perf anyway.
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
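
/* Worked example (illustrative): a 1 MiB block is 8 Mbit, so on a 1 Gb/s
 * link (div == 1) this returns 8 + 1 = 9 ms, and on a 10 Gb/s link
 * (div == 10) it returns 0 + 1 = 1 ms: roughly the block fill time,
 * rounded up so the retire timer doesn't fire while a block is still
 * filling at line rate.
 */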

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}
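
/* Userspace sketch (illustrative only, error handling omitted): the req3
 * fields consumed above come from a PACKET_RX_RING setsockopt on a
 * TPACKET_V3 socket, e.g.
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 20,
 *		.tp_block_nr	   = 8,
 *		.tp_frame_size	   = 2048,
 *		.tp_frame_nr	   = (1 << 20) / 2048 * 8,
 *		.tp_retire_blk_tov = 0,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 * A tp_retire_blk_tov of 0 asks the kernel to derive the timeout via
 * prb_calc_retire_blk_tmo() above.
 */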

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and
				 * refreshes the timer as side effects.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
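
/* Worked example (illustrative): TOTAL_PKT_LEN_INCL_ALIGN(61) == 64, so a
 * 61-byte entry still advances nxt_offset by 64 bytes, keeping every
 * tpacket3_hdr in the block 8-byte aligned.
 */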

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			/* Waiting for skb_copy_bits to finish... */
			write_lock(&pkc->blk_fill_in_prog_lock);
			write_unlock(&pkc->blk_fill_in_prog_lock);
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);

	read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket3_hdr *ppd;

	ppd  = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	read_lock(&pkc->blk_fill_in_prog_lock);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
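
/* Worked example (illustrative): with a 64-frame ring and
 * pow_off == ROOM_POW_OFF (2), this probes the frame 64 >> 2 == 16 slots
 * ahead of head; if that frame is still owned by the kernel, at least a
 * quarter of the ring is free and the caller reports ROOM_NORMAL.
 * pow_off == 0 probes head itself, i.e. "is there any room at all".
 */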

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{
	const struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
				   - (skb ? skb->truesize : 0);

		if (avail > (rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int pressure, ret;

	ret = __packet_rcv_has_room(po, skb);
	pressure = ret != ROOM_NORMAL;

	if (READ_ONCE(po->pressure) != pressure)
		WRITE_ONCE(po->pressure, pressure);

	return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
	if (READ_ONCE(po->pressure) &&
	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		WRITE_ONCE(po->pressure, 0);
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 *history = po->rollover->history;
	u32 victim, rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (READ_ONCE(history[i]) == rxhash)
			count++;

	victim = prandom_u32() % ROLLOVER_HLEN;

	/* Avoid dirtying the cache line if possible */
	if (READ_ONCE(history[victim]) != rxhash)
		WRITE_ONCE(history[victim], rxhash);

	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}
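
/* Note (illustrative): reciprocal_scale(x, num) maps a full 32-bit hash
 * onto [0, num) as ((u64)x * num) >> 32, i.e. a multiply and a shift
 * instead of a modulo, and the symmetric hash keeps both directions of a
 * flow on the same fanout member.
 */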

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(rcu_dereference(f->arr[idx]));

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(rcu_dereference(f->arr[i]));
		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}
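
/* Encoding note (illustrative): the PACKET_FANOUT_FLAG_* constants live in
 * the high byte of the u16 fanout type_flags word (e.g.
 * PACKET_FANOUT_FLAG_DEFRAG == 0x8000), while f->flags stores only that
 * high byte (see 'flags = type_flags >> 8' in fanout_add() below), hence
 * the 'flag >> 8' when testing.
 */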
1420 
packet_rcv_fanout(struct sk_buff * skb,struct net_device * dev,struct packet_type * pt,struct net_device * orig_dev)1421 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1422 			     struct packet_type *pt, struct net_device *orig_dev)
1423 {
1424 	struct packet_fanout *f = pt->af_packet_priv;
1425 	unsigned int num = READ_ONCE(f->num_members);
1426 	struct net *net = read_pnet(&f->net);
1427 	struct packet_sock *po;
1428 	unsigned int idx;
1429 
1430 	if (!net_eq(dev_net(dev), net) || !num) {
1431 		kfree_skb(skb);
1432 		return 0;
1433 	}
1434 
1435 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1436 		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1437 		if (!skb)
1438 			return 0;
1439 	}
1440 	switch (f->type) {
1441 	case PACKET_FANOUT_HASH:
1442 	default:
1443 		idx = fanout_demux_hash(f, skb, num);
1444 		break;
1445 	case PACKET_FANOUT_LB:
1446 		idx = fanout_demux_lb(f, skb, num);
1447 		break;
1448 	case PACKET_FANOUT_CPU:
1449 		idx = fanout_demux_cpu(f, skb, num);
1450 		break;
1451 	case PACKET_FANOUT_RND:
1452 		idx = fanout_demux_rnd(f, skb, num);
1453 		break;
1454 	case PACKET_FANOUT_QM:
1455 		idx = fanout_demux_qm(f, skb, num);
1456 		break;
1457 	case PACKET_FANOUT_ROLLOVER:
1458 		idx = fanout_demux_rollover(f, skb, 0, false, num);
1459 		break;
1460 	case PACKET_FANOUT_CBPF:
1461 	case PACKET_FANOUT_EBPF:
1462 		idx = fanout_demux_bpf(f, skb, num);
1463 		break;
1464 	}
1465 
1466 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1467 		idx = fanout_demux_rollover(f, skb, idx, true, num);
1468 
1469 	po = pkt_sk(rcu_dereference(f->arr[idx]));
1470 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1471 }
1472 
1473 DEFINE_MUTEX(fanout_mutex);
1474 EXPORT_SYMBOL_GPL(fanout_mutex);
1475 static LIST_HEAD(fanout_list);
1476 static u16 fanout_next_id;
1477 
__fanout_link(struct sock * sk,struct packet_sock * po)1478 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1479 {
1480 	struct packet_fanout *f = po->fanout;
1481 
1482 	spin_lock(&f->lock);
1483 	rcu_assign_pointer(f->arr[f->num_members], sk);
1484 	smp_wmb();
1485 	f->num_members++;
1486 	if (f->num_members == 1)
1487 		dev_add_pack(&f->prot_hook);
1488 	spin_unlock(&f->lock);
1489 }
1490 
__fanout_unlink(struct sock * sk,struct packet_sock * po)1491 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1492 {
1493 	struct packet_fanout *f = po->fanout;
1494 	int i;
1495 
1496 	spin_lock(&f->lock);
1497 	for (i = 0; i < f->num_members; i++) {
1498 		if (rcu_dereference_protected(f->arr[i],
1499 					      lockdep_is_held(&f->lock)) == sk)
1500 			break;
1501 	}
1502 	BUG_ON(i >= f->num_members);
1503 	rcu_assign_pointer(f->arr[i],
1504 			   rcu_dereference_protected(f->arr[f->num_members - 1],
1505 						     lockdep_is_held(&f->lock)));
1506 	f->num_members--;
1507 	if (f->num_members == 0)
1508 		__dev_remove_pack(&f->prot_hook);
1509 	spin_unlock(&f->lock);
1510 }
1511 
match_fanout_group(struct packet_type * ptype,struct sock * sk)1512 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1513 {
1514 	if (sk->sk_family != PF_PACKET)
1515 		return false;
1516 
1517 	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1518 }
1519 
fanout_init_data(struct packet_fanout * f)1520 static void fanout_init_data(struct packet_fanout *f)
1521 {
1522 	switch (f->type) {
1523 	case PACKET_FANOUT_LB:
1524 		atomic_set(&f->rr_cur, 0);
1525 		break;
1526 	case PACKET_FANOUT_CBPF:
1527 	case PACKET_FANOUT_EBPF:
1528 		RCU_INIT_POINTER(f->bpf_prog, NULL);
1529 		break;
1530 	}
1531 }
1532 
__fanout_set_data_bpf(struct packet_fanout * f,struct bpf_prog * new)1533 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1534 {
1535 	struct bpf_prog *old;
1536 
1537 	spin_lock(&f->lock);
1538 	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1539 	rcu_assign_pointer(f->bpf_prog, new);
1540 	spin_unlock(&f->lock);
1541 
1542 	if (old) {
1543 		synchronize_net();
1544 		bpf_prog_destroy(old);
1545 	}
1546 }
1547 
fanout_set_data_cbpf(struct packet_sock * po,sockptr_t data,unsigned int len)1548 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1549 				unsigned int len)
1550 {
1551 	struct bpf_prog *new;
1552 	struct sock_fprog fprog;
1553 	int ret;
1554 
1555 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1556 		return -EPERM;
1557 
1558 	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1559 	if (ret)
1560 		return ret;
1561 
1562 	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1563 	if (ret)
1564 		return ret;
1565 
1566 	__fanout_set_data_bpf(po->fanout, new);
1567 	return 0;
1568 }
1569 
fanout_set_data_ebpf(struct packet_sock * po,sockptr_t data,unsigned int len)1570 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1571 				unsigned int len)
1572 {
1573 	struct bpf_prog *new;
1574 	u32 fd;
1575 
1576 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1577 		return -EPERM;
1578 	if (len != sizeof(fd))
1579 		return -EINVAL;
1580 	if (copy_from_sockptr(&fd, data, len))
1581 		return -EFAULT;
1582 
1583 	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1584 	if (IS_ERR(new))
1585 		return PTR_ERR(new);
1586 
1587 	__fanout_set_data_bpf(po->fanout, new);
1588 	return 0;
1589 }
1590 
fanout_set_data(struct packet_sock * po,sockptr_t data,unsigned int len)1591 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1592 			   unsigned int len)
1593 {
1594 	switch (po->fanout->type) {
1595 	case PACKET_FANOUT_CBPF:
1596 		return fanout_set_data_cbpf(po, data, len);
1597 	case PACKET_FANOUT_EBPF:
1598 		return fanout_set_data_ebpf(po, data, len);
1599 	default:
1600 		return -EINVAL;
1601 	}
1602 }
1603 
fanout_release_data(struct packet_fanout * f)1604 static void fanout_release_data(struct packet_fanout *f)
1605 {
1606 	switch (f->type) {
1607 	case PACKET_FANOUT_CBPF:
1608 	case PACKET_FANOUT_EBPF:
1609 		__fanout_set_data_bpf(f, NULL);
1610 	}
1611 }
1612 
__fanout_id_is_free(struct sock * sk,u16 candidate_id)1613 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1614 {
1615 	struct packet_fanout *f;
1616 
1617 	list_for_each_entry(f, &fanout_list, list) {
1618 		if (f->id == candidate_id &&
1619 		    read_pnet(&f->net) == sock_net(sk)) {
1620 			return false;
1621 		}
1622 	}
1623 	return true;
1624 }
1625 
fanout_find_new_id(struct sock * sk,u16 * new_id)1626 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1627 {
1628 	u16 id = fanout_next_id;
1629 
1630 	do {
1631 		if (__fanout_id_is_free(sk, id)) {
1632 			*new_id = id;
1633 			fanout_next_id = id + 1;
1634 			return true;
1635 		}
1636 
1637 		id++;
1638 	} while (id != fanout_next_id);
1639 
1640 	return false;
1641 }
1642 
fanout_add(struct sock * sk,struct fanout_args * args)1643 static int fanout_add(struct sock *sk, struct fanout_args *args)
1644 {
1645 	struct packet_rollover *rollover = NULL;
1646 	struct packet_sock *po = pkt_sk(sk);
1647 	u16 type_flags = args->type_flags;
1648 	struct packet_fanout *f, *match;
1649 	u8 type = type_flags & 0xff;
1650 	u8 flags = type_flags >> 8;
1651 	u16 id = args->id;
1652 	int err;
1653 
1654 	switch (type) {
1655 	case PACKET_FANOUT_ROLLOVER:
1656 		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1657 			return -EINVAL;
1658 	case PACKET_FANOUT_HASH:
1659 	case PACKET_FANOUT_LB:
1660 	case PACKET_FANOUT_CPU:
1661 	case PACKET_FANOUT_RND:
1662 	case PACKET_FANOUT_QM:
1663 	case PACKET_FANOUT_CBPF:
1664 	case PACKET_FANOUT_EBPF:
1665 		break;
1666 	default:
1667 		return -EINVAL;
1668 	}
1669 
1670 	mutex_lock(&fanout_mutex);
1671 
1672 	err = -EALREADY;
1673 	if (po->fanout)
1674 		goto out;
1675 
1676 	if (type == PACKET_FANOUT_ROLLOVER ||
1677 	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1678 		err = -ENOMEM;
1679 		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1680 		if (!rollover)
1681 			goto out;
1682 		atomic_long_set(&rollover->num, 0);
1683 		atomic_long_set(&rollover->num_huge, 0);
1684 		atomic_long_set(&rollover->num_failed, 0);
1685 	}
1686 
1687 	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1688 		if (id != 0) {
1689 			err = -EINVAL;
1690 			goto out;
1691 		}
1692 		if (!fanout_find_new_id(sk, &id)) {
1693 			err = -ENOMEM;
1694 			goto out;
1695 		}
1696 		/* ephemeral flag for the first socket in the group: drop it */
1697 		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1698 	}
1699 
1700 	match = NULL;
1701 	list_for_each_entry(f, &fanout_list, list) {
1702 		if (f->id == id &&
1703 		    read_pnet(&f->net) == sock_net(sk)) {
1704 			match = f;
1705 			break;
1706 		}
1707 	}
1708 	err = -EINVAL;
1709 	if (match) {
1710 		if (match->flags != flags)
1711 			goto out;
1712 		if (args->max_num_members &&
1713 		    args->max_num_members != match->max_num_members)
1714 			goto out;
1715 	} else {
1716 		if (args->max_num_members > PACKET_FANOUT_MAX)
1717 			goto out;
1718 		if (!args->max_num_members)
1719 			/* legacy PACKET_FANOUT_MAX */
1720 			args->max_num_members = 256;
1721 		err = -ENOMEM;
1722 		match = kvzalloc(struct_size(match, arr, args->max_num_members),
1723 				 GFP_KERNEL);
1724 		if (!match)
1725 			goto out;
1726 		write_pnet(&match->net, sock_net(sk));
1727 		match->id = id;
1728 		match->type = type;
1729 		match->flags = flags;
1730 		INIT_LIST_HEAD(&match->list);
1731 		spin_lock_init(&match->lock);
1732 		refcount_set(&match->sk_ref, 0);
1733 		fanout_init_data(match);
1734 		match->prot_hook.type = po->prot_hook.type;
1735 		match->prot_hook.dev = po->prot_hook.dev;
1736 		match->prot_hook.func = packet_rcv_fanout;
1737 		match->prot_hook.af_packet_priv = match;
1738 		match->prot_hook.af_packet_net = read_pnet(&match->net);
1739 		match->prot_hook.id_match = match_fanout_group;
1740 		match->max_num_members = args->max_num_members;
1741 		list_add(&match->list, &fanout_list);
1742 	}
1743 	err = -EINVAL;
1744 
1745 	spin_lock(&po->bind_lock);
1746 	if (po->running &&
1747 	    match->type == type &&
1748 	    match->prot_hook.type == po->prot_hook.type &&
1749 	    match->prot_hook.dev == po->prot_hook.dev) {
1750 		err = -ENOSPC;
1751 		if (refcount_read(&match->sk_ref) < match->max_num_members) {
1752 			__dev_remove_pack(&po->prot_hook);
1753 
1754 			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1755 			WRITE_ONCE(po->fanout, match);
1756 
1757 			po->rollover = rollover;
1758 			rollover = NULL;
1759 			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1760 			__fanout_link(sk, po);
1761 			err = 0;
1762 		}
1763 	}
1764 	spin_unlock(&po->bind_lock);
1765 
1766 	if (err && !refcount_read(&match->sk_ref)) {
1767 		list_del(&match->list);
1768 		kvfree(match);
1769 	}
1770 
1771 out:
1772 	kfree(rollover);
1773 	mutex_unlock(&fanout_mutex);
1774 	return err;
1775 }
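
/* Illustrative sketch: from userspace, sockets join a fanout group with the
 * packed id/type optval parsed above (group_id is an arbitrary 16-bit id
 * chosen by the application):
 *
 *	int val = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * Passing a full struct fanout_args instead allows max_num_members to exceed
 * the legacy default of 256.
 */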
1776 
1777 /* Detach the socket from its fanout group. If that drops sk_ref to zero,
1778  * remove pkt_sk(sk)->fanout from fanout_list and return it; otherwise
1779  * return NULL. The caller is responsible for calling fanout_release_data()
1780  * and freeing the returned packet_fanout, but only after synchronize_net().
1781  */
1782 static struct packet_fanout *fanout_release(struct sock *sk)
1783 {
1784 	struct packet_sock *po = pkt_sk(sk);
1785 	struct packet_fanout *f;
1786 
1787 	mutex_lock(&fanout_mutex);
1788 	f = po->fanout;
1789 	if (f) {
1790 		po->fanout = NULL;
1791 
1792 		if (refcount_dec_and_test(&f->sk_ref))
1793 			list_del(&f->list);
1794 		else
1795 			f = NULL;
1796 	}
1797 	mutex_unlock(&fanout_mutex);
1798 
1799 	return f;
1800 }
1801 
1802 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1803 					  struct sk_buff *skb)
1804 {
1805 	/* Earlier code assumed this would be a VLAN pkt, double-check
1806 	 * this now that we have the actual packet in hand. We can only
1807 	 * do this check on Ethernet devices.
1808 	 */
1809 	if (unlikely(dev->type != ARPHRD_ETHER))
1810 		return false;
1811 
1812 	skb_reset_mac_header(skb);
1813 	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1814 }
1815 
1816 static const struct proto_ops packet_ops;
1817 
1818 static const struct proto_ops packet_ops_spkt;
1819 
1820 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1821 			   struct packet_type *pt, struct net_device *orig_dev)
1822 {
1823 	struct sock *sk;
1824 	struct sockaddr_pkt *spkt;
1825 
1826 	/*
1827 	 *	When we registered the protocol we saved the socket in the data
1828 	 *	field for just this event.
1829 	 */
1830 
1831 	sk = pt->af_packet_priv;
1832 
1833 	/*
1834 	 *	Yank back the headers [hope the device set this
1835 	 *	right or kerboom...]
1836 	 *
1837 	 *	Incoming packets have ll header pulled,
1838 	 *	push it back.
1839 	 *
1840 	 *	For outgoing ones skb->data == skb_mac_header(skb)
1841 	 *	so that this procedure is a no-op.
1842 	 */
1843 
1844 	if (skb->pkt_type == PACKET_LOOPBACK)
1845 		goto out;
1846 
1847 	if (!net_eq(dev_net(dev), sock_net(sk)))
1848 		goto out;
1849 
1850 	skb = skb_share_check(skb, GFP_ATOMIC);
1851 	if (skb == NULL)
1852 		goto oom;
1853 
1854 	/* drop any routing info */
1855 	skb_dst_drop(skb);
1856 
1857 	/* drop conntrack reference */
1858 	nf_reset_ct(skb);
1859 
1860 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1861 
1862 	skb_push(skb, skb->data - skb_mac_header(skb));
1863 
1864 	/*
1865 	 *	The SOCK_PACKET socket receives _all_ frames.
1866 	 */
1867 
1868 	spkt->spkt_family = dev->type;
1869 	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1870 	spkt->spkt_protocol = skb->protocol;
1871 
1872 	/*
1873 	 *	Charge the memory to the socket. This is done specifically
1874 	 *	to prevent a socket from using up all the memory.
1875 	 */
1876 
1877 	if (sock_queue_rcv_skb(sk, skb) == 0)
1878 		return 0;
1879 
1880 out:
1881 	kfree_skb(skb);
1882 oom:
1883 	return 0;
1884 }
1885 
1886 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1887 {
1888 	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1889 	    sock->type == SOCK_RAW) {
1890 		skb_reset_mac_header(skb);
1891 		skb->protocol = dev_parse_header_protocol(skb);
1892 	}
1893 
1894 	skb_probe_transport_header(skb);
1895 }
1896 
1897 /*
1898  *	Output a raw packet to a device layer. This bypasses all the other
1899  *	protocol layers and you must therefore supply it with a complete frame
1900  */
1901 
1902 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1903 			       size_t len)
1904 {
1905 	struct sock *sk = sock->sk;
1906 	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1907 	struct sk_buff *skb = NULL;
1908 	struct net_device *dev;
1909 	struct sockcm_cookie sockc;
1910 	__be16 proto = 0;
1911 	int err;
1912 	int extra_len = 0;
1913 
1914 	/*
1915 	 *	Get and verify the address.
1916 	 */
1917 
1918 	if (saddr) {
1919 		if (msg->msg_namelen < sizeof(struct sockaddr))
1920 			return -EINVAL;
1921 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1922 			proto = saddr->spkt_protocol;
1923 	} else
1924 		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1925 
1926 	/*
1927 	 *	Find the device first so we can size-check against it
1928 	 */
1929 
1930 	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1931 retry:
1932 	rcu_read_lock();
1933 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1934 	err = -ENODEV;
1935 	if (dev == NULL)
1936 		goto out_unlock;
1937 
1938 	err = -ENETDOWN;
1939 	if (!(dev->flags & IFF_UP))
1940 		goto out_unlock;
1941 
1942 	/*
1943 	 * You may not queue a frame bigger than the MTU. This is the lowest-level
1944 	 * raw protocol and you must do your own fragmentation at this level.
1945 	 */
1946 
1947 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1948 		if (!netif_supports_nofcs(dev)) {
1949 			err = -EPROTONOSUPPORT;
1950 			goto out_unlock;
1951 		}
1952 		extra_len = 4; /* We're doing our own CRC */
1953 	}
1954 
1955 	err = -EMSGSIZE;
1956 	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1957 		goto out_unlock;
1958 
1959 	if (!skb) {
1960 		size_t reserved = LL_RESERVED_SPACE(dev);
1961 		int tlen = dev->needed_tailroom;
1962 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1963 
1964 		rcu_read_unlock();
1965 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1966 		if (skb == NULL)
1967 			return -ENOBUFS;
1968 		/* FIXME: Save some space for broken drivers that write a hard
1969 		 * header at transmission time by themselves. PPP is the notable
1970 		 * one here. This should really be fixed at the driver level.
1971 		 */
1972 		skb_reserve(skb, reserved);
1973 		skb_reset_network_header(skb);
1974 
1975 		/* Try to align data part correctly */
1976 		if (hhlen) {
1977 			skb->data -= hhlen;
1978 			skb->tail -= hhlen;
1979 			if (len < hhlen)
1980 				skb_reset_network_header(skb);
1981 		}
1982 		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1983 		if (err)
1984 			goto out_free;
1985 		goto retry;
1986 	}
1987 
1988 	if (!dev_validate_header(dev, skb->data, len)) {
1989 		err = -EINVAL;
1990 		goto out_unlock;
1991 	}
1992 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1993 	    !packet_extra_vlan_len_allowed(dev, skb)) {
1994 		err = -EMSGSIZE;
1995 		goto out_unlock;
1996 	}
1997 
1998 	sockcm_init(&sockc, sk);
1999 	if (msg->msg_controllen) {
2000 		err = sock_cmsg_send(sk, msg, &sockc);
2001 		if (unlikely(err))
2002 			goto out_unlock;
2003 	}
2004 
2005 	skb->protocol = proto;
2006 	skb->dev = dev;
2007 	skb->priority = sk->sk_priority;
2008 	skb->mark = sk->sk_mark;
2009 	skb->tstamp = sockc.transmit_time;
2010 
2011 	skb_setup_tx_timestamp(skb, sockc.tsflags);
2012 
2013 	if (unlikely(extra_len == 4))
2014 		skb->no_fcs = 1;
2015 
2016 	packet_parse_headers(skb, sock);
2017 
2018 	dev_queue_xmit(skb);
2019 	rcu_read_unlock();
2020 	return len;
2021 
2022 out_unlock:
2023 	rcu_read_unlock();
2024 out_free:
2025 	kfree_skb(skb);
2026 	return err;
2027 }
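
/* Illustrative sketch: a legacy SOCK_PACKET sender names the device in
 * sockaddr_pkt and supplies a complete frame ("eth0" is an example name):
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */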
2028 
2029 static unsigned int run_filter(struct sk_buff *skb,
2030 			       const struct sock *sk,
2031 			       unsigned int res)
2032 {
2033 	struct sk_filter *filter;
2034 
2035 	rcu_read_lock();
2036 	filter = rcu_dereference(sk->sk_filter);
2037 	if (filter != NULL)
2038 		res = bpf_prog_run_clear_cb(filter->prog, skb);
2039 	rcu_read_unlock();
2040 
2041 	return res;
2042 }
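
/* Illustrative sketch: the filter consulted above is installed from userspace
 * as a classic BPF program; a return value of 0 drops the packet, and a value
 * smaller than the packet length truncates the capture to that many bytes:
 *
 *	struct sock_fprog fprog = { .len = prog_len, .filter = insns };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */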
2043 
2044 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2045 			   size_t *len)
2046 {
2047 	struct virtio_net_hdr vnet_hdr;
2048 
2049 	if (*len < sizeof(vnet_hdr))
2050 		return -EINVAL;
2051 	*len -= sizeof(vnet_hdr);
2052 
2053 	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2054 		return -EINVAL;
2055 
2056 	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2057 }
2058 
2059 /*
2060  * This function does lazy skb cloning in the hope that most of the
2061  * packets are discarded by BPF.
2062  *
2063  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2064  * and skb->cb are mangled. It works because (and until) packets
2065  * falling here are owned by the current CPU. Output packets are cloned
2066  * by dev_queue_xmit_nit(), input packets are processed by net_bh
2067  * sequentially, so if we return the skb to its original state on exit,
2068  * we will not harm anyone.
2069  */
2070 
2071 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2072 		      struct packet_type *pt, struct net_device *orig_dev)
2073 {
2074 	struct sock *sk;
2075 	struct sockaddr_ll *sll;
2076 	struct packet_sock *po;
2077 	u8 *skb_head = skb->data;
2078 	int skb_len = skb->len;
2079 	unsigned int snaplen, res;
2080 	bool is_drop_n_account = false;
2081 
2082 	if (skb->pkt_type == PACKET_LOOPBACK)
2083 		goto drop;
2084 
2085 	sk = pt->af_packet_priv;
2086 	po = pkt_sk(sk);
2087 
2088 	if (!net_eq(dev_net(dev), sock_net(sk)))
2089 		goto drop;
2090 
2091 	skb->dev = dev;
2092 
2093 	if (dev_has_header(dev)) {
2094 		/* The device has an explicit notion of ll header,
2095 		 * exported to higher levels.
2096 		 *
2097 		 * Otherwise, the device hides details of its frame
2098 		 * structure, so that corresponding packet head is
2099 		 * never delivered to user.
2100 		 */
2101 		if (sk->sk_type != SOCK_DGRAM)
2102 			skb_push(skb, skb->data - skb_mac_header(skb));
2103 		else if (skb->pkt_type == PACKET_OUTGOING) {
2104 			/* Special case: outgoing packets have ll header at head */
2105 			skb_pull(skb, skb_network_offset(skb));
2106 		}
2107 	}
2108 
2109 	snaplen = skb->len;
2110 
2111 	res = run_filter(skb, sk, snaplen);
2112 	if (!res)
2113 		goto drop_n_restore;
2114 	if (snaplen > res)
2115 		snaplen = res;
2116 
2117 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2118 		goto drop_n_acct;
2119 
2120 	if (skb_shared(skb)) {
2121 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2122 		if (nskb == NULL)
2123 			goto drop_n_acct;
2124 
2125 		if (skb_head != skb->data) {
2126 			skb->data = skb_head;
2127 			skb->len = skb_len;
2128 		}
2129 		consume_skb(skb);
2130 		skb = nskb;
2131 	}
2132 
2133 	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2134 
2135 	sll = &PACKET_SKB_CB(skb)->sa.ll;
2136 	sll->sll_hatype = dev->type;
2137 	sll->sll_pkttype = skb->pkt_type;
2138 	if (unlikely(po->origdev))
2139 		sll->sll_ifindex = orig_dev->ifindex;
2140 	else
2141 		sll->sll_ifindex = dev->ifindex;
2142 
2143 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2144 
2145 	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2146 	 * Use their space for storing the original skb length.
2147 	 */
2148 	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2149 
2150 	if (pskb_trim(skb, snaplen))
2151 		goto drop_n_acct;
2152 
2153 	skb_set_owner_r(skb, sk);
2154 	skb->dev = NULL;
2155 	skb_dst_drop(skb);
2156 
2157 	/* drop conntrack reference */
2158 	nf_reset_ct(skb);
2159 
2160 	spin_lock(&sk->sk_receive_queue.lock);
2161 	po->stats.stats1.tp_packets++;
2162 	sock_skb_set_dropcount(sk, skb);
2163 	__skb_queue_tail(&sk->sk_receive_queue, skb);
2164 	spin_unlock(&sk->sk_receive_queue.lock);
2165 	sk->sk_data_ready(sk);
2166 	return 0;
2167 
2168 drop_n_acct:
2169 	is_drop_n_account = true;
2170 	atomic_inc(&po->tp_drops);
2171 	atomic_inc(&sk->sk_drops);
2172 
2173 drop_n_restore:
2174 	if (skb_head != skb->data && skb_shared(skb)) {
2175 		skb->data = skb_head;
2176 		skb->len = skb_len;
2177 	}
2178 drop:
2179 	if (!is_drop_n_account)
2180 		consume_skb(skb);
2181 	else
2182 		kfree_skb(skb);
2183 	return 0;
2184 }
2185 
2186 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2187 		       struct packet_type *pt, struct net_device *orig_dev)
2188 {
2189 	struct sock *sk;
2190 	struct packet_sock *po;
2191 	struct sockaddr_ll *sll;
2192 	union tpacket_uhdr h;
2193 	u8 *skb_head = skb->data;
2194 	int skb_len = skb->len;
2195 	unsigned int snaplen, res;
2196 	unsigned long status = TP_STATUS_USER;
2197 	unsigned short macoff, hdrlen;
2198 	unsigned int netoff;
2199 	struct sk_buff *copy_skb = NULL;
2200 	struct timespec64 ts;
2201 	__u32 ts_status;
2202 	bool is_drop_n_account = false;
2203 	unsigned int slot_id = 0;
2204 	bool do_vnet = false;
2205 
2206 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2207 	 * We may add members to them until the current aligned size without forcing
2208 	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2209 	 */
2210 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2211 	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2212 
2213 	if (skb->pkt_type == PACKET_LOOPBACK)
2214 		goto drop;
2215 
2216 	sk = pt->af_packet_priv;
2217 	po = pkt_sk(sk);
2218 
2219 	if (!net_eq(dev_net(dev), sock_net(sk)))
2220 		goto drop;
2221 
2222 	if (dev_has_header(dev)) {
2223 		if (sk->sk_type != SOCK_DGRAM)
2224 			skb_push(skb, skb->data - skb_mac_header(skb));
2225 		else if (skb->pkt_type == PACKET_OUTGOING) {
2226 			/* Special case: outgoing packets have ll header at head */
2227 			skb_pull(skb, skb_network_offset(skb));
2228 		}
2229 	}
2230 
2231 	snaplen = skb->len;
2232 
2233 	res = run_filter(skb, sk, snaplen);
2234 	if (!res)
2235 		goto drop_n_restore;
2236 
2237 	/* If we are flooded, just give up */
2238 	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2239 		atomic_inc(&po->tp_drops);
2240 		goto drop_n_restore;
2241 	}
2242 
2243 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2244 		status |= TP_STATUS_CSUMNOTREADY;
2245 	else if (skb->pkt_type != PACKET_OUTGOING &&
2246 		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2247 		  skb_csum_unnecessary(skb)))
2248 		status |= TP_STATUS_CSUM_VALID;
2249 
2250 	if (snaplen > res)
2251 		snaplen = res;
2252 
2253 	if (sk->sk_type == SOCK_DGRAM) {
2254 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2255 				  po->tp_reserve;
2256 	} else {
2257 		unsigned int maclen = skb_network_offset(skb);
2258 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2259 				       (maclen < 16 ? 16 : maclen)) +
2260 				       po->tp_reserve;
2261 		if (po->has_vnet_hdr) {
2262 			netoff += sizeof(struct virtio_net_hdr);
2263 			do_vnet = true;
2264 		}
2265 		macoff = netoff - maclen;
2266 	}
2267 	if (netoff > USHRT_MAX) {
2268 		atomic_inc(&po->tp_drops);
2269 		goto drop_n_restore;
2270 	}
2271 	if (po->tp_version <= TPACKET_V2) {
2272 		if (macoff + snaplen > po->rx_ring.frame_size) {
2273 			if (po->copy_thresh &&
2274 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2275 				if (skb_shared(skb)) {
2276 					copy_skb = skb_clone(skb, GFP_ATOMIC);
2277 				} else {
2278 					copy_skb = skb_get(skb);
2279 					skb_head = skb->data;
2280 				}
2281 				if (copy_skb) {
2282 					memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2283 					       sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2284 					skb_set_owner_r(copy_skb, sk);
2285 				}
2286 			}
2287 			snaplen = po->rx_ring.frame_size - macoff;
2288 			if ((int)snaplen < 0) {
2289 				snaplen = 0;
2290 				do_vnet = false;
2291 			}
2292 		}
2293 	} else if (unlikely(macoff + snaplen >
2294 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2295 		u32 nval;
2296 
2297 		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2298 		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2299 			    snaplen, nval, macoff);
2300 		snaplen = nval;
2301 		if (unlikely((int)snaplen < 0)) {
2302 			snaplen = 0;
2303 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2304 			do_vnet = false;
2305 		}
2306 	}
2307 	spin_lock(&sk->sk_receive_queue.lock);
2308 	h.raw = packet_current_rx_frame(po, skb,
2309 					TP_STATUS_KERNEL, (macoff+snaplen));
2310 	if (!h.raw)
2311 		goto drop_n_account;
2312 
2313 	if (po->tp_version <= TPACKET_V2) {
2314 		slot_id = po->rx_ring.head;
2315 		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2316 			goto drop_n_account;
2317 		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2318 	}
2319 
2320 	if (do_vnet &&
2321 	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2322 				    sizeof(struct virtio_net_hdr),
2323 				    vio_le(), true, 0)) {
2324 		if (po->tp_version == TPACKET_V3)
2325 			prb_clear_blk_fill_status(&po->rx_ring);
2326 		goto drop_n_account;
2327 	}
2328 
2329 	if (po->tp_version <= TPACKET_V2) {
2330 		packet_increment_rx_head(po, &po->rx_ring);
2331 	/*
2332 	 * LOSING will be reported until you read the stats,
2333 	 * because it's COR - Clear On Read.
2334 	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2335 	 * at the packet level.
2336 	 */
2337 		if (atomic_read(&po->tp_drops))
2338 			status |= TP_STATUS_LOSING;
2339 	}
2340 
2341 	po->stats.stats1.tp_packets++;
2342 	if (copy_skb) {
2343 		status |= TP_STATUS_COPY;
2344 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2345 	}
2346 	spin_unlock(&sk->sk_receive_queue.lock);
2347 
2348 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2349 
2350 	/* Always timestamp; prefer an existing software timestamp taken
2351 	 * closer to the time of capture.
2352 	 */
2353 	ts_status = tpacket_get_timestamp(skb, &ts,
2354 					  po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
2355 	if (!ts_status)
2356 		ktime_get_real_ts64(&ts);
2357 
2358 	status |= ts_status;
2359 
2360 	switch (po->tp_version) {
2361 	case TPACKET_V1:
2362 		h.h1->tp_len = skb->len;
2363 		h.h1->tp_snaplen = snaplen;
2364 		h.h1->tp_mac = macoff;
2365 		h.h1->tp_net = netoff;
2366 		h.h1->tp_sec = ts.tv_sec;
2367 		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2368 		hdrlen = sizeof(*h.h1);
2369 		break;
2370 	case TPACKET_V2:
2371 		h.h2->tp_len = skb->len;
2372 		h.h2->tp_snaplen = snaplen;
2373 		h.h2->tp_mac = macoff;
2374 		h.h2->tp_net = netoff;
2375 		h.h2->tp_sec = ts.tv_sec;
2376 		h.h2->tp_nsec = ts.tv_nsec;
2377 		if (skb_vlan_tag_present(skb)) {
2378 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2379 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2380 			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2381 		} else {
2382 			h.h2->tp_vlan_tci = 0;
2383 			h.h2->tp_vlan_tpid = 0;
2384 		}
2385 		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2386 		hdrlen = sizeof(*h.h2);
2387 		break;
2388 	case TPACKET_V3:
2389 		/* tp_next_offset and the vlan fields are already populated
2390 		 * above, so don't clear them here.
2391 		 */
2392 		h.h3->tp_status |= status;
2393 		h.h3->tp_len = skb->len;
2394 		h.h3->tp_snaplen = snaplen;
2395 		h.h3->tp_mac = macoff;
2396 		h.h3->tp_net = netoff;
2397 		h.h3->tp_sec  = ts.tv_sec;
2398 		h.h3->tp_nsec = ts.tv_nsec;
2399 		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2400 		hdrlen = sizeof(*h.h3);
2401 		break;
2402 	default:
2403 		BUG();
2404 	}
2405 
2406 	sll = h.raw + TPACKET_ALIGN(hdrlen);
2407 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2408 	sll->sll_family = AF_PACKET;
2409 	sll->sll_hatype = dev->type;
2410 	sll->sll_protocol = skb->protocol;
2411 	sll->sll_pkttype = skb->pkt_type;
2412 	if (unlikely(po->origdev))
2413 		sll->sll_ifindex = orig_dev->ifindex;
2414 	else
2415 		sll->sll_ifindex = dev->ifindex;
2416 
2417 	smp_mb();
2418 
2419 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2420 	if (po->tp_version <= TPACKET_V2) {
2421 		u8 *start, *end;
2422 
2423 		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2424 					macoff + snaplen);
2425 
2426 		for (start = h.raw; start < end; start += PAGE_SIZE)
2427 			flush_dcache_page(pgv_to_page(start));
2428 	}
2429 	smp_wmb();
2430 #endif
2431 
2432 	if (po->tp_version <= TPACKET_V2) {
2433 		spin_lock(&sk->sk_receive_queue.lock);
2434 		__packet_set_status(po, h.raw, status);
2435 		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2436 		spin_unlock(&sk->sk_receive_queue.lock);
2437 		sk->sk_data_ready(sk);
2438 	} else if (po->tp_version == TPACKET_V3) {
2439 		prb_clear_blk_fill_status(&po->rx_ring);
2440 	}
2441 
2442 drop_n_restore:
2443 	if (skb_head != skb->data && skb_shared(skb)) {
2444 		skb->data = skb_head;
2445 		skb->len = skb_len;
2446 	}
2447 drop:
2448 	if (!is_drop_n_account)
2449 		consume_skb(skb);
2450 	else
2451 		kfree_skb(skb);
2452 	return 0;
2453 
2454 drop_n_account:
2455 	spin_unlock(&sk->sk_receive_queue.lock);
2456 	atomic_inc(&po->tp_drops);
2457 	is_drop_n_account = true;
2458 
2459 	sk->sk_data_ready(sk);
2460 	kfree_skb(copy_skb);
2461 	goto drop_n_restore;
2462 }
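
/* Illustrative sketch of the userspace half of the V2 RX ring filled above:
 * wait for readiness (e.g. with poll()), consume frames whose status has left
 * TP_STATUS_KERNEL, then hand each slot back. ring, frame_size and frame_nr
 * are placeholders from the application's PACKET_RX_RING setup:
 *
 *	struct tpacket2_hdr *hdr = ring + i * frame_size;
 *	while (hdr->tp_status & TP_STATUS_USER) {
 *		handle((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		i = (i + 1) % frame_nr;
 *		hdr = ring + i * frame_size;
 *	}
 */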
2463 
2464 static void tpacket_destruct_skb(struct sk_buff *skb)
2465 {
2466 	struct packet_sock *po = pkt_sk(skb->sk);
2467 
2468 	if (likely(po->tx_ring.pg_vec)) {
2469 		void *ph;
2470 		__u32 ts;
2471 
2472 		ph = skb_zcopy_get_nouarg(skb);
2473 		packet_dec_pending(&po->tx_ring);
2474 
2475 		ts = __packet_set_timestamp(po, ph, skb);
2476 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2477 
2478 		if (!packet_read_pending(&po->tx_ring))
2479 			complete(&po->skb_completion);
2480 	}
2481 
2482 	sock_wfree(skb);
2483 }
2484 
2485 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2486 {
2487 	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2488 	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2489 	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2490 	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2491 		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2492 			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2493 			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2494 
2495 	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2496 		return -EINVAL;
2497 
2498 	return 0;
2499 }
2500 
2501 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2502 				 struct virtio_net_hdr *vnet_hdr)
2503 {
2504 	if (*len < sizeof(*vnet_hdr))
2505 		return -EINVAL;
2506 	*len -= sizeof(*vnet_hdr);
2507 
2508 	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2509 		return -EFAULT;
2510 
2511 	return __packet_snd_vnet_parse(vnet_hdr, *len);
2512 }
2513 
2514 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2515 		void *frame, struct net_device *dev, void *data, int tp_len,
2516 		__be16 proto, unsigned char *addr, int hlen, int copylen,
2517 		const struct sockcm_cookie *sockc)
2518 {
2519 	union tpacket_uhdr ph;
2520 	int to_write, offset, len, nr_frags, len_max;
2521 	struct socket *sock = po->sk.sk_socket;
2522 	struct page *page;
2523 	int err;
2524 
2525 	ph.raw = frame;
2526 
2527 	skb->protocol = proto;
2528 	skb->dev = dev;
2529 	skb->priority = po->sk.sk_priority;
2530 	skb->mark = po->sk.sk_mark;
2531 	skb->tstamp = sockc->transmit_time;
2532 	skb_setup_tx_timestamp(skb, sockc->tsflags);
2533 	skb_zcopy_set_nouarg(skb, ph.raw);
2534 
2535 	skb_reserve(skb, hlen);
2536 	skb_reset_network_header(skb);
2537 
2538 	to_write = tp_len;
2539 
2540 	if (sock->type == SOCK_DGRAM) {
2541 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2542 				NULL, tp_len);
2543 		if (unlikely(err < 0))
2544 			return -EINVAL;
2545 	} else if (copylen) {
2546 		int hdrlen = min_t(int, copylen, tp_len);
2547 
2548 		skb_push(skb, dev->hard_header_len);
2549 		skb_put(skb, copylen - dev->hard_header_len);
2550 		err = skb_store_bits(skb, 0, data, hdrlen);
2551 		if (unlikely(err))
2552 			return err;
2553 		if (!dev_validate_header(dev, skb->data, hdrlen))
2554 			return -EINVAL;
2555 
2556 		data += hdrlen;
2557 		to_write -= hdrlen;
2558 	}
2559 
2560 	offset = offset_in_page(data);
2561 	len_max = PAGE_SIZE - offset;
2562 	len = ((to_write > len_max) ? len_max : to_write);
2563 
2564 	skb->data_len = to_write;
2565 	skb->len += to_write;
2566 	skb->truesize += to_write;
2567 	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2568 
2569 	while (likely(to_write)) {
2570 		nr_frags = skb_shinfo(skb)->nr_frags;
2571 
2572 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2573 			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2574 			       MAX_SKB_FRAGS);
2575 			return -EFAULT;
2576 		}
2577 
2578 		page = pgv_to_page(data);
2579 		data += len;
2580 		flush_dcache_page(page);
2581 		get_page(page);
2582 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2583 		to_write -= len;
2584 		offset = 0;
2585 		len_max = PAGE_SIZE;
2586 		len = ((to_write > len_max) ? len_max : to_write);
2587 	}
2588 
2589 	packet_parse_headers(skb, sock);
2590 
2591 	return tp_len;
2592 }
2593 
2594 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2595 				int size_max, void **data)
2596 {
2597 	union tpacket_uhdr ph;
2598 	int tp_len, off;
2599 
2600 	ph.raw = frame;
2601 
2602 	switch (po->tp_version) {
2603 	case TPACKET_V3:
2604 		if (ph.h3->tp_next_offset != 0) {
2605 			pr_warn_once("variable sized slot not supported\n");
2606 			return -EINVAL;
2607 		}
2608 		tp_len = ph.h3->tp_len;
2609 		break;
2610 	case TPACKET_V2:
2611 		tp_len = ph.h2->tp_len;
2612 		break;
2613 	default:
2614 		tp_len = ph.h1->tp_len;
2615 		break;
2616 	}
2617 	if (unlikely(tp_len > size_max)) {
2618 		pr_err("packet size is too large (%d > %d)\n", tp_len, size_max);
2619 		return -EMSGSIZE;
2620 	}
2621 
2622 	if (unlikely(po->tp_tx_has_off)) {
2623 		int off_min, off_max;
2624 
2625 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2626 		off_max = po->tx_ring.frame_size - tp_len;
2627 		if (po->sk.sk_type == SOCK_DGRAM) {
2628 			switch (po->tp_version) {
2629 			case TPACKET_V3:
2630 				off = ph.h3->tp_net;
2631 				break;
2632 			case TPACKET_V2:
2633 				off = ph.h2->tp_net;
2634 				break;
2635 			default:
2636 				off = ph.h1->tp_net;
2637 				break;
2638 			}
2639 		} else {
2640 			switch (po->tp_version) {
2641 			case TPACKET_V3:
2642 				off = ph.h3->tp_mac;
2643 				break;
2644 			case TPACKET_V2:
2645 				off = ph.h2->tp_mac;
2646 				break;
2647 			default:
2648 				off = ph.h1->tp_mac;
2649 				break;
2650 			}
2651 		}
2652 		if (unlikely((off < off_min) || (off_max < off)))
2653 			return -EINVAL;
2654 	} else {
2655 		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2656 	}
2657 
2658 	*data = frame + off;
2659 	return tp_len;
2660 }
2661 
2662 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2663 {
2664 	struct sk_buff *skb = NULL;
2665 	struct net_device *dev;
2666 	struct virtio_net_hdr *vnet_hdr = NULL;
2667 	struct sockcm_cookie sockc;
2668 	__be16 proto;
2669 	int err, reserve = 0;
2670 	void *ph;
2671 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2672 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2673 	unsigned char *addr = NULL;
2674 	int tp_len, size_max;
2675 	void *data;
2676 	int len_sum = 0;
2677 	int status = TP_STATUS_AVAILABLE;
2678 	int hlen, tlen, copylen = 0;
2679 	long timeo = 0;
2680 
2681 	mutex_lock(&po->pg_vec_lock);
2682 
2683 	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2684 	 * we need to confirm it under protection of pg_vec_lock.
2685 	 */
2686 	if (unlikely(!po->tx_ring.pg_vec)) {
2687 		err = -EBUSY;
2688 		goto out;
2689 	}
2690 	if (likely(saddr == NULL)) {
2691 		dev	= packet_cached_dev_get(po);
2692 		proto	= READ_ONCE(po->num);
2693 	} else {
2694 		err = -EINVAL;
2695 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2696 			goto out;
2697 		if (msg->msg_namelen < (saddr->sll_halen
2698 					+ offsetof(struct sockaddr_ll,
2699 						sll_addr)))
2700 			goto out;
2701 		proto	= saddr->sll_protocol;
2702 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2703 		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2704 			if (dev && msg->msg_namelen < dev->addr_len +
2705 				   offsetof(struct sockaddr_ll, sll_addr))
2706 				goto out_put;
2707 			addr = saddr->sll_addr;
2708 		}
2709 	}
2710 
2711 	err = -ENXIO;
2712 	if (unlikely(dev == NULL))
2713 		goto out;
2714 	err = -ENETDOWN;
2715 	if (unlikely(!(dev->flags & IFF_UP)))
2716 		goto out_put;
2717 
2718 	sockcm_init(&sockc, &po->sk);
2719 	if (msg->msg_controllen) {
2720 		err = sock_cmsg_send(&po->sk, msg, &sockc);
2721 		if (unlikely(err))
2722 			goto out_put;
2723 	}
2724 
2725 	if (po->sk.sk_socket->type == SOCK_RAW)
2726 		reserve = dev->hard_header_len;
2727 	size_max = po->tx_ring.frame_size
2728 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2729 
2730 	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2731 		size_max = dev->mtu + reserve + VLAN_HLEN;
2732 
2733 	reinit_completion(&po->skb_completion);
2734 
2735 	do {
2736 		ph = packet_current_frame(po, &po->tx_ring,
2737 					  TP_STATUS_SEND_REQUEST);
2738 		if (unlikely(ph == NULL)) {
2739 			if (need_wait && skb) {
2740 				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2741 				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2742 				if (timeo <= 0) {
2743 					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2744 					goto out_put;
2745 				}
2746 			}
2747 			/* check for additional frames */
2748 			continue;
2749 		}
2750 
2751 		skb = NULL;
2752 		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2753 		if (tp_len < 0)
2754 			goto tpacket_error;
2755 
2756 		status = TP_STATUS_SEND_REQUEST;
2757 		hlen = LL_RESERVED_SPACE(dev);
2758 		tlen = dev->needed_tailroom;
2759 		if (po->has_vnet_hdr) {
2760 			vnet_hdr = data;
2761 			data += sizeof(*vnet_hdr);
2762 			tp_len -= sizeof(*vnet_hdr);
2763 			if (tp_len < 0 ||
2764 			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2765 				tp_len = -EINVAL;
2766 				goto tpacket_error;
2767 			}
2768 			copylen = __virtio16_to_cpu(vio_le(),
2769 						    vnet_hdr->hdr_len);
2770 		}
2771 		copylen = max_t(int, copylen, dev->hard_header_len);
2772 		skb = sock_alloc_send_skb(&po->sk,
2773 				hlen + tlen + sizeof(struct sockaddr_ll) +
2774 				(copylen - dev->hard_header_len),
2775 				!need_wait, &err);
2776 
2777 		if (unlikely(skb == NULL)) {
2778 			/* we assume the socket was initially writeable ... */
2779 			if (likely(len_sum > 0))
2780 				err = len_sum;
2781 			goto out_status;
2782 		}
2783 		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2784 					  addr, hlen, copylen, &sockc);
2785 		if (likely(tp_len >= 0) &&
2786 		    tp_len > dev->mtu + reserve &&
2787 		    !po->has_vnet_hdr &&
2788 		    !packet_extra_vlan_len_allowed(dev, skb))
2789 			tp_len = -EMSGSIZE;
2790 
2791 		if (unlikely(tp_len < 0)) {
2792 tpacket_error:
2793 			if (po->tp_loss) {
2794 				__packet_set_status(po, ph,
2795 						TP_STATUS_AVAILABLE);
2796 				packet_increment_head(&po->tx_ring);
2797 				kfree_skb(skb);
2798 				continue;
2799 			} else {
2800 				status = TP_STATUS_WRONG_FORMAT;
2801 				err = tp_len;
2802 				goto out_status;
2803 			}
2804 		}
2805 
2806 		if (po->has_vnet_hdr) {
2807 			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2808 				tp_len = -EINVAL;
2809 				goto tpacket_error;
2810 			}
2811 			virtio_net_hdr_set_proto(skb, vnet_hdr);
2812 		}
2813 
2814 		skb->destructor = tpacket_destruct_skb;
2815 		__packet_set_status(po, ph, TP_STATUS_SENDING);
2816 		packet_inc_pending(&po->tx_ring);
2817 
2818 		status = TP_STATUS_SEND_REQUEST;
2819 		err = po->xmit(skb);
2820 		if (unlikely(err > 0)) {
2821 			err = net_xmit_errno(err);
2822 			if (err && __packet_get_status(po, ph) ==
2823 				   TP_STATUS_AVAILABLE) {
2824 				/* skb was destructed already */
2825 				skb = NULL;
2826 				goto out_status;
2827 			}
2828 			/*
2829 			 * skb was dropped but not destructed yet;
2830 			 * let's treat it like congestion or err < 0
2831 			 */
2832 			err = 0;
2833 		}
2834 		packet_increment_head(&po->tx_ring);
2835 		len_sum += tp_len;
2836 	} while (likely((ph != NULL) ||
2837 		/* Note: packet_read_pending() might be slow if we have
2838 		 * to call it, as it's a per-cpu variable, but in the fast path
2839 		 * we already short-circuit the loop with the first
2840 		 * condition, and luckily don't have to go that path
2841 		 * anyway.
2842 		 */
2843 		 (need_wait && packet_read_pending(&po->tx_ring))));
2844 
2845 	err = len_sum;
2846 	goto out_put;
2847 
2848 out_status:
2849 	__packet_set_status(po, ph, status);
2850 	kfree_skb(skb);
2851 out_put:
2852 	dev_put(dev);
2853 out:
2854 	mutex_unlock(&po->pg_vec_lock);
2855 	return err;
2856 }
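
/* Illustrative sketch of the userspace half of the V2 TX ring drained above:
 * write the frame into the next slot, publish it, then kick the kernel with
 * an empty send(); MSG_DONTWAIT makes the call return without waiting for the
 * ring to drain:
 *
 *	memcpy((char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
 *	       frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	__sync_synchronize();
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 */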
2857 
2858 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2859 				        size_t reserve, size_t len,
2860 				        size_t linear, int noblock,
2861 				        int *err)
2862 {
2863 	struct sk_buff *skb;
2864 
2865 	/* Under a page?  Don't bother with paged skb. */
2866 	if (prepad + len < PAGE_SIZE || !linear)
2867 		linear = len;
2868 
2869 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2870 				   err, 0);
2871 	if (!skb)
2872 		return NULL;
2873 
2874 	skb_reserve(skb, reserve);
2875 	skb_put(skb, linear);
2876 	skb->data_len = len - linear;
2877 	skb->len += len - linear;
2878 
2879 	return skb;
2880 }
2881 
2882 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2883 {
2884 	struct sock *sk = sock->sk;
2885 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2886 	struct sk_buff *skb;
2887 	struct net_device *dev;
2888 	__be16 proto;
2889 	unsigned char *addr = NULL;
2890 	int err, reserve = 0;
2891 	struct sockcm_cookie sockc;
2892 	struct virtio_net_hdr vnet_hdr = { 0 };
2893 	int offset = 0;
2894 	struct packet_sock *po = pkt_sk(sk);
2895 	bool has_vnet_hdr = false;
2896 	int hlen, tlen, linear;
2897 	int extra_len = 0;
2898 
2899 	/*
2900 	 *	Get and verify the address.
2901 	 */
2902 
2903 	if (likely(saddr == NULL)) {
2904 		dev	= packet_cached_dev_get(po);
2905 		proto	= READ_ONCE(po->num);
2906 	} else {
2907 		err = -EINVAL;
2908 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2909 			goto out;
2910 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2911 			goto out;
2912 		proto	= saddr->sll_protocol;
2913 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2914 		if (sock->type == SOCK_DGRAM) {
2915 			if (dev && msg->msg_namelen < dev->addr_len +
2916 				   offsetof(struct sockaddr_ll, sll_addr))
2917 				goto out_unlock;
2918 			addr = saddr->sll_addr;
2919 		}
2920 	}
2921 
2922 	err = -ENXIO;
2923 	if (unlikely(dev == NULL))
2924 		goto out_unlock;
2925 	err = -ENETDOWN;
2926 	if (unlikely(!(dev->flags & IFF_UP)))
2927 		goto out_unlock;
2928 
2929 	sockcm_init(&sockc, sk);
2930 	sockc.mark = sk->sk_mark;
2931 	if (msg->msg_controllen) {
2932 		err = sock_cmsg_send(sk, msg, &sockc);
2933 		if (unlikely(err))
2934 			goto out_unlock;
2935 	}
2936 
2937 	if (sock->type == SOCK_RAW)
2938 		reserve = dev->hard_header_len;
2939 	if (po->has_vnet_hdr) {
2940 		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2941 		if (err)
2942 			goto out_unlock;
2943 		has_vnet_hdr = true;
2944 	}
2945 
2946 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2947 		if (!netif_supports_nofcs(dev)) {
2948 			err = -EPROTONOSUPPORT;
2949 			goto out_unlock;
2950 		}
2951 		extra_len = 4; /* We're doing our own CRC */
2952 	}
2953 
2954 	err = -EMSGSIZE;
2955 	if (!vnet_hdr.gso_type &&
2956 	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2957 		goto out_unlock;
2958 
2959 	err = -ENOBUFS;
2960 	hlen = LL_RESERVED_SPACE(dev);
2961 	tlen = dev->needed_tailroom;
2962 	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2963 	linear = max(linear, min_t(int, len, dev->hard_header_len));
2964 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2965 			       msg->msg_flags & MSG_DONTWAIT, &err);
2966 	if (skb == NULL)
2967 		goto out_unlock;
2968 
2969 	skb_reset_network_header(skb);
2970 
2971 	err = -EINVAL;
2972 	if (sock->type == SOCK_DGRAM) {
2973 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2974 		if (unlikely(offset < 0))
2975 			goto out_free;
2976 	} else if (reserve) {
2977 		skb_reserve(skb, -reserve);
2978 		if (len < reserve + sizeof(struct ipv6hdr) &&
2979 		    dev->min_header_len != dev->hard_header_len)
2980 			skb_reset_network_header(skb);
2981 	}
2982 
2983 	/* Returns -EFAULT on error */
2984 	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2985 	if (err)
2986 		goto out_free;
2987 
2988 	if (sock->type == SOCK_RAW &&
2989 	    !dev_validate_header(dev, skb->data, len)) {
2990 		err = -EINVAL;
2991 		goto out_free;
2992 	}
2993 
2994 	skb_setup_tx_timestamp(skb, sockc.tsflags);
2995 
2996 	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2997 	    !packet_extra_vlan_len_allowed(dev, skb)) {
2998 		err = -EMSGSIZE;
2999 		goto out_free;
3000 	}
3001 
3002 	skb->protocol = proto;
3003 	skb->dev = dev;
3004 	skb->priority = sk->sk_priority;
3005 	skb->mark = sockc.mark;
3006 	skb->tstamp = sockc.transmit_time;
3007 
3008 	if (has_vnet_hdr) {
3009 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3010 		if (err)
3011 			goto out_free;
3012 		len += sizeof(vnet_hdr);
3013 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
3014 	}
3015 
3016 	packet_parse_headers(skb, sock);
3017 
3018 	if (unlikely(extra_len == 4))
3019 		skb->no_fcs = 1;
3020 
3021 	err = po->xmit(skb);
3022 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
3023 		goto out_unlock;
3024 
3025 	dev_put(dev);
3026 
3027 	return len;
3028 
3029 out_free:
3030 	kfree_skb(skb);
3031 out_unlock:
3032 	if (dev)
3033 		dev_put(dev);
3034 out:
3035 	return err;
3036 }
3037 
3038 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3039 {
3040 	struct sock *sk = sock->sk;
3041 	struct packet_sock *po = pkt_sk(sk);
3042 
3043 	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3044 	 * tpacket_snd() will redo the check safely.
3045 	 */
3046 	if (data_race(po->tx_ring.pg_vec))
3047 		return tpacket_snd(po, msg);
3048 
3049 	return packet_snd(sock, msg, len);
3050 }
3051 
3052 /*
3053  *	Close a PACKET socket. This is fairly simple. We immediately go
3054  *	to 'closed' state and remove our protocol entry in the device list.
3055  */
3056 
3057 static int packet_release(struct socket *sock)
3058 {
3059 	struct sock *sk = sock->sk;
3060 	struct packet_sock *po;
3061 	struct packet_fanout *f;
3062 	struct net *net;
3063 	union tpacket_req_u req_u;
3064 
3065 	if (!sk)
3066 		return 0;
3067 
3068 	net = sock_net(sk);
3069 	po = pkt_sk(sk);
3070 
3071 	mutex_lock(&net->packet.sklist_lock);
3072 	sk_del_node_init_rcu(sk);
3073 	mutex_unlock(&net->packet.sklist_lock);
3074 
3075 	preempt_disable();
3076 	sock_prot_inuse_add(net, sk->sk_prot, -1);
3077 	preempt_enable();
3078 
3079 	spin_lock(&po->bind_lock);
3080 	unregister_prot_hook(sk, false);
3081 	packet_cached_dev_reset(po);
3082 
3083 	if (po->prot_hook.dev) {
3084 		dev_put(po->prot_hook.dev);
3085 		po->prot_hook.dev = NULL;
3086 	}
3087 	spin_unlock(&po->bind_lock);
3088 
3089 	packet_flush_mclist(sk);
3090 
3091 	lock_sock(sk);
3092 	if (po->rx_ring.pg_vec) {
3093 		memset(&req_u, 0, sizeof(req_u));
3094 		packet_set_ring(sk, &req_u, 1, 0);
3095 	}
3096 
3097 	if (po->tx_ring.pg_vec) {
3098 		memset(&req_u, 0, sizeof(req_u));
3099 		packet_set_ring(sk, &req_u, 1, 1);
3100 	}
3101 	release_sock(sk);
3102 
3103 	f = fanout_release(sk);
3104 
3105 	synchronize_net();
3106 
3107 	kfree(po->rollover);
3108 	if (f) {
3109 		fanout_release_data(f);
3110 		kvfree(f);
3111 	}
3112 	/*
3113 	 *	Now the socket is dead. No more input will appear.
3114 	 */
3115 	sock_orphan(sk);
3116 	sock->sk = NULL;
3117 
3118 	/* Purge queues */
3119 
3120 	skb_queue_purge(&sk->sk_receive_queue);
3121 	packet_free_pending(po);
3122 	sk_refcnt_debug_release(sk);
3123 
3124 	sock_put(sk);
3125 	return 0;
3126 }
3127 
3128 /*
3129  *	Attach a packet hook.
3130  */
3131 
3132 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3133 			  __be16 proto)
3134 {
3135 	struct packet_sock *po = pkt_sk(sk);
3136 	struct net_device *dev_curr;
3137 	__be16 proto_curr;
3138 	bool need_rehook;
3139 	struct net_device *dev = NULL;
3140 	int ret = 0;
3141 	bool unlisted = false;
3142 
3143 	lock_sock(sk);
3144 	spin_lock(&po->bind_lock);
3145 	rcu_read_lock();
3146 
3147 	if (po->fanout) {
3148 		ret = -EINVAL;
3149 		goto out_unlock;
3150 	}
3151 
3152 	if (name) {
3153 		dev = dev_get_by_name_rcu(sock_net(sk), name);
3154 		if (!dev) {
3155 			ret = -ENODEV;
3156 			goto out_unlock;
3157 		}
3158 	} else if (ifindex) {
3159 		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3160 		if (!dev) {
3161 			ret = -ENODEV;
3162 			goto out_unlock;
3163 		}
3164 	}
3165 
3166 	if (dev)
3167 		dev_hold(dev);
3168 
3169 	proto_curr = po->prot_hook.type;
3170 	dev_curr = po->prot_hook.dev;
3171 
3172 	need_rehook = proto_curr != proto || dev_curr != dev;
3173 
3174 	if (need_rehook) {
3175 		if (po->running) {
3176 			rcu_read_unlock();
3177 			/* prevents packet_notifier() from calling
3178 			 * register_prot_hook()
3179 			 */
3180 			WRITE_ONCE(po->num, 0);
3181 			__unregister_prot_hook(sk, true);
3182 			rcu_read_lock();
3183 			dev_curr = po->prot_hook.dev;
3184 			if (dev)
3185 				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3186 								 dev->ifindex);
3187 		}
3188 
3189 		BUG_ON(po->running);
3190 		WRITE_ONCE(po->num, proto);
3191 		po->prot_hook.type = proto;
3192 
3193 		if (unlikely(unlisted)) {
3194 			dev_put(dev);
3195 			po->prot_hook.dev = NULL;
3196 			WRITE_ONCE(po->ifindex, -1);
3197 			packet_cached_dev_reset(po);
3198 		} else {
3199 			po->prot_hook.dev = dev;
3200 			WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3201 			packet_cached_dev_assign(po, dev);
3202 		}
3203 	}
3204 	if (dev_curr)
3205 		dev_put(dev_curr);
3206 
3207 	if (proto == 0 || !need_rehook)
3208 		goto out_unlock;
3209 
3210 	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3211 		register_prot_hook(sk);
3212 	} else {
3213 		sk->sk_err = ENETDOWN;
3214 		if (!sock_flag(sk, SOCK_DEAD))
3215 			sk->sk_error_report(sk);
3216 	}
3217 
3218 out_unlock:
3219 	rcu_read_unlock();
3220 	spin_unlock(&po->bind_lock);
3221 	release_sock(sk);
3222 	return ret;
3223 }
3224 
3225 /*
3226  *	Bind a packet socket to a device
3227  */
3228 
3229 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3230 			    int addr_len)
3231 {
3232 	struct sock *sk = sock->sk;
3233 	char name[sizeof(uaddr->sa_data) + 1];
3234 
3235 	/*
3236 	 *	Check legality
3237 	 */
3238 
3239 	if (addr_len != sizeof(struct sockaddr))
3240 		return -EINVAL;
3241 	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
3242 	 * zero-terminated.
3243 	 */
3244 	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3245 	name[sizeof(uaddr->sa_data)] = 0;
3246 
3247 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3248 }
3249 
3250 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3251 {
3252 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3253 	struct sock *sk = sock->sk;
3254 
3255 	/*
3256 	 *	Check legality
3257 	 */
3258 
3259 	if (addr_len < sizeof(struct sockaddr_ll))
3260 		return -EINVAL;
3261 	if (sll->sll_family != AF_PACKET)
3262 		return -EINVAL;
3263 
3264 	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3265 			      sll->sll_protocol ? : pkt_sk(sk)->num);
3266 }
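
/* Illustrative sketch: binding a packet socket to one interface from
 * userspace ("eth0" is an example name):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */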
3267 
3268 static struct proto packet_proto = {
3269 	.name	  = "PACKET",
3270 	.owner	  = THIS_MODULE,
3271 	.obj_size = sizeof(struct packet_sock),
3272 };
3273 
3274 /*
3275  *	Create a packet socket (SOCK_RAW, SOCK_DGRAM, or legacy SOCK_PACKET).
3276  */
3277 
3278 static int packet_create(struct net *net, struct socket *sock, int protocol,
3279 			 int kern)
3280 {
3281 	struct sock *sk;
3282 	struct packet_sock *po;
3283 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3284 	int err;
3285 
3286 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3287 		return -EPERM;
3288 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3289 	    sock->type != SOCK_PACKET)
3290 		return -ESOCKTNOSUPPORT;
3291 
3292 	sock->state = SS_UNCONNECTED;
3293 
3294 	err = -ENOBUFS;
3295 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3296 	if (sk == NULL)
3297 		goto out;
3298 
3299 	sock->ops = &packet_ops;
3300 	if (sock->type == SOCK_PACKET)
3301 		sock->ops = &packet_ops_spkt;
3302 
3303 	sock_init_data(sock, sk);
3304 
3305 	po = pkt_sk(sk);
3306 	init_completion(&po->skb_completion);
3307 	sk->sk_family = PF_PACKET;
3308 	po->num = proto;
3309 	po->xmit = dev_queue_xmit;
3310 
3311 	err = packet_alloc_pending(po);
3312 	if (err)
3313 		goto out2;
3314 
3315 	packet_cached_dev_reset(po);
3316 
3317 	sk->sk_destruct = packet_sock_destruct;
3318 	sk_refcnt_debug_inc(sk);
3319 
3320 	/*
3321 	 *	Attach a protocol block
3322 	 */
3323 
3324 	spin_lock_init(&po->bind_lock);
3325 	mutex_init(&po->pg_vec_lock);
3326 	po->rollover = NULL;
3327 	po->prot_hook.func = packet_rcv;
3328 
3329 	if (sock->type == SOCK_PACKET)
3330 		po->prot_hook.func = packet_rcv_spkt;
3331 
3332 	po->prot_hook.af_packet_priv = sk;
3333 	po->prot_hook.af_packet_net = sock_net(sk);
3334 
3335 	if (proto) {
3336 		po->prot_hook.type = proto;
3337 		__register_prot_hook(sk);
3338 	}
3339 
3340 	mutex_lock(&net->packet.sklist_lock);
3341 	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3342 	mutex_unlock(&net->packet.sklist_lock);
3343 
3344 	preempt_disable();
3345 	sock_prot_inuse_add(net, &packet_proto, 1);
3346 	preempt_enable();
3347 
3348 	return 0;
3349 out2:
3350 	sk_free(sk);
3351 out:
3352 	return err;
3353 }
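
/* Illustrative sketch: the corresponding userspace call, which requires
 * CAP_NET_RAW as checked above:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 */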
3354 
3355 /*
3356  *	Pull a packet from our receive queue and hand it to the user.
3357  *	If necessary we block.
3358  */
3359 
3360 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3361 			  int flags)
3362 {
3363 	struct sock *sk = sock->sk;
3364 	struct sk_buff *skb;
3365 	int copied, err;
3366 	int vnet_hdr_len = 0;
3367 	unsigned int origlen = 0;
3368 
3369 	err = -EINVAL;
3370 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3371 		goto out;
3372 
3373 #if 0
3374 	/* What error should we return now? EUNATTACH? */
3375 	if (pkt_sk(sk)->ifindex < 0)
3376 		return -ENODEV;
3377 #endif
3378 
3379 	if (flags & MSG_ERRQUEUE) {
3380 		err = sock_recv_errqueue(sk, msg, len,
3381 					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3382 		goto out;
3383 	}
3384 
3385 	/*
3386 	 *	Call the generic datagram receiver. This handles all sorts
3387 	 *	of horrible races and re-entrancy so we can forget about it
3388 	 *	in the protocol layers.
3389 	 *
3390 	 *	Now it will return ENETDOWN, if the device has just gone down,
3391 	 *	but then it will block.
3392 	 */
3393 
3394 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3395 
3396 	/*
3397 	 *	An error occurred so return it. Because skb_recv_datagram()
3398 	 *	handles the blocking we don't see and worry about blocking
3399 	 *	retries.
3400 	 */
3401 
3402 	if (skb == NULL)
3403 		goto out;
3404 
3405 	packet_rcv_try_clear_pressure(pkt_sk(sk));
3406 
3407 	if (pkt_sk(sk)->has_vnet_hdr) {
3408 		err = packet_rcv_vnet(msg, skb, &len);
3409 		if (err)
3410 			goto out_free;
3411 		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3412 	}
3413 
3414 	/* You lose any data beyond the buffer you gave. If this worries
3415 	 * the user program, it can ask the device for its MTU
3416 	 * anyway.
3417 	 */
3418 	copied = skb->len;
3419 	if (copied > len) {
3420 		copied = len;
3421 		msg->msg_flags |= MSG_TRUNC;
3422 	}
3423 
3424 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3425 	if (err)
3426 		goto out_free;
3427 
3428 	if (sock->type != SOCK_PACKET) {
3429 		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3430 
3431 		/* Original length was stored in sockaddr_ll fields */
3432 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3433 		sll->sll_family = AF_PACKET;
3434 		sll->sll_protocol = skb->protocol;
3435 	}
3436 
3437 	sock_recv_ts_and_drops(msg, sk, skb);
3438 
3439 	if (msg->msg_name) {
3440 		const size_t max_len = min(sizeof(skb->cb),
3441 					   sizeof(struct sockaddr_storage));
3442 		int copy_len;
3443 
3444 		/* If the address length field is there to be filled
3445 		 * in, we fill it in now.
3446 		 */
3447 		if (sock->type == SOCK_PACKET) {
3448 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3449 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3450 			copy_len = msg->msg_namelen;
3451 		} else {
3452 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3453 
3454 			msg->msg_namelen = sll->sll_halen +
3455 				offsetof(struct sockaddr_ll, sll_addr);
3456 			copy_len = msg->msg_namelen;
3457 			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3458 				memset(msg->msg_name +
3459 				       offsetof(struct sockaddr_ll, sll_addr),
3460 				       0, sizeof(sll->sll_addr));
3461 				msg->msg_namelen = sizeof(struct sockaddr_ll);
3462 			}
3463 		}
3464 		if (WARN_ON_ONCE(copy_len > max_len)) {
3465 			copy_len = max_len;
3466 			msg->msg_namelen = copy_len;
3467 		}
3468 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3469 	}
3470 
3471 	if (pkt_sk(sk)->auxdata) {
3472 		struct tpacket_auxdata aux;
3473 
3474 		aux.tp_status = TP_STATUS_USER;
3475 		if (skb->ip_summed == CHECKSUM_PARTIAL)
3476 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3477 		else if (skb->pkt_type != PACKET_OUTGOING &&
3478 			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3479 			  skb_csum_unnecessary(skb)))
3480 			aux.tp_status |= TP_STATUS_CSUM_VALID;
3481 
3482 		aux.tp_len = origlen;
3483 		aux.tp_snaplen = skb->len;
3484 		aux.tp_mac = 0;
3485 		aux.tp_net = skb_network_offset(skb);
3486 		if (skb_vlan_tag_present(skb)) {
3487 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3488 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3489 			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3490 		} else {
3491 			aux.tp_vlan_tci = 0;
3492 			aux.tp_vlan_tpid = 0;
3493 		}
3494 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3495 	}
3496 
3497 	/*
3498 	 *	Free or return the buffer as appropriate. Again this
3499 	 *	hides all the races and re-entrancy issues from us.
3500 	 */
3501 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3502 
3503 out_free:
3504 	skb_free_datagram(sk, skb);
3505 out:
3506 	return err;
3507 }
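
/* Illustrative sketch: the PACKET_AUXDATA control message built above is
 * enabled and read from userspace roughly like this:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	...
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA)
 *			aux = (struct tpacket_auxdata *)CMSG_DATA(cmsg);
 */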
3508 
3509 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3510 			       int peer)
3511 {
3512 	struct net_device *dev;
3513 	struct sock *sk	= sock->sk;
3514 
3515 	if (peer)
3516 		return -EOPNOTSUPP;
3517 
3518 	uaddr->sa_family = AF_PACKET;
3519 	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3520 	rcu_read_lock();
3521 	dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3522 	if (dev)
3523 		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3524 	rcu_read_unlock();
3525 
3526 	return sizeof(*uaddr);
3527 }
3528 
3529 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3530 			  int peer)
3531 {
3532 	struct net_device *dev;
3533 	struct sock *sk = sock->sk;
3534 	struct packet_sock *po = pkt_sk(sk);
3535 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3536 	int ifindex;
3537 
3538 	if (peer)
3539 		return -EOPNOTSUPP;
3540 
3541 	ifindex = READ_ONCE(po->ifindex);
3542 	sll->sll_family = AF_PACKET;
3543 	sll->sll_ifindex = ifindex;
3544 	sll->sll_protocol = READ_ONCE(po->num);
3545 	sll->sll_pkttype = 0;
3546 	rcu_read_lock();
3547 	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3548 	if (dev) {
3549 		sll->sll_hatype = dev->type;
3550 		sll->sll_halen = dev->addr_len;
3551 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3552 	} else {
3553 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3554 		sll->sll_halen = 0;
3555 	}
3556 	rcu_read_unlock();
3557 
3558 	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3559 }
3560 
3561 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3562 			 int what)
3563 {
3564 	switch (i->type) {
3565 	case PACKET_MR_MULTICAST:
3566 		if (i->alen != dev->addr_len)
3567 			return -EINVAL;
3568 		if (what > 0)
3569 			return dev_mc_add(dev, i->addr);
3570 		else
3571 			return dev_mc_del(dev, i->addr);
3572 		break;
3573 	case PACKET_MR_PROMISC:
3574 		return dev_set_promiscuity(dev, what);
3575 	case PACKET_MR_ALLMULTI:
3576 		return dev_set_allmulti(dev, what);
3577 	case PACKET_MR_UNICAST:
3578 		if (i->alen != dev->addr_len)
3579 			return -EINVAL;
3580 		if (what > 0)
3581 			return dev_uc_add(dev, i->addr);
3582 		else
3583 			return dev_uc_del(dev, i->addr);
3584 		break;
3585 	default:
3586 		break;
3587 	}
3588 	return 0;
3589 }
3590 
3591 static void packet_dev_mclist_delete(struct net_device *dev,
3592 				     struct packet_mclist **mlp)
3593 {
3594 	struct packet_mclist *ml;
3595 
3596 	while ((ml = *mlp) != NULL) {
3597 		if (ml->ifindex == dev->ifindex) {
3598 			packet_dev_mc(dev, ml, -1);
3599 			*mlp = ml->next;
3600 			kfree(ml);
3601 		} else
3602 			mlp = &ml->next;
3603 	}
3604 }
3605 
3606 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3607 {
3608 	struct packet_sock *po = pkt_sk(sk);
3609 	struct packet_mclist *ml, *i;
3610 	struct net_device *dev;
3611 	int err;
3612 
3613 	rtnl_lock();
3614 
3615 	err = -ENODEV;
3616 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3617 	if (!dev)
3618 		goto done;
3619 
3620 	err = -EINVAL;
3621 	if (mreq->mr_alen > dev->addr_len)
3622 		goto done;
3623 
3624 	err = -ENOBUFS;
3625 	i = kmalloc(sizeof(*i), GFP_KERNEL);
3626 	if (i == NULL)
3627 		goto done;
3628 
3629 	err = 0;
3630 	for (ml = po->mclist; ml; ml = ml->next) {
3631 		if (ml->ifindex == mreq->mr_ifindex &&
3632 		    ml->type == mreq->mr_type &&
3633 		    ml->alen == mreq->mr_alen &&
3634 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3635 			ml->count++;
3636 			/* Free the new element ... */
3637 			kfree(i);
3638 			goto done;
3639 		}
3640 	}
3641 
3642 	i->type = mreq->mr_type;
3643 	i->ifindex = mreq->mr_ifindex;
3644 	i->alen = mreq->mr_alen;
3645 	memcpy(i->addr, mreq->mr_address, i->alen);
3646 	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3647 	i->count = 1;
3648 	i->next = po->mclist;
3649 	po->mclist = i;
3650 	err = packet_dev_mc(dev, i, 1);
3651 	if (err) {
3652 		po->mclist = i->next;
3653 		kfree(i);
3654 	}
3655 
3656 done:
3657 	rtnl_unlock();
3658 	return err;
3659 }
3660 
3661 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3662 {
3663 	struct packet_mclist *ml, **mlp;
3664 
3665 	rtnl_lock();
3666 
3667 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3668 		if (ml->ifindex == mreq->mr_ifindex &&
3669 		    ml->type == mreq->mr_type &&
3670 		    ml->alen == mreq->mr_alen &&
3671 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3672 			if (--ml->count == 0) {
3673 				struct net_device *dev;
3674 				*mlp = ml->next;
3675 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3676 				if (dev)
3677 					packet_dev_mc(dev, ml, -1);
3678 				kfree(ml);
3679 			}
3680 			break;
3681 		}
3682 	}
3683 	rtnl_unlock();
3684 	return 0;
3685 }
3686 
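/*
 * Editor's note -- a hedged userspace sketch of the membership calls
 * above; "fd" and "ifindex" are assumed to come from the caller.  Note
 * that identical memberships are reference-counted (ml->count above),
 * so each successful ADD needs a matching DROP:
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,  // no address needed
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *	// ... capture traffic ...
 *	setsockopt(fd, SOL_PACKET, PACKET_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
 */
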
3687 static void packet_flush_mclist(struct sock *sk)
3688 {
3689 	struct packet_sock *po = pkt_sk(sk);
3690 	struct packet_mclist *ml;
3691 
3692 	if (!po->mclist)
3693 		return;
3694 
3695 	rtnl_lock();
3696 	while ((ml = po->mclist) != NULL) {
3697 		struct net_device *dev;
3698 
3699 		po->mclist = ml->next;
3700 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3701 		if (dev != NULL)
3702 			packet_dev_mc(dev, ml, -1);
3703 		kfree(ml);
3704 	}
3705 	rtnl_unlock();
3706 }
3707 
3708 static int
3709 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3710 		  unsigned int optlen)
3711 {
3712 	struct sock *sk = sock->sk;
3713 	struct packet_sock *po = pkt_sk(sk);
3714 	int ret;
3715 
3716 	if (level != SOL_PACKET)
3717 		return -ENOPROTOOPT;
3718 
3719 	switch (optname) {
3720 	case PACKET_ADD_MEMBERSHIP:
3721 	case PACKET_DROP_MEMBERSHIP:
3722 	{
3723 		struct packet_mreq_max mreq;
3724 		int len = optlen;
3725 		memset(&mreq, 0, sizeof(mreq));
3726 		if (len < sizeof(struct packet_mreq))
3727 			return -EINVAL;
3728 		if (len > sizeof(mreq))
3729 			len = sizeof(mreq);
3730 		if (copy_from_sockptr(&mreq, optval, len))
3731 			return -EFAULT;
3732 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3733 			return -EINVAL;
3734 		if (optname == PACKET_ADD_MEMBERSHIP)
3735 			ret = packet_mc_add(sk, &mreq);
3736 		else
3737 			ret = packet_mc_drop(sk, &mreq);
3738 		return ret;
3739 	}
3740 
3741 	case PACKET_RX_RING:
3742 	case PACKET_TX_RING:
3743 	{
3744 		union tpacket_req_u req_u;
3745 		int len;
3746 
3747 		lock_sock(sk);
3748 		switch (po->tp_version) {
3749 		case TPACKET_V1:
3750 		case TPACKET_V2:
3751 			len = sizeof(req_u.req);
3752 			break;
3753 		case TPACKET_V3:
3754 		default:
3755 			len = sizeof(req_u.req3);
3756 			break;
3757 		}
3758 		if (optlen < len) {
3759 			ret = -EINVAL;
3760 		} else {
3761 			if (copy_from_sockptr(&req_u.req, optval, len))
3762 				ret = -EFAULT;
3763 			else
3764 				ret = packet_set_ring(sk, &req_u, 0,
3765 						    optname == PACKET_TX_RING);
3766 		}
3767 		release_sock(sk);
3768 		return ret;
3769 	}
3770 	case PACKET_COPY_THRESH:
3771 	{
3772 		int val;
3773 
3774 		if (optlen != sizeof(val))
3775 			return -EINVAL;
3776 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3777 			return -EFAULT;
3778 
3779 		pkt_sk(sk)->copy_thresh = val;
3780 		return 0;
3781 	}
3782 	case PACKET_VERSION:
3783 	{
3784 		int val;
3785 
3786 		if (optlen != sizeof(val))
3787 			return -EINVAL;
3788 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3789 			return -EFAULT;
3790 		switch (val) {
3791 		case TPACKET_V1:
3792 		case TPACKET_V2:
3793 		case TPACKET_V3:
3794 			break;
3795 		default:
3796 			return -EINVAL;
3797 		}
3798 		lock_sock(sk);
3799 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3800 			ret = -EBUSY;
3801 		} else {
3802 			po->tp_version = val;
3803 			ret = 0;
3804 		}
3805 		release_sock(sk);
3806 		return ret;
3807 	}
3808 	case PACKET_RESERVE:
3809 	{
3810 		unsigned int val;
3811 
3812 		if (optlen != sizeof(val))
3813 			return -EINVAL;
3814 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3815 			return -EFAULT;
3816 		if (val > INT_MAX)
3817 			return -EINVAL;
3818 		lock_sock(sk);
3819 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3820 			ret = -EBUSY;
3821 		} else {
3822 			po->tp_reserve = val;
3823 			ret = 0;
3824 		}
3825 		release_sock(sk);
3826 		return ret;
3827 	}
3828 	case PACKET_LOSS:
3829 	{
3830 		unsigned int val;
3831 
3832 		if (optlen != sizeof(val))
3833 			return -EINVAL;
3834 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3835 			return -EFAULT;
3836 
3837 		lock_sock(sk);
3838 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3839 			ret = -EBUSY;
3840 		} else {
3841 			po->tp_loss = !!val;
3842 			ret = 0;
3843 		}
3844 		release_sock(sk);
3845 		return ret;
3846 	}
3847 	case PACKET_AUXDATA:
3848 	{
3849 		int val;
3850 
3851 		if (optlen < sizeof(val))
3852 			return -EINVAL;
3853 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3854 			return -EFAULT;
3855 
3856 		lock_sock(sk);
3857 		po->auxdata = !!val;
3858 		release_sock(sk);
3859 		return 0;
3860 	}
3861 	case PACKET_ORIGDEV:
3862 	{
3863 		int val;
3864 
3865 		if (optlen < sizeof(val))
3866 			return -EINVAL;
3867 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3868 			return -EFAULT;
3869 
3870 		lock_sock(sk);
3871 		po->origdev = !!val;
3872 		release_sock(sk);
3873 		return 0;
3874 	}
3875 	case PACKET_VNET_HDR:
3876 	{
3877 		int val;
3878 
3879 		if (sock->type != SOCK_RAW)
3880 			return -EINVAL;
3881 		if (optlen < sizeof(val))
3882 			return -EINVAL;
3883 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3884 			return -EFAULT;
3885 
3886 		lock_sock(sk);
3887 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3888 			ret = -EBUSY;
3889 		} else {
3890 			po->has_vnet_hdr = !!val;
3891 			ret = 0;
3892 		}
3893 		release_sock(sk);
3894 		return ret;
3895 	}
3896 	case PACKET_TIMESTAMP:
3897 	{
3898 		int val;
3899 
3900 		if (optlen != sizeof(val))
3901 			return -EINVAL;
3902 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3903 			return -EFAULT;
3904 
3905 		po->tp_tstamp = val;
3906 		return 0;
3907 	}
3908 	case PACKET_FANOUT:
3909 	{
3910 		struct fanout_args args = { 0 };
3911 
3912 		if (optlen != sizeof(int) && optlen != sizeof(args))
3913 			return -EINVAL;
3914 		if (copy_from_sockptr(&args, optval, optlen))
3915 			return -EFAULT;
3916 
3917 		return fanout_add(sk, &args);
3918 	}
3919 	case PACKET_FANOUT_DATA:
3920 	{
3921 		/* Paired with the WRITE_ONCE() in fanout_add() */
3922 		if (!READ_ONCE(po->fanout))
3923 			return -EINVAL;
3924 
3925 		return fanout_set_data(po, optval, optlen);
3926 	}
3927 	case PACKET_IGNORE_OUTGOING:
3928 	{
3929 		int val;
3930 
3931 		if (optlen != sizeof(val))
3932 			return -EINVAL;
3933 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3934 			return -EFAULT;
3935 		if (val < 0 || val > 1)
3936 			return -EINVAL;
3937 
3938 		po->prot_hook.ignore_outgoing = !!val;
3939 		return 0;
3940 	}
3941 	case PACKET_TX_HAS_OFF:
3942 	{
3943 		unsigned int val;
3944 
3945 		if (optlen != sizeof(val))
3946 			return -EINVAL;
3947 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3948 			return -EFAULT;
3949 
3950 		lock_sock(sk);
3951 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3952 			ret = -EBUSY;
3953 		} else {
3954 			po->tp_tx_has_off = !!val;
3955 			ret = 0;
3956 		}
3957 		release_sock(sk);
3958 		return ret;
3959 	}
3960 	case PACKET_QDISC_BYPASS:
3961 	{
3962 		int val;
3963 
3964 		if (optlen != sizeof(val))
3965 			return -EINVAL;
3966 		if (copy_from_sockptr(&val, optval, sizeof(val)))
3967 			return -EFAULT;
3968 
3969 		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3970 		return 0;
3971 	}
3972 	default:
3973 		return -ENOPROTOOPT;
3974 	}
3975 }
3976 
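/*
 * Editor's note -- an illustrative sketch of option ordering.  Several
 * cases above (PACKET_VERSION, PACKET_RESERVE, PACKET_LOSS,
 * PACKET_VNET_HDR, PACKET_TX_HAS_OFF) return -EBUSY once a ring exists,
 * so userspace must set them before PACKET_RX_RING/PACKET_TX_RING:
 *
 *	int ver = TPACKET_V3;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	// ...only then set up the ring (see the sizing sketch near
 *	// packet_set_ring() below)...
 *
 * Reversing the order leaves the ring in the default TPACKET_V1 layout
 * and makes the PACKET_VERSION call fail with EBUSY.
 */
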
3977 static int packet_getsockopt(struct socket *sock, int level, int optname,
3978 			     char __user *optval, int __user *optlen)
3979 {
3980 	int len;
3981 	int val, lv = sizeof(val);
3982 	struct sock *sk = sock->sk;
3983 	struct packet_sock *po = pkt_sk(sk);
3984 	void *data = &val;
3985 	union tpacket_stats_u st;
3986 	struct tpacket_rollover_stats rstats;
3987 	int drops;
3988 
3989 	if (level != SOL_PACKET)
3990 		return -ENOPROTOOPT;
3991 
3992 	if (get_user(len, optlen))
3993 		return -EFAULT;
3994 
3995 	if (len < 0)
3996 		return -EINVAL;
3997 
3998 	switch (optname) {
3999 	case PACKET_STATISTICS:
4000 		spin_lock_bh(&sk->sk_receive_queue.lock);
4001 		memcpy(&st, &po->stats, sizeof(st));
4002 		memset(&po->stats, 0, sizeof(po->stats));
4003 		spin_unlock_bh(&sk->sk_receive_queue.lock);
4004 		drops = atomic_xchg(&po->tp_drops, 0);
4005 
4006 		if (po->tp_version == TPACKET_V3) {
4007 			lv = sizeof(struct tpacket_stats_v3);
4008 			st.stats3.tp_drops = drops;
4009 			st.stats3.tp_packets += drops;
4010 			data = &st.stats3;
4011 		} else {
4012 			lv = sizeof(struct tpacket_stats);
4013 			st.stats1.tp_drops = drops;
4014 			st.stats1.tp_packets += drops;
4015 			data = &st.stats1;
4016 		}
4017 
4018 		break;
4019 	case PACKET_AUXDATA:
4020 		val = po->auxdata;
4021 		break;
4022 	case PACKET_ORIGDEV:
4023 		val = po->origdev;
4024 		break;
4025 	case PACKET_VNET_HDR:
4026 		val = po->has_vnet_hdr;
4027 		break;
4028 	case PACKET_VERSION:
4029 		val = po->tp_version;
4030 		break;
4031 	case PACKET_HDRLEN:
4032 		if (len > sizeof(int))
4033 			len = sizeof(int);
4034 		if (len < sizeof(int))
4035 			return -EINVAL;
4036 		if (copy_from_user(&val, optval, len))
4037 			return -EFAULT;
4038 		switch (val) {
4039 		case TPACKET_V1:
4040 			val = sizeof(struct tpacket_hdr);
4041 			break;
4042 		case TPACKET_V2:
4043 			val = sizeof(struct tpacket2_hdr);
4044 			break;
4045 		case TPACKET_V3:
4046 			val = sizeof(struct tpacket3_hdr);
4047 			break;
4048 		default:
4049 			return -EINVAL;
4050 		}
4051 		break;
4052 	case PACKET_RESERVE:
4053 		val = po->tp_reserve;
4054 		break;
4055 	case PACKET_LOSS:
4056 		val = po->tp_loss;
4057 		break;
4058 	case PACKET_TIMESTAMP:
4059 		val = po->tp_tstamp;
4060 		break;
4061 	case PACKET_FANOUT:
4062 		val = (po->fanout ?
4063 		       ((u32)po->fanout->id |
4064 			((u32)po->fanout->type << 16) |
4065 			((u32)po->fanout->flags << 24)) :
4066 		       0);
4067 		break;
4068 	case PACKET_IGNORE_OUTGOING:
4069 		val = po->prot_hook.ignore_outgoing;
4070 		break;
4071 	case PACKET_ROLLOVER_STATS:
4072 		if (!po->rollover)
4073 			return -EINVAL;
4074 		rstats.tp_all = atomic_long_read(&po->rollover->num);
4075 		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4076 		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4077 		data = &rstats;
4078 		lv = sizeof(rstats);
4079 		break;
4080 	case PACKET_TX_HAS_OFF:
4081 		val = po->tp_tx_has_off;
4082 		break;
4083 	case PACKET_QDISC_BYPASS:
4084 		val = packet_use_direct_xmit(po);
4085 		break;
4086 	default:
4087 		return -ENOPROTOOPT;
4088 	}
4089 
4090 	if (len > lv)
4091 		len = lv;
4092 	if (put_user(len, optlen))
4093 		return -EFAULT;
4094 	if (copy_to_user(optval, data, len))
4095 		return -EFAULT;
4096 	return 0;
4097 }
4098 
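/*
 * Editor's note -- a hedged sketch of reading PACKET_STATISTICS.  As the
 * code above shows, the counters are zeroed under the receive-queue lock
 * on every read, so each call returns deltas since the previous one:
 *
 *	struct tpacket_stats st;        // tpacket_stats_v3 under TPACKET_V3
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
 *
 * The PACKET_FANOUT value is packed as id | (type << 16) | (flags << 24);
 * a reader unpacks it with (val & 0xffff), ((val >> 16) & 0xff) and
 * (val >> 24).
 */
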
4099 static int packet_notifier(struct notifier_block *this,
4100 			   unsigned long msg, void *ptr)
4101 {
4102 	struct sock *sk;
4103 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4104 	struct net *net = dev_net(dev);
4105 
4106 	rcu_read_lock();
4107 	sk_for_each_rcu(sk, &net->packet.sklist) {
4108 		struct packet_sock *po = pkt_sk(sk);
4109 
4110 		switch (msg) {
4111 		case NETDEV_UNREGISTER:
4112 			if (po->mclist)
4113 				packet_dev_mclist_delete(dev, &po->mclist);
4114 			fallthrough;
4115 
4116 		case NETDEV_DOWN:
4117 			if (dev->ifindex == po->ifindex) {
4118 				spin_lock(&po->bind_lock);
4119 				if (po->running) {
4120 					__unregister_prot_hook(sk, false);
4121 					sk->sk_err = ENETDOWN;
4122 					if (!sock_flag(sk, SOCK_DEAD))
4123 						sk->sk_error_report(sk);
4124 				}
4125 				if (msg == NETDEV_UNREGISTER) {
4126 					packet_cached_dev_reset(po);
4127 					WRITE_ONCE(po->ifindex, -1);
4128 					if (po->prot_hook.dev)
4129 						dev_put(po->prot_hook.dev);
4130 					po->prot_hook.dev = NULL;
4131 				}
4132 				spin_unlock(&po->bind_lock);
4133 			}
4134 			break;
4135 		case NETDEV_UP:
4136 			if (dev->ifindex == po->ifindex) {
4137 				spin_lock(&po->bind_lock);
4138 				if (po->num)
4139 					register_prot_hook(sk);
4140 				spin_unlock(&po->bind_lock);
4141 			}
4142 			break;
4143 		}
4144 	}
4145 	rcu_read_unlock();
4146 	return NOTIFY_DONE;
4147 }
4148 
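/*
 * Editor's note -- from userspace, the NETDEV_DOWN handling above shows
 * up as a pending socket error: the next receive on a socket bound to
 * the downed interface fails with ENETDOWN, e.g.
 *
 *	if (recv(fd, buf, sizeof(buf), 0) < 0 && errno == ENETDOWN)
 *		recover();              // hypothetical re-bind path
 *
 * After NETDEV_UNREGISTER the socket additionally loses its binding
 * (po->ifindex is set to -1 above) and must be bound again before use.
 */
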
4149 
4150 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4151 			unsigned long arg)
4152 {
4153 	struct sock *sk = sock->sk;
4154 
4155 	switch (cmd) {
4156 	case SIOCOUTQ:
4157 	{
4158 		int amount = sk_wmem_alloc_get(sk);
4159 
4160 		return put_user(amount, (int __user *)arg);
4161 	}
4162 	case SIOCINQ:
4163 	{
4164 		struct sk_buff *skb;
4165 		int amount = 0;
4166 
4167 		spin_lock_bh(&sk->sk_receive_queue.lock);
4168 		skb = skb_peek(&sk->sk_receive_queue);
4169 		if (skb)
4170 			amount = skb->len;
4171 		spin_unlock_bh(&sk->sk_receive_queue.lock);
4172 		return put_user(amount, (int __user *)arg);
4173 	}
4174 #ifdef CONFIG_INET
4175 	case SIOCADDRT:
4176 	case SIOCDELRT:
4177 	case SIOCDARP:
4178 	case SIOCGARP:
4179 	case SIOCSARP:
4180 	case SIOCGIFADDR:
4181 	case SIOCSIFADDR:
4182 	case SIOCGIFBRDADDR:
4183 	case SIOCSIFBRDADDR:
4184 	case SIOCGIFNETMASK:
4185 	case SIOCSIFNETMASK:
4186 	case SIOCGIFDSTADDR:
4187 	case SIOCSIFDSTADDR:
4188 	case SIOCSIFFLAGS:
4189 		return inet_dgram_ops.ioctl(sock, cmd, arg);
4190 #endif
4191 
4192 	default:
4193 		return -ENOIOCTLCMD;
4194 	}
4195 	return 0;
4196 }
4197 
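/*
 * Editor's note -- an illustrative sketch of the two queue ioctls above.
 * SIOCINQ reports the length of the *next* queued packet, not the total
 * receive-queue size; SIOCOUTQ reports bytes not yet released by the
 * driver:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int next_len, unsent;
 *
 *	ioctl(fd, SIOCINQ, &next_len);
 *	ioctl(fd, SIOCOUTQ, &unsent);
 */
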
4198 static __poll_t packet_poll(struct file *file, struct socket *sock,
4199 				poll_table *wait)
4200 {
4201 	struct sock *sk = sock->sk;
4202 	struct packet_sock *po = pkt_sk(sk);
4203 	__poll_t mask = datagram_poll(file, sock, wait);
4204 
4205 	spin_lock_bh(&sk->sk_receive_queue.lock);
4206 	if (po->rx_ring.pg_vec) {
4207 		if (!packet_previous_rx_frame(po, &po->rx_ring,
4208 			TP_STATUS_KERNEL))
4209 			mask |= EPOLLIN | EPOLLRDNORM;
4210 	}
4211 	packet_rcv_try_clear_pressure(po);
4212 	spin_unlock_bh(&sk->sk_receive_queue.lock);
4213 	spin_lock_bh(&sk->sk_write_queue.lock);
4214 	if (po->tx_ring.pg_vec) {
4215 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4216 			mask |= EPOLLOUT | EPOLLWRNORM;
4217 	}
4218 	spin_unlock_bh(&sk->sk_write_queue.lock);
4219 	return mask;
4220 }
4221 
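/*
 * Editor's note -- a hedged sketch of how the ring-aware poll above is
 * consumed.  With PACKET_RX_RING, EPOLLIN means at least one frame has
 * been handed to userspace, so the reader drains frames without a
 * syscall per packet ("hdr", handle_frame() and next_frame() are
 * hypothetical helpers over the mmap()ed ring):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	while (hdr->tp_status & TP_STATUS_USER) {
 *		handle_frame(hdr);
 *		hdr->tp_status = TP_STATUS_KERNEL;  // hand frame back
 *		hdr = next_frame(hdr);
 *	}
 */
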
4222 
4223 /* Dirty? Well, I still have not found a better way to account
4224  * for user mmaps.
4225  */
4226 
4227 static void packet_mm_open(struct vm_area_struct *vma)
4228 {
4229 	struct file *file = vma->vm_file;
4230 	struct socket *sock = file->private_data;
4231 	struct sock *sk = sock->sk;
4232 
4233 	if (sk)
4234 		atomic_inc(&pkt_sk(sk)->mapped);
4235 }
4236 
4237 static void packet_mm_close(struct vm_area_struct *vma)
4238 {
4239 	struct file *file = vma->vm_file;
4240 	struct socket *sock = file->private_data;
4241 	struct sock *sk = sock->sk;
4242 
4243 	if (sk)
4244 		atomic_dec(&pkt_sk(sk)->mapped);
4245 }
4246 
4247 static const struct vm_operations_struct packet_mmap_ops = {
4248 	.open	=	packet_mm_open,
4249 	.close	=	packet_mm_close,
4250 };
4251 
4252 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4253 			unsigned int len)
4254 {
4255 	int i;
4256 
4257 	for (i = 0; i < len; i++) {
4258 		if (likely(pg_vec[i].buffer)) {
4259 			if (is_vmalloc_addr(pg_vec[i].buffer))
4260 				vfree(pg_vec[i].buffer);
4261 			else
4262 				free_pages((unsigned long)pg_vec[i].buffer,
4263 					   order);
4264 			pg_vec[i].buffer = NULL;
4265 		}
4266 	}
4267 	kfree(pg_vec);
4268 }
4269 
4270 static char *alloc_one_pg_vec_page(unsigned long order)
4271 {
4272 	char *buffer;
4273 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4274 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4275 
4276 	buffer = (char *) __get_free_pages(gfp_flags, order);
4277 	if (buffer)
4278 		return buffer;
4279 
4280 	/* __get_free_pages failed, fall back to vmalloc */
4281 	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4282 	if (buffer)
4283 		return buffer;
4284 
4285 	/* vmalloc failed, let's dig into swap here */
4286 	gfp_flags &= ~__GFP_NORETRY;
4287 	buffer = (char *) __get_free_pages(gfp_flags, order);
4288 	if (buffer)
4289 		return buffer;
4290 
4291 	/* complete and utter failure */
4292 	return NULL;
4293 }
4294 
4295 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4296 {
4297 	unsigned int block_nr = req->tp_block_nr;
4298 	struct pgv *pg_vec;
4299 	int i;
4300 
4301 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4302 	if (unlikely(!pg_vec))
4303 		goto out;
4304 
4305 	for (i = 0; i < block_nr; i++) {
4306 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4307 		if (unlikely(!pg_vec[i].buffer))
4308 			goto out_free_pgvec;
4309 	}
4310 
4311 out:
4312 	return pg_vec;
4313 
4314 out_free_pgvec:
4315 	free_pg_vec(pg_vec, order, block_nr);
4316 	pg_vec = NULL;
4317 	goto out;
4318 }
4319 
4320 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4321 		int closing, int tx_ring)
4322 {
4323 	struct pgv *pg_vec = NULL;
4324 	struct packet_sock *po = pkt_sk(sk);
4325 	unsigned long *rx_owner_map = NULL;
4326 	int was_running, order = 0;
4327 	struct packet_ring_buffer *rb;
4328 	struct sk_buff_head *rb_queue;
4329 	__be16 num;
4330 	int err;
4331 	/* Added to minimize code churn */
4332 	struct tpacket_req *req = &req_u->req;
4333 
4334 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4335 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4336 
4337 	err = -EBUSY;
4338 	if (!closing) {
4339 		if (atomic_read(&po->mapped))
4340 			goto out;
4341 		if (packet_read_pending(rb))
4342 			goto out;
4343 	}
4344 
4345 	if (req->tp_block_nr) {
4346 		unsigned int min_frame_size;
4347 
4348 		/* Sanity tests and some calculations */
4349 		err = -EBUSY;
4350 		if (unlikely(rb->pg_vec))
4351 			goto out;
4352 
4353 		switch (po->tp_version) {
4354 		case TPACKET_V1:
4355 			po->tp_hdrlen = TPACKET_HDRLEN;
4356 			break;
4357 		case TPACKET_V2:
4358 			po->tp_hdrlen = TPACKET2_HDRLEN;
4359 			break;
4360 		case TPACKET_V3:
4361 			po->tp_hdrlen = TPACKET3_HDRLEN;
4362 			break;
4363 		}
4364 
4365 		err = -EINVAL;
4366 		if (unlikely((int)req->tp_block_size <= 0))
4367 			goto out;
4368 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4369 			goto out;
4370 		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4371 		if (po->tp_version >= TPACKET_V3 &&
4372 		    req->tp_block_size <
4373 		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4374 			goto out;
4375 		if (unlikely(req->tp_frame_size < min_frame_size))
4376 			goto out;
4377 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4378 			goto out;
4379 
4380 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4381 		if (unlikely(rb->frames_per_block == 0))
4382 			goto out;
4383 		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4384 			goto out;
4385 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4386 					req->tp_frame_nr))
4387 			goto out;
4388 
4389 		err = -ENOMEM;
4390 		order = get_order(req->tp_block_size);
4391 		pg_vec = alloc_pg_vec(req, order);
4392 		if (unlikely(!pg_vec))
4393 			goto out;
4394 		switch (po->tp_version) {
4395 		case TPACKET_V3:
4396 			/* Block transmit is not supported yet */
4397 			if (!tx_ring) {
4398 				init_prb_bdqc(po, rb, pg_vec, req_u);
4399 			} else {
4400 				struct tpacket_req3 *req3 = &req_u->req3;
4401 
4402 				if (req3->tp_retire_blk_tov ||
4403 				    req3->tp_sizeof_priv ||
4404 				    req3->tp_feature_req_word) {
4405 					err = -EINVAL;
4406 					goto out_free_pg_vec;
4407 				}
4408 			}
4409 			break;
4410 		default:
4411 			if (!tx_ring) {
4412 				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4413 					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4414 				if (!rx_owner_map)
4415 					goto out_free_pg_vec;
4416 			}
4417 			break;
4418 		}
4419 	}
4420 	/* Done */
4421 	else {
4422 		err = -EINVAL;
4423 		if (unlikely(req->tp_frame_nr))
4424 			goto out;
4425 	}
4426 
4427 
4428 	/* Detach socket from network */
4429 	spin_lock(&po->bind_lock);
4430 	was_running = po->running;
4431 	num = po->num;
4432 	if (was_running) {
4433 		WRITE_ONCE(po->num, 0);
4434 		__unregister_prot_hook(sk, false);
4435 	}
4436 	spin_unlock(&po->bind_lock);
4437 
4438 	synchronize_net();
4439 
4440 	err = -EBUSY;
4441 	mutex_lock(&po->pg_vec_lock);
4442 	if (closing || atomic_read(&po->mapped) == 0) {
4443 		err = 0;
4444 		spin_lock_bh(&rb_queue->lock);
4445 		swap(rb->pg_vec, pg_vec);
4446 		if (po->tp_version <= TPACKET_V2)
4447 			swap(rb->rx_owner_map, rx_owner_map);
4448 		rb->frame_max = (req->tp_frame_nr - 1);
4449 		rb->head = 0;
4450 		rb->frame_size = req->tp_frame_size;
4451 		spin_unlock_bh(&rb_queue->lock);
4452 
4453 		swap(rb->pg_vec_order, order);
4454 		swap(rb->pg_vec_len, req->tp_block_nr);
4455 
4456 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4457 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4458 						tpacket_rcv : packet_rcv;
4459 		skb_queue_purge(rb_queue);
4460 		if (atomic_read(&po->mapped))
4461 			pr_err("packet_mmap: vma is busy: %d\n",
4462 			       atomic_read(&po->mapped));
4463 	}
4464 	mutex_unlock(&po->pg_vec_lock);
4465 
4466 	spin_lock(&po->bind_lock);
4467 	if (was_running) {
4468 		WRITE_ONCE(po->num, num);
4469 		register_prot_hook(sk);
4470 	}
4471 	spin_unlock(&po->bind_lock);
4472 	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4473 		/* Because we don't support block-based V3 on tx-ring */
4474 		if (!tx_ring)
4475 			prb_shutdown_retire_blk_timer(po, rb_queue);
4476 	}
4477 
4478 out_free_pg_vec:
4479 	if (pg_vec) {
4480 		bitmap_free(rx_owner_map);
4481 		free_pg_vec(pg_vec, order, req->tp_block_nr);
4482 	}
4483 out:
4484 	return err;
4485 }
4486 
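/*
 * Editor's note -- the sanity checks above pin down the tpacket_req
 * geometry: tp_block_size must be positive and page-aligned,
 * tp_frame_size a multiple of TPACKET_ALIGNMENT and at least
 * tp_hdrlen + tp_reserve, and tp_frame_nr must equal
 * (tp_block_size / tp_frame_size) * tp_block_nr.  A hedged sizing
 * sketch (4 KiB pages assumed):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 1 << 16,
 *		.tp_frame_size = 1 << 11,
 *		.tp_block_nr   = 64,
 *	};
 *	req.tp_frame_nr = (req.tp_block_size / req.tp_frame_size)
 *			  * req.tp_block_nr;
 */
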
4487 static int packet_mmap(struct file *file, struct socket *sock,
4488 		struct vm_area_struct *vma)
4489 {
4490 	struct sock *sk = sock->sk;
4491 	struct packet_sock *po = pkt_sk(sk);
4492 	unsigned long size, expected_size;
4493 	struct packet_ring_buffer *rb;
4494 	unsigned long start;
4495 	int err = -EINVAL;
4496 	int i;
4497 
4498 	if (vma->vm_pgoff)
4499 		return -EINVAL;
4500 
4501 	mutex_lock(&po->pg_vec_lock);
4502 
4503 	expected_size = 0;
4504 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4505 		if (rb->pg_vec) {
4506 			expected_size += rb->pg_vec_len
4507 						* rb->pg_vec_pages
4508 						* PAGE_SIZE;
4509 		}
4510 	}
4511 
4512 	if (expected_size == 0)
4513 		goto out;
4514 
4515 	size = vma->vm_end - vma->vm_start;
4516 	if (size != expected_size)
4517 		goto out;
4518 
4519 	start = vma->vm_start;
4520 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4521 		if (rb->pg_vec == NULL)
4522 			continue;
4523 
4524 		for (i = 0; i < rb->pg_vec_len; i++) {
4525 			struct page *page;
4526 			void *kaddr = rb->pg_vec[i].buffer;
4527 			int pg_num;
4528 
4529 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4530 				page = pgv_to_page(kaddr);
4531 				err = vm_insert_page(vma, start, page);
4532 				if (unlikely(err))
4533 					goto out;
4534 				start += PAGE_SIZE;
4535 				kaddr += PAGE_SIZE;
4536 			}
4537 		}
4538 	}
4539 
4540 	atomic_inc(&po->mapped);
4541 	vma->vm_ops = &packet_mmap_ops;
4542 	err = 0;
4543 
4544 out:
4545 	mutex_unlock(&po->pg_vec_lock);
4546 	return err;
4547 }
4548 
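/*
 * Editor's note -- a hedged sketch of the mapping contract enforced
 * above: vm_pgoff must be 0 and the mapping length must cover exactly
 * the configured rings, which are laid out back to back (RX first,
 * then TX):
 *
 *	size_t rx_size = rx_req.tp_block_size * rx_req.tp_block_nr;
 *	size_t tx_size = tx_req.tp_block_size * tx_req.tp_block_nr;
 *	void *ring = mmap(NULL, rx_size + tx_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	// RX frames start at ring; TX frames at (char *)ring + rx_size
 */
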
4549 static const struct proto_ops packet_ops_spkt = {
4550 	.family =	PF_PACKET,
4551 	.owner =	THIS_MODULE,
4552 	.release =	packet_release,
4553 	.bind =		packet_bind_spkt,
4554 	.connect =	sock_no_connect,
4555 	.socketpair =	sock_no_socketpair,
4556 	.accept =	sock_no_accept,
4557 	.getname =	packet_getname_spkt,
4558 	.poll =		datagram_poll,
4559 	.ioctl =	packet_ioctl,
4560 	.gettstamp =	sock_gettstamp,
4561 	.listen =	sock_no_listen,
4562 	.shutdown =	sock_no_shutdown,
4563 	.sendmsg =	packet_sendmsg_spkt,
4564 	.recvmsg =	packet_recvmsg,
4565 	.mmap =		sock_no_mmap,
4566 	.sendpage =	sock_no_sendpage,
4567 };
4568 
4569 static const struct proto_ops packet_ops = {
4570 	.family =	PF_PACKET,
4571 	.owner =	THIS_MODULE,
4572 	.release =	packet_release,
4573 	.bind =		packet_bind,
4574 	.connect =	sock_no_connect,
4575 	.socketpair =	sock_no_socketpair,
4576 	.accept =	sock_no_accept,
4577 	.getname =	packet_getname,
4578 	.poll =		packet_poll,
4579 	.ioctl =	packet_ioctl,
4580 	.gettstamp =	sock_gettstamp,
4581 	.listen =	sock_no_listen,
4582 	.shutdown =	sock_no_shutdown,
4583 	.setsockopt =	packet_setsockopt,
4584 	.getsockopt =	packet_getsockopt,
4585 	.sendmsg =	packet_sendmsg,
4586 	.recvmsg =	packet_recvmsg,
4587 	.mmap =		packet_mmap,
4588 	.sendpage =	sock_no_sendpage,
4589 };
4590 
4591 static const struct net_proto_family packet_family_ops = {
4592 	.family =	PF_PACKET,
4593 	.create =	packet_create,
4594 	.owner	=	THIS_MODULE,
4595 };
4596 
4597 static struct notifier_block packet_netdev_notifier = {
4598 	.notifier_call =	packet_notifier,
4599 };
4600 
4601 #ifdef CONFIG_PROC_FS
4602 
4603 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4604 	__acquires(RCU)
4605 {
4606 	struct net *net = seq_file_net(seq);
4607 
4608 	rcu_read_lock();
4609 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4610 }
4611 
4612 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4613 {
4614 	struct net *net = seq_file_net(seq);
4615 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4616 }
4617 
4618 static void packet_seq_stop(struct seq_file *seq, void *v)
4619 	__releases(RCU)
4620 {
4621 	rcu_read_unlock();
4622 }
4623 
4624 static int packet_seq_show(struct seq_file *seq, void *v)
4625 {
4626 	if (v == SEQ_START_TOKEN)
4627 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4628 	else {
4629 		struct sock *s = sk_entry(v);
4630 		const struct packet_sock *po = pkt_sk(s);
4631 
4632 		seq_printf(seq,
4633 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4634 			   s,
4635 			   refcount_read(&s->sk_refcnt),
4636 			   s->sk_type,
4637 			   ntohs(READ_ONCE(po->num)),
4638 			   READ_ONCE(po->ifindex),
4639 			   po->running,
4640 			   atomic_read(&s->sk_rmem_alloc),
4641 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4642 			   sock_i_ino(s));
4643 	}
4644 
4645 	return 0;
4646 }
4647 
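/*
 * Editor's note -- with the format string above, a /proc/net/packet line
 * looks roughly like this (illustrative values):
 *
 *	sk       RefCnt Type Proto  Iface R Rmem   User   Inode
 *	00000000deadbeef 3      3    0003   2     1 0      0      17896
 *
 * Type is the socket type (SOCK_RAW == 3), Proto the bound protocol in
 * hex (0x0003 == ETH_P_ALL) and R whether the prot_hook is registered.
 */
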
4648 static const struct seq_operations packet_seq_ops = {
4649 	.start	= packet_seq_start,
4650 	.next	= packet_seq_next,
4651 	.stop	= packet_seq_stop,
4652 	.show	= packet_seq_show,
4653 };
4654 #endif
4655 
4656 static int __net_init packet_net_init(struct net *net)
4657 {
4658 	mutex_init(&net->packet.sklist_lock);
4659 	INIT_HLIST_HEAD(&net->packet.sklist);
4660 
4661 #ifdef CONFIG_PROC_FS
4662 	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4663 			sizeof(struct seq_net_private)))
4664 		return -ENOMEM;
4665 #endif /* CONFIG_PROC_FS */
4666 
4667 	return 0;
4668 }
4669 
4670 static void __net_exit packet_net_exit(struct net *net)
4671 {
4672 	remove_proc_entry("packet", net->proc_net);
4673 	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4674 }
4675 
4676 static struct pernet_operations packet_net_ops = {
4677 	.init = packet_net_init,
4678 	.exit = packet_net_exit,
4679 };
4680 
4681 
4682 static void __exit packet_exit(void)
4683 {
4684 	unregister_netdevice_notifier(&packet_netdev_notifier);
4685 	unregister_pernet_subsys(&packet_net_ops);
4686 	sock_unregister(PF_PACKET);
4687 	proto_unregister(&packet_proto);
4688 }
4689 
4690 static int __init packet_init(void)
4691 {
4692 	int rc;
4693 
4694 	rc = proto_register(&packet_proto, 0);
4695 	if (rc)
4696 		goto out;
4697 	rc = sock_register(&packet_family_ops);
4698 	if (rc)
4699 		goto out_proto;
4700 	rc = register_pernet_subsys(&packet_net_ops);
4701 	if (rc)
4702 		goto out_sock;
4703 	rc = register_netdevice_notifier(&packet_netdev_notifier);
4704 	if (rc)
4705 		goto out_pernet;
4706 
4707 	return 0;
4708 
4709 out_pernet:
4710 	unregister_pernet_subsys(&packet_net_ops);
4711 out_sock:
4712 	sock_unregister(PF_PACKET);
4713 out_proto:
4714 	proto_unregister(&packet_proto);
4715 out:
4716 	return rc;
4717 }
4718 
4719 module_init(packet_init);
4720 module_exit(packet_exit);
4721 MODULE_LICENSE("GPL");
4722 MODULE_ALIAS_NETPROTO(PF_PACKET);
4723