// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 */

#include <linux/version.h>
#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include <net_kern.h>
#include <os.h>
#include "mconsole_kern.h"
#include "vector_user.h"
#include "vector_kern.h"

/*
 * Adapted from network devices with the following major changes:
 * All transports are static - this simplifies the code significantly
 * Multiple FDs/IRQs per device
 * Vector IO is optionally used for read/write, falling back to legacy
 * based on configuration and/or availability
 * Configuration is no longer positional - L2TPv3 and GRE require up to
 * 10 parameters, and passing these positionally is not fit for purpose.
 * Only socket transports are supported
 */
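
/*
 * Devices are specified on the kernel command line or via mconsole as
 * "vec<unit>:<option>=<value>,<option>=<value>,...". A purely
 * illustrative sketch of such a specification (the option values here
 * are hypothetical and the full option list depends on the transport):
 *
 *	vec0:transport=raw,ifname=eth0,depth=128,gro=1,vec=1
 */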

#define DRIVER_NAME "uml-vector"
#define DRIVER_VERSION "01"

struct vector_cmd_line_arg {
	struct list_head list;
	int unit;
	char *arguments;
};

struct vector_device {
	struct list_head list;
	struct net_device *dev;
	struct platform_device pdev;
	int unit;
	int opened;
};

static LIST_HEAD(vec_cmd_line);

static DEFINE_SPINLOCK(vector_devices_lock);
static LIST_HEAD(vector_devices);

static int driver_registered;

static void vector_eth_configure(int n, struct arglist *def);

/* Argument accessors used to set variables (and/or their default
 * values): mtu, buffer sizing, default headroom, etc.
 */

#define DEFAULT_HEADROOM 2
#define SAFETY_MARGIN 32
#define DEFAULT_VECTOR_SIZE 64
#define TX_SMALL_PACKET 128
#define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
#define MAX_ITERATIONS 64

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_queue_max" },
	{ "rx_queue_running_average" },
	{ "tx_queue_max" },
	{ "tx_queue_running_average" },
	{ "rx_encaps_errors" },
	{ "tx_timeout_count" },
	{ "tx_restart_queue" },
	{ "tx_kicks" },
	{ "tx_flow_control_xon" },
	{ "tx_flow_control_xoff" },
	{ "rx_csum_offload_good" },
	{ "rx_csum_offload_errors" },
	{ "sg_ok" },
	{ "sg_linearized" },
};

#define VECTOR_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

static void vector_reset_stats(struct vector_private *vp)
{
	vp->estats.rx_queue_max = 0;
	vp->estats.rx_queue_running_average = 0;
	vp->estats.tx_queue_max = 0;
	vp->estats.tx_queue_running_average = 0;
	vp->estats.rx_encaps_errors = 0;
	vp->estats.tx_timeout_count = 0;
	vp->estats.tx_restart_queue = 0;
	vp->estats.tx_kicks = 0;
	vp->estats.tx_flow_control_xon = 0;
	vp->estats.tx_flow_control_xoff = 0;
	vp->estats.sg_ok = 0;
	vp->estats.sg_linearized = 0;
}

static int get_mtu(struct arglist *def)
{
	char *mtu = uml_vector_fetch_arg(def, "mtu");
	unsigned long result;

	if (mtu != NULL) {
		if (kstrtoul(mtu, 10, &result) == 0)
			if ((result < (1 << 16) - 1) && (result >= 576))
				return result;
	}
	return ETH_MAX_PACKET;
}

static int get_depth(struct arglist *def)
{
	char *depth = uml_vector_fetch_arg(def, "depth");
	unsigned long result;

	if (depth != NULL) {
		if (kstrtoul(depth, 10, &result) == 0)
			return result;
	}
	return DEFAULT_VECTOR_SIZE;
}

static int get_headroom(struct arglist *def)
{
	char *headroom = uml_vector_fetch_arg(def, "headroom");
	unsigned long result;

	if (headroom != NULL) {
		if (kstrtoul(headroom, 10, &result) == 0)
			return result;
	}
	return DEFAULT_HEADROOM;
}

static int get_req_size(struct arglist *def)
{
	char *gro = uml_vector_fetch_arg(def, "gro");
	unsigned long result;

	if (gro != NULL) {
		if (kstrtoul(gro, 10, &result) == 0) {
			if (result > 0)
				return 65536;
		}
	}
	return get_mtu(def) + ETH_HEADER_OTHER +
		get_headroom(def) + SAFETY_MARGIN;
}


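/* Derive the VECTOR_* option flags for a device from its transport.
 * Vector RX/TX can be vetoed wholesale with "vec=0"; tap never uses
 * vector IO, hybrid adds BPF filtering, and raw additionally tries to
 * bypass the host qdisc.
 */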
static int get_transport_options(struct arglist *def)
{
	char *transport = uml_vector_fetch_arg(def, "transport");
	char *vector = uml_vector_fetch_arg(def, "vec");

	int vec_rx = VECTOR_RX;
	int vec_tx = VECTOR_TX;
	unsigned long parsed;

	if (vector != NULL) {
		if (kstrtoul(vector, 10, &parsed) == 0) {
			if (parsed == 0) {
				vec_rx = 0;
				vec_tx = 0;
			}
		}
	}

	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
		return 0;
	if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
		return (vec_rx | VECTOR_BPF);
	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
		return (vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
	return (vec_rx | vec_tx);
}


/* A mini-buffer for packet drop reads.
 * All of our supported transports are datagram oriented and we always
 * read using recvmsg or recvmmsg. If we pass a buffer which is smaller
 * than the packet size, it still counts as a full packet read and will
 * drain the incoming stream to keep sigio/epoll happy.
 */

#define DROP_BUFFER_SIZE 32

static char *drop_buffer;

/* Array-backed queues optimized for bulk enqueue/dequeue and
 * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
 * For more details and the full design rationale see
 * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
 */
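
/* Queue invariants (as implemented below): enqueuers advance the tail
 * under the tail_lock, dequeuers advance the head under the head_lock,
 * and queue_depth is updated while briefly taking the opposite lock.
 * When the queue drains to empty, head and tail are both reset to 0 so
 * that subsequent bulk operations can again use maximum-size vectors.
 */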


/*
 * Advance the mmsg queue head by n = advance. Resets the queue to
 * maximum enqueue/dequeue-at-once capacity if possible. Called by
 * dequeuers. The caller must hold the head_lock!
 */

static int vector_advancehead(struct vector_queue *qi, int advance)
{
	int queue_depth;

	qi->head = (qi->head + advance) % qi->max_depth;

	spin_lock(&qi->tail_lock);
	qi->queue_depth -= advance;

	/* We are at 0; use this to reset head and tail so we
	 * can use max-size vectors.
	 */
	if (qi->queue_depth == 0) {
		qi->head = 0;
		qi->tail = 0;
	}
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->tail_lock);
	return queue_depth;
}

/*	Advance the queue tail by n = advance.
 *	This is called by enqueuers which should hold the
 *	tail lock already.
 */

static int vector_advancetail(struct vector_queue *qi, int advance)
{
	int queue_depth;

	qi->tail = (qi->tail + advance) % qi->max_depth;
	spin_lock(&qi->head_lock);
	qi->queue_depth += advance;
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->head_lock);
	return queue_depth;
}

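/* Build the iovec array for one outgoing skb: an optional transport
 * header formed via the form_header callback, the linear part of the
 * skb, and one entry per page fragment. Returns the number of iovecs
 * used, or -1 if the skb had to be linearized and that failed.
 */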
static int prep_msg(struct vector_private *vp,
	struct sk_buff *skb,
	struct iovec *iov)
{
	int iov_index = 0;
	int nr_frags, frag;
	skb_frag_t *skb_frag;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > MAX_IOV_SIZE) {
		if (skb_linearize(skb) != 0)
			goto drop;
	}
	if (vp->header_size > 0) {
		iov[iov_index].iov_len = vp->header_size;
		vp->form_header(iov[iov_index].iov_base, skb, vp);
		iov_index++;
	}
	iov[iov_index].iov_base = skb->data;
	if (nr_frags > 0) {
		iov[iov_index].iov_len = skb->len - skb->data_len;
		vp->estats.sg_ok++;
	} else
		iov[iov_index].iov_len = skb->len;
	iov_index++;
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag = &skb_shinfo(skb)->frags[frag];
		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
		iov[iov_index].iov_len = skb_frag_size(skb_frag);
		iov_index++;
	}
	return iov_index;
drop:
	return -1;
}

/*
 * Generic vector enqueue with support for forming headers using a
 * transport-specific callback. Allows GRE, L2TPv3, RAW and other
 * transports to use a common enqueue procedure in vector mode.
 */

static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	int queue_depth;
	int packet_len;
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	int iov_count;

	spin_lock(&qi->tail_lock);
	spin_lock(&qi->head_lock);
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->head_lock);

	if (skb)
		packet_len = skb->len;

	if (queue_depth < qi->max_depth) {

		*(qi->skbuff_vector + qi->tail) = skb;
		mmsg_vector += qi->tail;
		iov_count = prep_msg(
			vp,
			skb,
			mmsg_vector->msg_hdr.msg_iov
		);
		if (iov_count < 1)
			goto drop;
		mmsg_vector->msg_hdr.msg_iovlen = iov_count;
		mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
		mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
		queue_depth = vector_advancetail(qi, 1);
	} else
		goto drop;
	spin_unlock(&qi->tail_lock);
	return queue_depth;
drop:
	qi->dev->stats.tx_dropped++;
	if (skb != NULL) {
		packet_len = skb->len;
		dev_consume_skb_any(skb);
		netdev_completed_queue(qi->dev, 1, packet_len);
	}
	spin_unlock(&qi->tail_lock);
	return queue_depth;
}

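/* Free a batch of skbs that sendmmsg has consumed, starting at the
 * queue head, update stats and BQL completion accounting, and
 * advance the head past them.
 */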
static int consume_vector_skbs(struct vector_queue *qi, int count)
{
	struct sk_buff *skb;
	int skb_index;
	int bytes_compl = 0;

	for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
		skb = *(qi->skbuff_vector + skb_index);
		/* mark as empty to ensure correct destruction if
		 * needed
		 */
		bytes_compl += skb->len;
		*(qi->skbuff_vector + skb_index) = NULL;
		dev_consume_skb_any(skb);
	}
	qi->dev->stats.tx_bytes += bytes_compl;
	qi->dev->stats.tx_packets += count;
	netdev_completed_queue(qi->dev, count, bytes_compl);
	return vector_advancehead(qi, count);
}

/*
 * Generic vector dequeue via sendmmsg with support for forming headers
 * using a transport-specific callback. Allows GRE, L2TPv3, RAW and
 * other transports to use a common dequeue procedure in vector mode.
 */

static int vector_send(struct vector_queue *qi)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	struct mmsghdr *send_from;
	int result = 0, send_len, queue_depth = qi->max_depth;

	if (spin_trylock(&qi->head_lock)) {
		if (spin_trylock(&qi->tail_lock)) {
			/* update queue_depth to current value */
			queue_depth = qi->queue_depth;
			spin_unlock(&qi->tail_lock);
			while (queue_depth > 0) {
				/* Calculate the start of the vector */
				send_len = queue_depth;
				send_from = qi->mmsg_vector;
				send_from += qi->head;
				/* Adjust vector size if wraparound */
				if (send_len + qi->head > qi->max_depth)
					send_len = qi->max_depth - qi->head;
				/* Try to TX as many packets as possible */
				if (send_len > 0) {
					result = uml_vector_sendmmsg(
						 vp->fds->tx_fd,
						 send_from,
						 send_len,
						 0
					);
					vp->in_write_poll =
						(result != send_len);
				}
				/* For some of the sendmmsg error scenarios
				 * we may end up unsure of the TX success
				 * for all packets. It is safer to declare
				 * them all TX-ed and blame the network.
				 */
				if (result < 0) {
					if (net_ratelimit())
						netdev_err(vp->dev, "sendmmsg err=%i\n",
							result);
					vp->in_error = true;
					result = send_len;
				}
				if (result > 0) {
					queue_depth =
						consume_vector_skbs(qi, result);
					/* This is equivalent to a TX IRQ.
					 * Restart the upper layers to feed us
					 * more packets.
					 */
					if (result > vp->estats.tx_queue_max)
						vp->estats.tx_queue_max = result;
					vp->estats.tx_queue_running_average =
						(vp->estats.tx_queue_running_average + result) >> 1;
				}
				netif_trans_update(qi->dev);
				netif_wake_queue(qi->dev);
				/* if TX is busy, break out of the send loop;
				 * the poll write IRQ will reschedule xmit for us
				 */
				if (result != send_len) {
					vp->estats.tx_restart_queue++;
					break;
				}
			}
		}
		spin_unlock(&qi->head_lock);
	} else {
		tasklet_schedule(&vp->tx_poll);
	}
	return queue_depth;
}

/* Queue destructor. Deliberately stateless so we can use
 * it in queue cleanup if initialization fails.
 */

static void destroy_queue(struct vector_queue *qi)
{
	int i;
	struct iovec *iov;
	struct vector_private *vp;
	struct mmsghdr *mmsg_vector;

	if (qi == NULL)
		return;
	vp = netdev_priv(qi->dev);
	/* deallocate any skbuffs - we rely on any unused entries
	 * being set to NULL.
	 */
	if (qi->skbuff_vector != NULL) {
		for (i = 0; i < qi->max_depth; i++) {
			if (*(qi->skbuff_vector + i) != NULL)
				dev_kfree_skb_any(*(qi->skbuff_vector + i));
		}
		kfree(qi->skbuff_vector);
	}
	/* deallocate matching IOV structures including header buffs */
	if (qi->mmsg_vector != NULL) {
		mmsg_vector = qi->mmsg_vector;
		for (i = 0; i < qi->max_depth; i++) {
			iov = mmsg_vector->msg_hdr.msg_iov;
			if (iov != NULL) {
				if ((vp->header_size > 0) &&
					(iov->iov_base != NULL))
					kfree(iov->iov_base);
				kfree(iov);
			}
			mmsg_vector++;
		}
		kfree(qi->mmsg_vector);
	}
	kfree(qi);
}

/*
 * Queue constructor. Create a queue with a given size.
 */
static struct vector_queue *create_queue(
	struct vector_private *vp,
	int max_size,
	int header_size,
	int num_extra_frags)
{
	struct vector_queue *result;
	int i;
	struct iovec *iov;
	struct mmsghdr *mmsg_vector;

	result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
	if (result == NULL)
		return NULL;
	result->max_depth = max_size;
	result->dev = vp->dev;
	result->mmsg_vector = kmalloc(
		(sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
	if (result->mmsg_vector == NULL)
		goto out_mmsg_fail;
	result->skbuff_vector = kmalloc(
		(sizeof(void *) * max_size), GFP_KERNEL);
	if (result->skbuff_vector == NULL)
		goto out_skb_fail;

	/* further failures can be handled safely by destroy_queue */

	mmsg_vector = result->mmsg_vector;
	for (i = 0; i < max_size; i++) {
		/* Clear all pointers - we use non-NULL as a marker for
		 * what to free on destruction
		 */
		*(result->skbuff_vector + i) = NULL;
		mmsg_vector->msg_hdr.msg_iov = NULL;
		mmsg_vector++;
	}
	mmsg_vector = result->mmsg_vector;
	result->max_iov_frags = num_extra_frags;
	for (i = 0; i < max_size; i++) {
		if (vp->header_size > 0)
			iov = kmalloc_array(3 + num_extra_frags,
					    sizeof(struct iovec),
					    GFP_KERNEL
			);
		else
			iov = kmalloc_array(2 + num_extra_frags,
					    sizeof(struct iovec),
					    GFP_KERNEL
			);
		if (iov == NULL)
			goto out_fail;
		mmsg_vector->msg_hdr.msg_iov = iov;
		mmsg_vector->msg_hdr.msg_iovlen = 1;
		mmsg_vector->msg_hdr.msg_control = NULL;
		mmsg_vector->msg_hdr.msg_controllen = 0;
		mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
		mmsg_vector->msg_hdr.msg_name = NULL;
		mmsg_vector->msg_hdr.msg_namelen = 0;
		if (vp->header_size > 0) {
			iov->iov_base = kmalloc(header_size, GFP_KERNEL);
			if (iov->iov_base == NULL)
				goto out_fail;
			iov->iov_len = header_size;
			mmsg_vector->msg_hdr.msg_iovlen = 2;
			iov++;
		}
		iov->iov_base = NULL;
		iov->iov_len = 0;
		mmsg_vector++;
	}
	spin_lock_init(&result->head_lock);
	spin_lock_init(&result->tail_lock);
	result->queue_depth = 0;
	result->head = 0;
	result->tail = 0;
	return result;
out_skb_fail:
	kfree(result->mmsg_vector);
out_mmsg_fail:
	kfree(result);
	return NULL;
out_fail:
	destroy_queue(result);
	return NULL;
}

/*
 * We do not use the RX queue as a proper wraparound queue for now.
 * This is not necessary because the consumption via netif_rx()
 * happens in-line. While we could try using the return code of
 * netif_rx() for flow control, there are no drivers doing this today.
 * For this RX-specific use we ignore the tail/head locks and
 * just read into a prepared queue filled with skbuffs.
 */

static struct sk_buff *prep_skb(
	struct vector_private *vp,
	struct user_msghdr *msg)
{
	int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
	struct sk_buff *result;
	int iov_index = 0, len;
	struct iovec *iov = msg->msg_iov;
	int err, nr_frags, frag;
	skb_frag_t *skb_frag;

	if (vp->req_size <= linear)
		len = linear;
	else
		len = vp->req_size;
	result = alloc_skb_with_frags(
		linear,
		len - vp->max_packet,
		3,
		&err,
		GFP_ATOMIC
	);
	if (vp->header_size > 0)
		iov_index++;
	if (result == NULL) {
		iov[iov_index].iov_base = NULL;
		iov[iov_index].iov_len = 0;
		goto done;
	}
	skb_reserve(result, vp->headroom);
	result->dev = vp->dev;
	skb_put(result, vp->max_packet);
	result->data_len = len - vp->max_packet;
	result->len += len - vp->max_packet;
	skb_reset_mac_header(result);
	result->ip_summed = CHECKSUM_NONE;
	iov[iov_index].iov_base = result->data;
	iov[iov_index].iov_len = vp->max_packet;
	iov_index++;

	nr_frags = skb_shinfo(result)->nr_frags;
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag = &skb_shinfo(result)->frags[frag];
		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
		if (iov[iov_index].iov_base != NULL)
			iov[iov_index].iov_len = skb_frag_size(skb_frag);
		else
			iov[iov_index].iov_len = 0;
		iov_index++;
	}
done:
	msg->msg_iovlen = iov_index;
	return result;
}


/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */

static void prep_queue_for_rx(struct vector_queue *qi)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	void **skbuff_vector = qi->skbuff_vector;
	int i;

	if (qi->queue_depth == 0)
		return;
	for (i = 0; i < qi->queue_depth; i++) {
		/* it is OK if allocation fails - recvmmsg with a NULL data
		 * iov argument still performs an RX, it just drops the packet.
		 * This allows us to stop faffing around with a "drop buffer"
		 */

		*skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
		skbuff_vector++;
		mmsg_vector++;
	}
	qi->queue_depth = 0;
}

static struct vector_device *find_device(int n)
{
	struct vector_device *device;
	struct list_head *ele;

	spin_lock(&vector_devices_lock);
	list_for_each(ele, &vector_devices) {
		device = list_entry(ele, struct vector_device, list);
		if (device->unit == n)
			goto out;
	}
	device = NULL;
 out:
	spin_unlock(&vector_devices_lock);
	return device;
}

static int vector_parse(char *str, int *index_out, char **str_out,
			char **error_out)
{
	unsigned int n;
	int err;
	char *start = str;

	while ((*str != ':') && (strlen(str) > 1))
		str++;
	if (*str != ':') {
		*error_out = "Expected ':' after device number";
		return -EINVAL;
	}
	*str = '\0';

	err = kstrtouint(start, 0, &n);
	if (err < 0) {
		*error_out = "Bad device number";
		return err;
	}

	str++;
	if (find_device(n)) {
		*error_out = "Device already configured";
		return -EINVAL;
	}

	*index_out = n;
	*str_out = str;
	return 0;
}

static int vector_config(char *str, char **error_out)
{
	int err, n;
	char *params;
	struct arglist *parsed;

	err = vector_parse(str, &n, &params, error_out);
	if (err != 0)
		return err;

	/* This string is broken up and the pieces used by the underlying
	 * driver. We should copy it to make sure things do not go wrong
	 * later.
	 */

	params = kstrdup(params, GFP_KERNEL);
	if (params == NULL) {
		*error_out = "vector_config failed to strdup string";
		return -ENOMEM;
	}

	parsed = uml_parse_vector_ifspec(params);

	if (parsed == NULL) {
		*error_out = "vector_config failed to parse parameters";
		kfree(params);
		return -EINVAL;
	}

	vector_eth_configure(n, parsed);
	return 0;
}

static int vector_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

static int vector_remove(int n, char **error_out)
{
	struct vector_device *vec_d;
	struct net_device *dev;
	struct vector_private *vp;

	vec_d = find_device(n);
	if (vec_d == NULL)
		return -ENODEV;
	dev = vec_d->dev;
	vp = netdev_priv(dev);
	if (vp->fds != NULL)
		return -EBUSY;
	unregister_netdev(dev);
	platform_device_unregister(&vec_d->pdev);
	return 0;
}

/*
 * There is no shared per-transport initialization code, so
 * we will just initialize each interface one by one and
 * add it to a list.
 */

static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};


static void vector_device_release(struct device *dev)
{
	struct vector_device *device = dev_get_drvdata(dev);
	struct net_device *netdev = device->dev;

	list_del(&device->list);
	kfree(device);
	free_netdev(netdev);
}

/* Bog standard recv using recvmsg - not used normally unless the user
 * explicitly specifies not to use recvmmsg vector RX.
 */

static int vector_legacy_rx(struct vector_private *vp)
{
	int pkt_len;
	struct user_msghdr hdr;
	struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */
	int iovpos = 0;
	struct sk_buff *skb;
	int header_check;

	hdr.msg_name = NULL;
	hdr.msg_namelen = 0;
	hdr.msg_iov = (struct iovec *) &iov;
	hdr.msg_control = NULL;
	hdr.msg_controllen = 0;
	hdr.msg_flags = 0;

	if (vp->header_size > 0) {
		iov[0].iov_base = vp->header_rxbuffer;
		iov[0].iov_len = vp->header_size;
	}

	skb = prep_skb(vp, &hdr);

	if (skb == NULL) {
		/* Read a packet into drop_buffer and don't do
		 * anything with it.
		 */
		iov[iovpos].iov_base = drop_buffer;
		iov[iovpos].iov_len = DROP_BUFFER_SIZE;
		hdr.msg_iovlen = 1;
		vp->dev->stats.rx_dropped++;
	}

	pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
	if (pkt_len < 0) {
		vp->in_error = true;
		return pkt_len;
	}

	if (skb != NULL) {
		if (pkt_len > vp->header_size) {
			if (vp->header_size > 0) {
				header_check = vp->verify_header(
					vp->header_rxbuffer, skb, vp);
				if (header_check < 0) {
					dev_kfree_skb_irq(skb);
					vp->dev->stats.rx_dropped++;
					vp->estats.rx_encaps_errors++;
					return 0;
				}
				if (header_check > 0) {
					vp->estats.rx_csum_offload_good++;
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			}
			pskb_trim(skb, pkt_len - vp->rx_header_size);
			skb->protocol = eth_type_trans(skb, skb->dev);
			vp->dev->stats.rx_bytes += skb->len;
			vp->dev->stats.rx_packets++;
			netif_rx(skb);
		} else {
			dev_kfree_skb_irq(skb);
		}
	}
	return pkt_len;
}

/*
 * Packet at a time TX which falls back to vector TX if the
 * underlying transport is busy.
 */

static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
{
	struct iovec iov[3 + MAX_IOV_SIZE];
	int iov_count, pkt_len = 0;

	iov[0].iov_base = vp->header_txbuffer;
	iov_count = prep_msg(vp, skb, (struct iovec *) &iov);

	if (iov_count < 1)
		goto drop;

	pkt_len = uml_vector_writev(
		vp->fds->tx_fd,
		(struct iovec *) &iov,
		iov_count
	);

	if (pkt_len < 0)
		goto drop;

	netif_trans_update(vp->dev);
	netif_wake_queue(vp->dev);

	if (pkt_len > 0) {
		vp->dev->stats.tx_bytes += skb->len;
		vp->dev->stats.tx_packets++;
	} else {
		vp->dev->stats.tx_dropped++;
	}
	consume_skb(skb);
	return pkt_len;
drop:
	vp->dev->stats.tx_dropped++;
	consume_skb(skb);
	if (pkt_len < 0)
		vp->in_error = true;
	return pkt_len;
}

/*
 * Receive as many messages as we can in one call using the special
 * mmsg vector matched to an skb vector which we prepared earlier.
 */

static int vector_mmsg_rx(struct vector_private *vp)
{
	int packet_count, i;
	struct vector_queue *qi = vp->rx_queue;
	struct sk_buff *skb;
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	void **skbuff_vector = qi->skbuff_vector;
	int header_check;

	/* Refresh the vector and make sure it is filled with new skbs
	 * and that the iovs are updated to point to them.
	 */

	prep_queue_for_rx(qi);

	/* Fire the Lazy Gun - get as many packets as we can in one go. */

	packet_count = uml_vector_recvmmsg(
		vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);

	if (packet_count < 0)
		vp->in_error = true;

	if (packet_count <= 0)
		return packet_count;

	/* We treat packet processing as enqueue, buffer refresh as dequeue.
	 * The queue_depth tells us how many buffers have been used and how
	 * many we need to prep the next time prep_queue_for_rx() is called.
	 */

	qi->queue_depth = packet_count;

	for (i = 0; i < packet_count; i++) {
		skb = (*skbuff_vector);
		if (mmsg_vector->msg_len > vp->header_size) {
			if (vp->header_size > 0) {
				header_check = vp->verify_header(
					mmsg_vector->msg_hdr.msg_iov->iov_base,
					skb,
					vp
				);
				if (header_check < 0) {
					/* Overlay header failed to verify - discard.
					 * We could actually keep this skb and reuse it,
					 * but that would make the prep logic too
					 * complex.
					 */
					dev_kfree_skb_irq(skb);
					vp->estats.rx_encaps_errors++;
					continue;
				}
				if (header_check > 0) {
					vp->estats.rx_csum_offload_good++;
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			}
			pskb_trim(skb,
				mmsg_vector->msg_len - vp->rx_header_size);
			skb->protocol = eth_type_trans(skb, skb->dev);
			/*
			 * We do not need to lock on updating stats here.
			 * The interrupt loop is non-reentrant.
			 */
			vp->dev->stats.rx_bytes += skb->len;
			vp->dev->stats.rx_packets++;
			netif_rx(skb);
		} else {
			/* Overlay header too short to do anything - discard.
			 * We could actually keep this skb and reuse it,
			 * but that would make the prep logic too complex.
			 */
			if (skb != NULL)
				dev_kfree_skb_irq(skb);
		}
		(*skbuff_vector) = NULL;
		/* Move to the next buffer element */
		mmsg_vector++;
		skbuff_vector++;
	}
	if (packet_count > 0) {
		if (vp->estats.rx_queue_max < packet_count)
			vp->estats.rx_queue_max = packet_count;
		vp->estats.rx_queue_running_average =
			(vp->estats.rx_queue_running_average + packet_count) >> 1;
	}
	return packet_count;
}

static void vector_rx(struct vector_private *vp)
{
	int err;
	int iter = 0;

	if ((vp->options & VECTOR_RX) > 0)
		while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
			iter++;
	else
		while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
			iter++;
	if ((err != 0) && net_ratelimit())
		netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
	if (iter == MAX_ITERATIONS)
		netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
}

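/* Transmit entry point. In non-vector mode packets are written out
 * immediately via writev_tx(). In vector mode they are enqueued and
 * flushed either straight away (full queue or small packet), or later
 * from the coalescing timer or the TX tasklet.
 */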
static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	int queue_depth = 0;

	if (vp->in_error) {
		deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
		if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
			deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
		return NETDEV_TX_BUSY;
	}

	if ((vp->options & VECTOR_TX) == 0) {
		writev_tx(vp, skb);
		return NETDEV_TX_OK;
	}

	/* We do BQL only in the vector path, no point doing it in
	 * packet-at-a-time mode as there is no device queue
	 */

	netdev_sent_queue(vp->dev, skb->len);
	queue_depth = vector_enqueue(vp->tx_queue, skb);

	/* if the device queue is full, stop the upper layers and
	 * flush it.
	 */

	if (queue_depth >= vp->tx_queue->max_depth - 1) {
		vp->estats.tx_kicks++;
		netif_stop_queue(dev);
		vector_send(vp->tx_queue);
		return NETDEV_TX_OK;
	}
	if (netdev_xmit_more()) {
		mod_timer(&vp->tl, jiffies + vp->coalesce);
		return NETDEV_TX_OK;
	}
	if (skb->len < TX_SMALL_PACKET) {
		vp->estats.tx_kicks++;
		vector_send(vp->tx_queue);
	} else
		tasklet_schedule(&vp->tx_poll);
	return NETDEV_TX_OK;
}

static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vector_private *vp = netdev_priv(dev);

	if (!netif_running(dev))
		return IRQ_NONE;
	vector_rx(vp);
	return IRQ_HANDLED;
}

static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vector_private *vp = netdev_priv(dev);

	if (!netif_running(dev))
		return IRQ_NONE;
	/* We need to pay attention to it only if we got
	 * -EAGAIN or -ENOBUFS from sendmmsg. Otherwise
	 * we ignore it. In the future, it may be worth
	 * improving the IRQ controller a bit to make
	 * tweaking the IRQ mask less costly
	 */

	if (vp->in_write_poll)
		tasklet_schedule(&vp->tx_poll);
	return IRQ_HANDLED;
}

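/* Round-robin allocator for the per-device RX/TX IRQ lines, cycling
 * through the VECTOR_IRQ_SPACE IRQs above VECTOR_BASE_IRQ.
 */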
static int irq_rr;

static int vector_net_close(struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	del_timer(&vp->tl);

	if (vp->fds == NULL)
		return 0;

	/* Disable and free all IRQS */
	if (vp->rx_irq > 0) {
		um_free_irq(vp->rx_irq, dev);
		vp->rx_irq = 0;
	}
	if (vp->tx_irq > 0) {
		um_free_irq(vp->tx_irq, dev);
		vp->tx_irq = 0;
	}
	tasklet_kill(&vp->tx_poll);
	if (vp->fds->rx_fd > 0) {
		os_close_file(vp->fds->rx_fd);
		vp->fds->rx_fd = -1;
	}
	if (vp->fds->tx_fd > 0) {
		os_close_file(vp->fds->tx_fd);
		vp->fds->tx_fd = -1;
	}
	kfree(vp->bpf);
	kfree(vp->fds->remote_addr);
	kfree(vp->transport_data);
	kfree(vp->header_rxbuffer);
	kfree(vp->header_txbuffer);
	if (vp->rx_queue != NULL)
		destroy_queue(vp->rx_queue);
	if (vp->tx_queue != NULL)
		destroy_queue(vp->tx_queue);
	kfree(vp->fds);
	vp->fds = NULL;
	spin_lock_irqsave(&vp->lock, flags);
	vp->opened = false;
	vp->in_error = false;
	spin_unlock_irqrestore(&vp->lock, flags);
	return 0;
}

/* TX tasklet */

static void vector_tx_poll(unsigned long data)
{
	struct vector_private *vp = (struct vector_private *)data;

	vp->estats.tx_kicks++;
	vector_send(vp->tx_queue);
}

static void vector_reset_tx(struct work_struct *work)
{
	struct vector_private *vp =
		container_of(work, struct vector_private, reset_tx);

	netdev_reset_queue(vp->dev);
	netif_start_queue(vp->dev);
	netif_wake_queue(vp->dev);
}
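
/* Open the device: create the host-side file descriptors and transport
 * data, allocate the RX/TX queues (or the legacy per-packet header
 * buffers), request the read and optional write IRQs, apply qdisc
 * bypass and BPF filtering where configured, and prime RX in case the
 * host side already has queued data.
 */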
static int vector_net_open(struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	unsigned long flags;
	int err = -EINVAL;
	struct vector_device *vdevice;

	spin_lock_irqsave(&vp->lock, flags);
	if (vp->opened) {
		spin_unlock_irqrestore(&vp->lock, flags);
		return -ENXIO;
	}
	vp->opened = true;
	spin_unlock_irqrestore(&vp->lock, flags);

	vp->fds = uml_vector_user_open(vp->unit, vp->parsed);

	if (vp->fds == NULL)
		goto out_close;

	if (build_transport_data(vp) < 0)
		goto out_close;

	if ((vp->options & VECTOR_RX) > 0) {
		vp->rx_queue = create_queue(
			vp,
			get_depth(vp->parsed),
			vp->rx_header_size,
			MAX_IOV_SIZE
		);
		vp->rx_queue->queue_depth = get_depth(vp->parsed);
	} else {
		vp->header_rxbuffer = kmalloc(
			vp->rx_header_size,
			GFP_KERNEL
		);
		if (vp->header_rxbuffer == NULL)
			goto out_close;
	}
	if ((vp->options & VECTOR_TX) > 0) {
		vp->tx_queue = create_queue(
			vp,
			get_depth(vp->parsed),
			vp->header_size,
			MAX_IOV_SIZE
		);
	} else {
		vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
		if (vp->header_txbuffer == NULL)
			goto out_close;
	}

	/* READ IRQ */
	err = um_request_irq(
		irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
			IRQ_READ, vector_rx_interrupt,
			IRQF_SHARED, dev->name, dev);
	if (err != 0) {
		netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}
	vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
	dev->irq = irq_rr + VECTOR_BASE_IRQ;
	irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;

	/* WRITE IRQ - we need it only if we have vector TX */
	if ((vp->options & VECTOR_TX) > 0) {
		err = um_request_irq(
			irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
				IRQ_WRITE, vector_tx_interrupt,
				IRQF_SHARED, dev->name, dev);
		if (err != 0) {
			netdev_err(dev,
				"vector_open: failed to get tx irq(%d)\n", err);
			err = -ENETUNREACH;
			goto out_close;
		}
		vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
		irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
	}

	if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
		if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
			vp->options |= VECTOR_BPF;
	}
	if ((vp->options & VECTOR_BPF) != 0)
		vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr);

	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */

	vector_rx(vp);

	vector_reset_stats(vp);
	vdevice = find_device(vp->unit);
	vdevice->opened = 1;

	if ((vp->options & VECTOR_TX) != 0)
		add_timer(&vp->tl);
	return 0;
out_close:
	vector_net_close(dev);
	return err;
}


static void vector_net_set_multicast_list(struct net_device *dev)
{
	/* TODO: - we can do some BPF games here */
	return;
}

static void vector_net_tx_timeout(struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);

	vp->estats.tx_timeout_count++;
	netif_trans_update(dev);
	schedule_work(&vp->reset_tx);
}

static netdev_features_t vector_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	return features;
}

static int vector_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct vector_private *vp = netdev_priv(dev);

	/* Adjust buffer sizes for GSO/GRO. Unfortunately, there is
	 * no way to negotiate it on raw sockets, so we can change
	 * only our side.
	 */
	if (features & NETIF_F_GRO)
		/* All new frame buffers will be GRO-sized */
		vp->req_size = 65536;
	else
		/* All new frame buffers will be normal sized */
		vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void vector_net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	vector_rx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void vector_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
}

static void vector_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct vector_private *vp = netdev_priv(netdev);

	ring->rx_max_pending = vp->rx_queue->max_depth;
	ring->tx_max_pending = vp->tx_queue->max_depth;
	ring->rx_pending = vp->rx_queue->max_depth;
	ring->tx_pending = vp->tx_queue->max_depth;
}

static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_TEST:
		*buf = '\0';
		break;
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	default:
		WARN_ON(1);
		break;
	}
}

static int vector_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return 0;
	case ETH_SS_STATS:
		return VECTOR_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void vector_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *estats,
	u64 *tmp_stats)
{
	struct vector_private *vp = netdev_priv(dev);

	memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
}

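/* The TX coalescing interval is kept internally in jiffies; ethtool
 * talks in microseconds, hence the conversions via HZ below.
 */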
static int vector_get_coalesce(struct net_device *netdev,
					struct ethtool_coalesce *ec)
{
	struct vector_private *vp = netdev_priv(netdev);

	ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
	return 0;
}

static int vector_set_coalesce(struct net_device *netdev,
					struct ethtool_coalesce *ec)
{
	struct vector_private *vp = netdev_priv(netdev);

	vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
	if (vp->coalesce == 0)
		vp->coalesce = 1;
	return 0;
}

static const struct ethtool_ops vector_net_ethtool_ops = {
	.get_drvinfo	= vector_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_ringparam	= vector_get_ringparam,
	.get_strings	= vector_get_strings,
	.get_sset_count	= vector_get_sset_count,
	.get_ethtool_stats = vector_get_ethtool_stats,
	.get_coalesce	= vector_get_coalesce,
	.set_coalesce	= vector_set_coalesce,
};


static const struct net_device_ops vector_netdev_ops = {
	.ndo_open		= vector_net_open,
	.ndo_stop		= vector_net_close,
	.ndo_start_xmit		= vector_net_start_xmit,
	.ndo_set_rx_mode	= vector_net_set_multicast_list,
	.ndo_tx_timeout		= vector_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_fix_features	= vector_fix_features,
	.ndo_set_features	= vector_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = vector_net_poll_controller,
#endif
};


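/* Coalescing timer - fires vp->coalesce jiffies after being armed in
 * vector_net_start_xmit() and flushes whatever has accumulated in the
 * TX queue.
 */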
static void vector_timer_expire(struct timer_list *t)
{
	struct vector_private *vp = from_timer(vp, t, tl);

	vp->estats.tx_kicks++;
	vector_send(vp->tx_queue);
}

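/* Create and register one vector device: allocate the net_device and
 * its private data, register the backing platform device, initialize
 * defaults from the parsed argument list and register the netdevice.
 */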
static void vector_eth_configure(
		int n,
		struct arglist *def
	)
{
	struct vector_device *device;
	struct net_device *dev;
	struct vector_private *vp;
	int err;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
				 "vector_device\n");
		return;
	}
	dev = alloc_etherdev(sizeof(struct vector_private));
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
				 "net_device for vec%d\n", n);
		goto out_free_device;
	}

	dev->mtu = get_mtu(def);

	INIT_LIST_HEAD(&device->list);
	device->unit = n;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "vec%d", n);
	uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
	vp = netdev_priv(dev);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	device->pdev.dev.release = vector_device_release;
	dev_set_drvdata(&device->pdev.dev, device);
	if (platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev, &device->pdev.dev);

	device->dev = dev;

	*vp = ((struct vector_private)
		{
		.list			= LIST_HEAD_INIT(vp->list),
		.dev			= dev,
		.unit			= n,
		.options		= get_transport_options(def),
		.rx_irq			= 0,
		.tx_irq			= 0,
		.parsed			= def,
		.max_packet		= get_mtu(def) + ETH_HEADER_OTHER,
		/* TODO - we need to calculate headroom so that the ip header
		 * is 16 byte aligned all the time
		 */
		.headroom		= get_headroom(def),
		.form_header		= NULL,
		.verify_header		= NULL,
		.header_rxbuffer	= NULL,
		.header_txbuffer	= NULL,
		.header_size		= 0,
		.rx_header_size		= 0,
		.rexmit_scheduled	= false,
		.opened			= false,
		.transport_data		= NULL,
		.in_write_poll		= false,
		.coalesce		= 2,
		.req_size		= get_req_size(def),
		.in_error		= false
		});

	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
	tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp);
	INIT_WORK(&vp->reset_tx, vector_reset_tx);

	timer_setup(&vp->tl, vector_timer_expire, 0);
	spin_lock_init(&vp->lock);

	/* FIXME */
	dev->netdev_ops = &vector_netdev_ops;
	dev->ethtool_ops = &vector_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	/* primary IRQ - fixme */
	dev->irq = 0; /* we will adjust this once opened */

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	spin_lock(&vector_devices_lock);
	list_add(&device->list, &vector_devices);
	spin_unlock(&vector_devices_lock);

	return;

out_undo_user_init:
	return;
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}


/*
 * Invoked late in init.
 */

static int __init vector_init(void)
{
	struct list_head *ele;
	struct vector_cmd_line_arg *def;
	struct arglist *parsed;

	list_for_each(ele, &vec_cmd_line) {
		def = list_entry(ele, struct vector_cmd_line_arg, list);
		parsed = uml_parse_vector_ifspec(def->arguments);
		if (parsed != NULL)
			vector_eth_configure(def->unit, parsed);
	}
	return 0;
}


/* Invoked at initial argument parsing; only stores the
 * arguments until a proper vector_init is called
 * later.
 */

static int __init vector_setup(char *str)
{
	char *error;
	int n, err;
	struct vector_cmd_line_arg *new;

	err = vector_parse(str, &n, &str, &error);
	if (err) {
		printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
				 str, error);
		return 1;
	}
	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
	if (!new)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*new));
	INIT_LIST_HEAD(&new->list);
	new->unit = n;
	new->arguments = str;
	list_add_tail(&new->list, &vec_cmd_line);
	return 1;
}

__setup("vec", vector_setup);
__uml_help(vector_setup,
"vec[0-9]+:<option>=<value>,<option>=<value>\n"
"	 Configure a vector io network device.\n\n"
);

late_initcall(vector_init);

static struct mc_device vector_mc = {
	.list		= LIST_HEAD_INIT(vector_mc.list),
	.name		= "vec",
	.config		= vector_config,
	.get_config	= NULL,
	.id		= vector_id,
	.remove		= vector_remove,
};

#ifdef CONFIG_INET
static int vector_inetaddr_event(
	struct notifier_block *this,
	unsigned long event,
	void *ptr)
{
	return NOTIFY_DONE;
}

static struct notifier_block vector_inetaddr_notifier = {
	.notifier_call		= vector_inetaddr_event,
};

static void inet_register(void)
{
	register_inetaddr_notifier(&vector_inetaddr_notifier);
}
#else
static inline void inet_register(void)
{
}
#endif

static int vector_net_init(void)
{
	mconsole_register_dev(&vector_mc);
	inet_register();
	return 0;
}

__initcall(vector_net_init);