1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * IUCV network driver
4  *
5  * Copyright IBM Corp. 2001, 2009
6  *
7  * Author(s):
8  *	Original netiucv driver:
9  *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
10  *	Sysfs integration and all bugs therein:
11  *		Cornelia Huck (cornelia.huck@de.ibm.com)
12  *	PM functions:
13  *		Ursula Braun (ursula.braun@de.ibm.com)
14  *
15  * Documentation used:
16  *  the source of the original IUCV driver by:
17  *    Stefan Hegewald <hegewald@de.ibm.com>
18  *    Hartmut Penner <hpenner@de.ibm.com>
19  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
20  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
21  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
22  */
23 
24 #define KMSG_COMPONENT "netiucv"
25 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
26 
27 #undef DEBUG
28 
29 #include <linux/module.h>
30 #include <linux/init.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/errno.h>
34 #include <linux/types.h>
35 #include <linux/interrupt.h>
36 #include <linux/timer.h>
37 #include <linux/bitops.h>
38 
39 #include <linux/signal.h>
40 #include <linux/string.h>
41 #include <linux/device.h>
42 
43 #include <linux/ip.h>
44 #include <linux/if_arp.h>
45 #include <linux/tcp.h>
46 #include <linux/skbuff.h>
47 #include <linux/ctype.h>
48 #include <net/dst.h>
49 
50 #include <asm/io.h>
51 #include <linux/uaccess.h>
52 #include <asm/ebcdic.h>
53 
54 #include <net/iucv/iucv.h>
55 #include "fsm.h"
56 
57 MODULE_AUTHOR
58     ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
59 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
60 
61 /**
62  * Debug Facility stuff
63  */
64 #define IUCV_DBF_SETUP_NAME "iucv_setup"
65 #define IUCV_DBF_SETUP_LEN 64
66 #define IUCV_DBF_SETUP_PAGES 2
67 #define IUCV_DBF_SETUP_NR_AREAS 1
68 #define IUCV_DBF_SETUP_LEVEL 3
69 
70 #define IUCV_DBF_DATA_NAME "iucv_data"
71 #define IUCV_DBF_DATA_LEN 128
72 #define IUCV_DBF_DATA_PAGES 2
73 #define IUCV_DBF_DATA_NR_AREAS 1
74 #define IUCV_DBF_DATA_LEVEL 2
75 
76 #define IUCV_DBF_TRACE_NAME "iucv_trace"
77 #define IUCV_DBF_TRACE_LEN 16
78 #define IUCV_DBF_TRACE_PAGES 4
79 #define IUCV_DBF_TRACE_NR_AREAS 1
80 #define IUCV_DBF_TRACE_LEVEL 3
81 
82 #define IUCV_DBF_TEXT(name,level,text) \
83 	do { \
84 		debug_text_event(iucv_dbf_##name,level,text); \
85 	} while (0)
86 
87 #define IUCV_DBF_HEX(name,level,addr,len) \
88 	do { \
89 		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
90 	} while (0)
91 
92 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
93 
94 #define IUCV_DBF_TEXT_(name, level, text...) \
95 	do { \
96 		if (debug_level_enabled(iucv_dbf_##name, level)) { \
97 			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
98 			sprintf(__buf, text); \
99 			debug_text_event(iucv_dbf_##name, level, __buf); \
100 			put_cpu_var(iucv_dbf_txt_buf); \
101 		} \
102 	} while (0)
103 
104 #define IUCV_DBF_SPRINTF(name,level,text...) \
105 	do { \
106 		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
107 		debug_sprintf_event(iucv_dbf_trace, level, text ); \
108 	} while (0)
109 
110 /**
111  * some more debug stuff
112  */
113 #define PRINTK_HEADER " iucv: "       /* for debugging */
114 
115 static struct device_driver netiucv_driver = {
116 	.owner = THIS_MODULE,
117 	.name = "netiucv",
118 	.bus  = &iucv_bus,
119 };
120 
121 /**
122  * Per connection profiling data
123  */
124 struct connection_profile {
125 	unsigned long maxmulti;
126 	unsigned long maxcqueue;
127 	unsigned long doios_single;
128 	unsigned long doios_multi;
129 	unsigned long txlen;
130 	unsigned long tx_time;
131 	unsigned long send_stamp;
132 	unsigned long tx_pending;
133 	unsigned long tx_max_pending;
134 };
135 
136 /**
137  * Representation of one iucv connection
138  */
139 struct iucv_connection {
140 	struct list_head	  list;
141 	struct iucv_path	  *path;
142 	struct sk_buff            *rx_buff;
143 	struct sk_buff            *tx_buff;
144 	struct sk_buff_head       collect_queue;
145 	struct sk_buff_head	  commit_queue;
146 	spinlock_t                collect_lock;
147 	int                       collect_len;
148 	int                       max_buffsize;
149 	fsm_timer                 timer;
150 	fsm_instance              *fsm;
151 	struct net_device         *netdev;
152 	struct connection_profile prof;
153 	char                      userid[9];
154 	char			  userdata[17];
155 };
156 
157 /**
158  * Linked list of all connection structs.
159  */
160 static LIST_HEAD(iucv_connection_list);
161 static DEFINE_RWLOCK(iucv_connection_rwlock);
162 
163 /**
164  * Representation of event-data for the
165  * connection state machine.
166  */
167 struct iucv_event {
168 	struct iucv_connection *conn;
169 	void                   *data;
170 };
171 
172 /**
173  * Private part of the network device structure
174  */
175 struct netiucv_priv {
176 	struct net_device_stats stats;
177 	unsigned long           tbusy;
178 	fsm_instance            *fsm;
179         struct iucv_connection  *conn;
180 	struct device           *dev;
181 };
182 
183 /**
184  * Link level header for a packet.
185  */
186 struct ll_header {
187 	u16 next;
188 };
189 
190 #define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
191 #define NETIUCV_BUFSIZE_MAX	 65537
192 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
193 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
194 #define NETIUCV_MTU_DEFAULT      9216
195 #define NETIUCV_QUEUELEN_DEFAULT 50
196 #define NETIUCV_TIMEOUT_5SEC     5000
197 
198 /**
199  * Compatibility macros for busy handling
200  * of network devices.
201  */
202 static void netiucv_clear_busy(struct net_device *dev)
203 {
204 	struct netiucv_priv *priv = netdev_priv(dev);
205 	clear_bit(0, &priv->tbusy);
206 	netif_wake_queue(dev);
207 }
208 
209 static int netiucv_test_and_set_busy(struct net_device *dev)
210 {
211 	struct netiucv_priv *priv = netdev_priv(dev);
212 	netif_stop_queue(dev);
213 	return test_and_set_bit(0, &priv->tbusy);
214 }
215 
216 static u8 iucvMagic_ascii[16] = {
217 	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
218 	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
219 };
220 
221 static u8 iucvMagic_ebcdic[16] = {
222 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
223 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
224 };
225 
226 /**
227  * Convert an iucv userId to its printable
228  * form (strip whitespace at end).
229  *
230  * @param An iucv userId
231  *
232  * @returns The printable string (static data!!)
233  */
234 static char *netiucv_printname(char *name, int len)
235 {
236 	static char tmp[17];
237 	char *p = tmp;
238 	memcpy(tmp, name, len);
239 	tmp[len] = '\0';
240 	while (*p && ((p - tmp) < len) && (!isspace(*p)))
241 		p++;
242 	*p = '\0';
243 	return tmp;
244 }
245 
246 static char *netiucv_printuser(struct iucv_connection *conn)
247 {
248 	static char tmp_uid[9];
249 	static char tmp_udat[17];
250 	static char buf[100];
251 
252 	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
253 		tmp_uid[8] = '\0';
254 		tmp_udat[16] = '\0';
255 		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
256 		memcpy(tmp_udat, conn->userdata, 16);
257 		EBCASC(tmp_udat, 16);
258 		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
259 		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
260 		return buf;
261 	} else
262 		return netiucv_printname(conn->userid, 8);
263 }
264 
265 /**
266  * States of the interface statemachine.
267  */
268 enum dev_states {
269 	DEV_STATE_STOPPED,
270 	DEV_STATE_STARTWAIT,
271 	DEV_STATE_STOPWAIT,
272 	DEV_STATE_RUNNING,
273 	/**
274 	 * MUST always be the last element!!
275 	 */
276 	NR_DEV_STATES
277 };
278 
279 static const char *dev_state_names[] = {
280 	"Stopped",
281 	"StartWait",
282 	"StopWait",
283 	"Running",
284 };
285 
286 /**
287  * Events of the interface statemachine.
288  */
289 enum dev_events {
290 	DEV_EVENT_START,
291 	DEV_EVENT_STOP,
292 	DEV_EVENT_CONUP,
293 	DEV_EVENT_CONDOWN,
294 	/**
295 	 * MUST always be the last element!!
296 	 */
297 	NR_DEV_EVENTS
298 };
299 
300 static const char *dev_event_names[] = {
301 	"Start",
302 	"Stop",
303 	"Connection up",
304 	"Connection down",
305 };
306 
307 /**
308  * Events of the connection statemachine
309  */
310 enum conn_events {
311 	/**
312 	 * Events, representing callbacks from
313 	 * lowlevel iucv layer)
314 	 */
315 	CONN_EVENT_CONN_REQ,
316 	CONN_EVENT_CONN_ACK,
317 	CONN_EVENT_CONN_REJ,
318 	CONN_EVENT_CONN_SUS,
319 	CONN_EVENT_CONN_RES,
320 	CONN_EVENT_RX,
321 	CONN_EVENT_TXDONE,
322 
323 	/**
324 	 * Events, representing error return codes from
325 	 * calls to lowlevel iucv layer
326 	 */
327 
328 	/**
329 	 * Event, representing timer expiry.
330 	 */
331 	CONN_EVENT_TIMER,
332 
333 	/**
334 	 * Events, representing commands from upper levels.
335 	 */
336 	CONN_EVENT_START,
337 	CONN_EVENT_STOP,
338 
339 	/**
340 	 * MUST always be the last element!!
341 	 */
342 	NR_CONN_EVENTS,
343 };
344 
345 static const char *conn_event_names[] = {
346 	"Remote connection request",
347 	"Remote connection acknowledge",
348 	"Remote connection reject",
349 	"Connection suspended",
350 	"Connection resumed",
351 	"Data received",
352 	"Data sent",
353 
354 	"Timer",
355 
356 	"Start",
357 	"Stop",
358 };
359 
360 /**
361  * States of the connection statemachine.
362  */
363 enum conn_states {
364 	/**
365 	 * Connection not assigned to any device,
366 	 * initial state, invalid
367 	 */
368 	CONN_STATE_INVALID,
369 
370 	/**
371 	 * Userid assigned but not operating
372 	 */
373 	CONN_STATE_STOPPED,
374 
375 	/**
376 	 * Connection registered,
377 	 * no connection request sent yet,
378 	 * no connection request received
379 	 */
380 	CONN_STATE_STARTWAIT,
381 
382 	/**
383 	 * Connection registered and connection request sent,
384 	 * no acknowledge and no connection request received yet.
385 	 */
386 	CONN_STATE_SETUPWAIT,
387 
388 	/**
389 	 * Connection up and running idle
390 	 */
391 	CONN_STATE_IDLE,
392 
393 	/**
394 	 * Data sent, awaiting CONN_EVENT_TXDONE
395 	 */
396 	CONN_STATE_TX,
397 
398 	/**
399 	 * Error during registration.
400 	 */
401 	CONN_STATE_REGERR,
402 
403 	/**
404 	 * Error during connection setup.
405 	 */
406 	CONN_STATE_CONNERR,
407 
408 	/**
409 	 * MUST always be the last element!!
410 	 */
411 	NR_CONN_STATES,
412 };
413 
414 static const char *conn_state_names[] = {
415 	"Invalid",
416 	"Stopped",
417 	"StartWait",
418 	"SetupWait",
419 	"Idle",
420 	"TX",
421 	"Terminating",
422 	"Registration error",
423 	"Connect error",
424 };
425 
426 
427 /**
428  * Debug Facility Stuff
429  */
430 static debug_info_t *iucv_dbf_setup = NULL;
431 static debug_info_t *iucv_dbf_data = NULL;
432 static debug_info_t *iucv_dbf_trace = NULL;
433 
434 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
435 
436 static void iucv_unregister_dbf_views(void)
437 {
438 	debug_unregister(iucv_dbf_setup);
439 	debug_unregister(iucv_dbf_data);
440 	debug_unregister(iucv_dbf_trace);
441 }
442 static int iucv_register_dbf_views(void)
443 {
444 	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
445 					IUCV_DBF_SETUP_PAGES,
446 					IUCV_DBF_SETUP_NR_AREAS,
447 					IUCV_DBF_SETUP_LEN);
448 	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
449 				       IUCV_DBF_DATA_PAGES,
450 				       IUCV_DBF_DATA_NR_AREAS,
451 				       IUCV_DBF_DATA_LEN);
452 	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
453 					IUCV_DBF_TRACE_PAGES,
454 					IUCV_DBF_TRACE_NR_AREAS,
455 					IUCV_DBF_TRACE_LEN);
456 
457 	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
458 	    (iucv_dbf_trace == NULL)) {
459 		iucv_unregister_dbf_views();
460 		return -ENOMEM;
461 	}
462 	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
463 	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
464 
465 	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
466 	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
467 
468 	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
469 	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
470 
471 	return 0;
472 }
473 
474 /*
475  * Callback-wrappers, called from lowlevel iucv layer.
476  */
477 
478 static void netiucv_callback_rx(struct iucv_path *path,
479 				struct iucv_message *msg)
480 {
481 	struct iucv_connection *conn = path->private;
482 	struct iucv_event ev;
483 
484 	ev.conn = conn;
485 	ev.data = msg;
486 	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
487 }
488 
489 static void netiucv_callback_txdone(struct iucv_path *path,
490 				    struct iucv_message *msg)
491 {
492 	struct iucv_connection *conn = path->private;
493 	struct iucv_event ev;
494 
495 	ev.conn = conn;
496 	ev.data = msg;
497 	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
498 }
499 
500 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
501 {
502 	struct iucv_connection *conn = path->private;
503 
504 	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
505 }
506 
507 static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
508 				    u8 *ipuser)
509 {
510 	struct iucv_connection *conn = path->private;
511 	struct iucv_event ev;
512 	static char tmp_user[9];
513 	static char tmp_udat[17];
514 	int rc;
515 
516 	rc = -EINVAL;
517 	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
518 	memcpy(tmp_udat, ipuser, 16);
519 	EBCASC(tmp_udat, 16);
520 	read_lock_bh(&iucv_connection_rwlock);
521 	list_for_each_entry(conn, &iucv_connection_list, list) {
522 		if (strncmp(ipvmid, conn->userid, 8) ||
523 		    strncmp(ipuser, conn->userdata, 16))
524 			continue;
525 		/* Found a matching connection for this path. */
526 		conn->path = path;
527 		ev.conn = conn;
528 		ev.data = path;
529 		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
530 		rc = 0;
531 	}
532 	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
533 		       tmp_user, netiucv_printname(tmp_udat, 16));
534 	read_unlock_bh(&iucv_connection_rwlock);
535 	return rc;
536 }
537 
538 static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
539 {
540 	struct iucv_connection *conn = path->private;
541 
542 	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
543 }
544 
545 static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
546 {
547 	struct iucv_connection *conn = path->private;
548 
549 	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
550 }
551 
552 static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
553 {
554 	struct iucv_connection *conn = path->private;
555 
556 	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
557 }
558 
559 /**
560  * NOP action for statemachines
561  */
562 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
563 {
564 }
565 
566 /*
567  * Actions of the connection statemachine
568  */
569 
570 /**
571  * netiucv_unpack_skb
572  * @conn: The connection where this skb has been received.
573  * @pskb: The received skb.
574  *
575  * Unpack a just received skb and hand it over to upper layers.
576  * Helper function for conn_action_rx.
577  */
578 static void netiucv_unpack_skb(struct iucv_connection *conn,
579 			       struct sk_buff *pskb)
580 {
581 	struct net_device     *dev = conn->netdev;
582 	struct netiucv_priv   *privptr = netdev_priv(dev);
583 	u16 offset = 0;
584 
585 	skb_put(pskb, NETIUCV_HDRLEN);
586 	pskb->dev = dev;
587 	pskb->ip_summed = CHECKSUM_NONE;
588 	pskb->protocol = cpu_to_be16(ETH_P_IP);
589 
590 	while (1) {
591 		struct sk_buff *skb;
592 		struct ll_header *header = (struct ll_header *) pskb->data;
593 
594 		if (!header->next)
595 			break;
596 
597 		skb_pull(pskb, NETIUCV_HDRLEN);
598 		header->next -= offset;
599 		offset += header->next;
600 		header->next -= NETIUCV_HDRLEN;
601 		if (skb_tailroom(pskb) < header->next) {
602 			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
603 				header->next, skb_tailroom(pskb));
604 			return;
605 		}
606 		skb_put(pskb, header->next);
607 		skb_reset_mac_header(pskb);
608 		skb = dev_alloc_skb(pskb->len);
609 		if (!skb) {
610 			IUCV_DBF_TEXT(data, 2,
611 				"Out of memory in netiucv_unpack_skb\n");
612 			privptr->stats.rx_dropped++;
613 			return;
614 		}
615 		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
616 					  pskb->len);
617 		skb_reset_mac_header(skb);
618 		skb->dev = pskb->dev;
619 		skb->protocol = pskb->protocol;
620 		pskb->ip_summed = CHECKSUM_UNNECESSARY;
621 		privptr->stats.rx_packets++;
622 		privptr->stats.rx_bytes += skb->len;
623 		/*
624 		 * Since receiving is always initiated from a tasklet (in iucv.c),
625 		 * we must use netif_rx_ni() instead of netif_rx()
626 		 */
627 		netif_rx_ni(skb);
628 		skb_pull(pskb, header->next);
629 		skb_put(pskb, NETIUCV_HDRLEN);
630 	}
631 }
632 
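/*
 * Handle CONN_EVENT_RX: receive the pending IUCV message into the
 * connection's rx_buff and hand it to netiucv_unpack_skb().
 */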
633 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
634 {
635 	struct iucv_event *ev = arg;
636 	struct iucv_connection *conn = ev->conn;
637 	struct iucv_message *msg = ev->data;
638 	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
639 	int rc;
640 
641 	IUCV_DBF_TEXT(trace, 4, __func__);
642 
643 	if (!conn->netdev) {
644 		iucv_message_reject(conn->path, msg);
645 		IUCV_DBF_TEXT(data, 2,
646 			      "Received data for unlinked connection\n");
647 		return;
648 	}
649 	if (msg->length > conn->max_buffsize) {
650 		iucv_message_reject(conn->path, msg);
651 		privptr->stats.rx_dropped++;
652 		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
653 			       msg->length, conn->max_buffsize);
654 		return;
655 	}
656 	conn->rx_buff->data = conn->rx_buff->head;
657 	skb_reset_tail_pointer(conn->rx_buff);
658 	conn->rx_buff->len = 0;
659 	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
660 				  msg->length, NULL);
661 	if (rc || msg->length < 5) {
662 		privptr->stats.rx_errors++;
663 		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
664 		return;
665 	}
666 	netiucv_unpack_skb(conn, conn->rx_buff);
667 }
668 
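/*
 * Handle CONN_EVENT_TXDONE: release the skb whose transmission was just
 * confirmed, then flush any skbs gathered on the collect queue as one
 * multi-frame IUCV send.
 */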
669 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
670 {
671 	struct iucv_event *ev = arg;
672 	struct iucv_connection *conn = ev->conn;
673 	struct iucv_message *msg = ev->data;
674 	struct iucv_message txmsg;
675 	struct netiucv_priv *privptr = NULL;
676 	u32 single_flag = msg->tag;
677 	u32 txbytes = 0;
678 	u32 txpackets = 0;
679 	u32 stat_maxcq = 0;
680 	struct sk_buff *skb;
681 	unsigned long saveflags;
682 	struct ll_header header;
683 	int rc;
684 
685 	IUCV_DBF_TEXT(trace, 4, __func__);
686 
687 	if (!conn || !conn->netdev) {
688 		IUCV_DBF_TEXT(data, 2,
689 			      "Send confirmation for unlinked connection\n");
690 		return;
691 	}
692 	privptr = netdev_priv(conn->netdev);
693 	conn->prof.tx_pending--;
694 	if (single_flag) {
695 		if ((skb = skb_dequeue(&conn->commit_queue))) {
696 			refcount_dec(&skb->users);
697 			if (privptr) {
698 				privptr->stats.tx_packets++;
699 				privptr->stats.tx_bytes +=
700 					(skb->len - NETIUCV_HDRLEN
701 						  - NETIUCV_HDRLEN);
702 			}
703 			dev_kfree_skb_any(skb);
704 		}
705 	}
706 	conn->tx_buff->data = conn->tx_buff->head;
707 	skb_reset_tail_pointer(conn->tx_buff);
708 	conn->tx_buff->len = 0;
709 	spin_lock_irqsave(&conn->collect_lock, saveflags);
710 	while ((skb = skb_dequeue(&conn->collect_queue))) {
711 		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
712 		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
713 		skb_copy_from_linear_data(skb,
714 					  skb_put(conn->tx_buff, skb->len),
715 					  skb->len);
716 		txbytes += skb->len;
717 		txpackets++;
718 		stat_maxcq++;
719 		refcount_dec(&skb->users);
720 		dev_kfree_skb_any(skb);
721 	}
722 	if (conn->collect_len > conn->prof.maxmulti)
723 		conn->prof.maxmulti = conn->collect_len;
724 	conn->collect_len = 0;
725 	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
726 	if (conn->tx_buff->len == 0) {
727 		fsm_newstate(fi, CONN_STATE_IDLE);
728 		return;
729 	}
730 
731 	header.next = 0;
732 	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
733 	conn->prof.send_stamp = jiffies;
734 	txmsg.class = 0;
735 	txmsg.tag = 0;
736 	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
737 			       conn->tx_buff->data, conn->tx_buff->len);
738 	conn->prof.doios_multi++;
739 	conn->prof.txlen += conn->tx_buff->len;
740 	conn->prof.tx_pending++;
741 	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
742 		conn->prof.tx_max_pending = conn->prof.tx_pending;
743 	if (rc) {
744 		conn->prof.tx_pending--;
745 		fsm_newstate(fi, CONN_STATE_IDLE);
746 		if (privptr)
747 			privptr->stats.tx_errors += txpackets;
748 		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
749 	} else {
750 		if (privptr) {
751 			privptr->stats.tx_packets += txpackets;
752 			privptr->stats.tx_bytes += txbytes;
753 		}
754 		if (stat_maxcq > conn->prof.maxcqueue)
755 			conn->prof.maxcqueue = stat_maxcq;
756 	}
757 }
758 
759 static struct iucv_handler netiucv_handler = {
760 	.path_pending	  = netiucv_callback_connreq,
761 	.path_complete	  = netiucv_callback_connack,
762 	.path_severed	  = netiucv_callback_connrej,
763 	.path_quiesced	  = netiucv_callback_connsusp,
764 	.path_resumed	  = netiucv_callback_connres,
765 	.message_pending  = netiucv_callback_rx,
766 	.message_complete = netiucv_callback_txdone,
767 };
768 
769 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
770 {
771 	struct iucv_event *ev = arg;
772 	struct iucv_connection *conn = ev->conn;
773 	struct iucv_path *path = ev->data;
774 	struct net_device *netdev = conn->netdev;
775 	struct netiucv_priv *privptr = netdev_priv(netdev);
776 	int rc;
777 
778 	IUCV_DBF_TEXT(trace, 3, __func__);
779 
780 	conn->path = path;
781 	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
782 	path->flags = 0;
783 	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
784 	if (rc) {
785 		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
786 		return;
787 	}
788 	fsm_newstate(fi, CONN_STATE_IDLE);
789 	netdev->tx_queue_len = conn->path->msglim;
790 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
791 }
792 
793 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
794 {
795 	struct iucv_event *ev = arg;
796 	struct iucv_path *path = ev->data;
797 
798 	IUCV_DBF_TEXT(trace, 3, __func__);
799 	iucv_path_sever(path, NULL);
800 }
801 
802 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
803 {
804 	struct iucv_connection *conn = arg;
805 	struct net_device *netdev = conn->netdev;
806 	struct netiucv_priv *privptr = netdev_priv(netdev);
807 
808 	IUCV_DBF_TEXT(trace, 3, __func__);
809 	fsm_deltimer(&conn->timer);
810 	fsm_newstate(fi, CONN_STATE_IDLE);
811 	netdev->tx_queue_len = conn->path->msglim;
812 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
813 }
814 
815 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
816 {
817 	struct iucv_connection *conn = arg;
818 
819 	IUCV_DBF_TEXT(trace, 3, __func__);
820 	fsm_deltimer(&conn->timer);
821 	iucv_path_sever(conn->path, conn->userdata);
822 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
823 }
824 
825 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
826 {
827 	struct iucv_connection *conn = arg;
828 	struct net_device *netdev = conn->netdev;
829 	struct netiucv_priv *privptr = netdev_priv(netdev);
830 
831 	IUCV_DBF_TEXT(trace, 3, __func__);
832 
833 	fsm_deltimer(&conn->timer);
834 	iucv_path_sever(conn->path, conn->userdata);
835 	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
836 			       "connection\n", netiucv_printuser(conn));
837 	IUCV_DBF_TEXT(data, 2,
838 		      "conn_action_connsever: Remote dropped connection\n");
839 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
840 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
841 }
842 
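/*
 * Handle CONN_EVENT_START: allocate an IUCV path and attempt to connect
 * to the peer; report the IUCV return code on failure.
 */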
843 static void conn_action_start(fsm_instance *fi, int event, void *arg)
844 {
845 	struct iucv_connection *conn = arg;
846 	struct net_device *netdev = conn->netdev;
847 	struct netiucv_priv *privptr = netdev_priv(netdev);
848 	int rc;
849 
850 	IUCV_DBF_TEXT(trace, 3, __func__);
851 
852 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
853 
854 	/*
855 	 * We must set the state before calling iucv_connect because the
856 	 * callback handler could be called at any point after the connection
857 	 * request is sent
858 	 */
859 
860 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
861 	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
862 	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
863 		netdev->name, netiucv_printuser(conn));
864 
865 	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
866 			       NULL, conn->userdata, conn);
867 	switch (rc) {
868 	case 0:
869 		netdev->tx_queue_len = conn->path->msglim;
870 		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
871 			     CONN_EVENT_TIMER, conn);
872 		return;
873 	case 11:
874 		dev_warn(privptr->dev,
875 			"The IUCV device failed to connect to z/VM guest %s\n",
876 			netiucv_printname(conn->userid, 8));
877 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
878 		break;
879 	case 12:
880 		dev_warn(privptr->dev,
881 			"The IUCV device failed to connect to the peer on z/VM"
882 			" guest %s\n", netiucv_printname(conn->userid, 8));
883 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
884 		break;
885 	case 13:
886 		dev_err(privptr->dev,
887 			"Connecting the IUCV device would exceed the maximum"
888 			" number of IUCV connections\n");
889 		fsm_newstate(fi, CONN_STATE_CONNERR);
890 		break;
891 	case 14:
892 		dev_err(privptr->dev,
893 			"z/VM guest %s has too many IUCV connections"
894 			" to connect with the IUCV device\n",
895 			netiucv_printname(conn->userid, 8));
896 		fsm_newstate(fi, CONN_STATE_CONNERR);
897 		break;
898 	case 15:
899 		dev_err(privptr->dev,
900 			"The IUCV device cannot connect to a z/VM guest with no"
901 			" IUCV authorization\n");
902 		fsm_newstate(fi, CONN_STATE_CONNERR);
903 		break;
904 	default:
905 		dev_err(privptr->dev,
906 			"Connecting the IUCV device failed with error %d\n",
907 			rc);
908 		fsm_newstate(fi, CONN_STATE_CONNERR);
909 		break;
910 	}
911 	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
912 	kfree(conn->path);
913 	conn->path = NULL;
914 }
915 
916 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
917 {
918 	struct sk_buff *skb;
919 
920 	while ((skb = skb_dequeue(q))) {
921 		refcount_dec(&skb->users);
922 		dev_kfree_skb_any(skb);
923 	}
924 }
925 
926 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
927 {
928 	struct iucv_event *ev = arg;
929 	struct iucv_connection *conn = ev->conn;
930 	struct net_device *netdev = conn->netdev;
931 	struct netiucv_priv *privptr = netdev_priv(netdev);
932 
933 	IUCV_DBF_TEXT(trace, 3, __func__);
934 
935 	fsm_deltimer(&conn->timer);
936 	fsm_newstate(fi, CONN_STATE_STOPPED);
937 	netiucv_purge_skb_queue(&conn->collect_queue);
938 	if (conn->path) {
939 		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
940 		iucv_path_sever(conn->path, conn->userdata);
941 		kfree(conn->path);
942 		conn->path = NULL;
943 	}
944 	netiucv_purge_skb_queue(&conn->commit_queue);
945 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
946 }
947 
948 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
949 {
950 	struct iucv_connection *conn = arg;
951 	struct net_device *netdev = conn->netdev;
952 
953 	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
954 		netdev->name, conn->userid);
955 }
956 
957 static const fsm_node conn_fsm[] = {
958 	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
959 	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
960 
961 	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
962 	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
963 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
964 	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
965 	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
966 	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
967 	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
968 
969 	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
970         { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
971 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
972 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
973 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
974 
975 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
976 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
977 
978 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
979 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
980 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
981 
982 	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
983 	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
984 
985 	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
986 	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
987 };
988 
989 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
990 
991 
992 /*
993  * Actions for interface - statemachine.
994  */
995 
996 /**
997  * dev_action_start
998  * @fi: An instance of an interface statemachine.
999  * @event: The event, just happened.
1000  * @arg: Generic pointer, casted from struct net_device * upon call.
1001  *
1002  * Startup connection by sending CONN_EVENT_START to it.
1003  */
1004 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1005 {
1006 	struct net_device   *dev = arg;
1007 	struct netiucv_priv *privptr = netdev_priv(dev);
1008 
1009 	IUCV_DBF_TEXT(trace, 3, __func__);
1010 
1011 	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1012 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1013 }
1014 
1015 /**
1016  * Shutdown connection by sending CONN_EVENT_STOP to it.
1017  *
1018  * @param fi    An instance of an interface statemachine.
1019  * @param event The event, just happened.
1020  * @param arg   Generic pointer, casted from struct net_device * upon call.
1021  */
1022 static void
1023 dev_action_stop(fsm_instance *fi, int event, void *arg)
1024 {
1025 	struct net_device   *dev = arg;
1026 	struct netiucv_priv *privptr = netdev_priv(dev);
1027 	struct iucv_event   ev;
1028 
1029 	IUCV_DBF_TEXT(trace, 3, __func__);
1030 
1031 	ev.conn = privptr->conn;
1032 
1033 	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1034 	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1035 }
1036 
1037 /**
1038  * Called from connection statemachine
1039  * when a connection is up and running.
1040  *
1041  * @param fi    An instance of an interface statemachine.
1042  * @param event The event, just happened.
1043  * @param arg   Generic pointer, casted from struct net_device * upon call.
1044  */
1045 static void
1046 dev_action_connup(fsm_instance *fi, int event, void *arg)
1047 {
1048 	struct net_device   *dev = arg;
1049 	struct netiucv_priv *privptr = netdev_priv(dev);
1050 
1051 	IUCV_DBF_TEXT(trace, 3, __func__);
1052 
1053 	switch (fsm_getstate(fi)) {
1054 		case DEV_STATE_STARTWAIT:
1055 			fsm_newstate(fi, DEV_STATE_RUNNING);
1056 			dev_info(privptr->dev,
1057 				"The IUCV device has been connected"
1058 				" successfully to %s\n",
1059 				netiucv_printuser(privptr->conn));
1060 			IUCV_DBF_TEXT(setup, 3,
1061 				"connection is up and running\n");
1062 			break;
1063 		case DEV_STATE_STOPWAIT:
1064 			IUCV_DBF_TEXT(data, 2,
1065 				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1066 			break;
1067 	}
1068 }
1069 
1070 /**
1071  * Called from connection statemachine
1072  * when a connection has been shutdown.
1073  *
1074  * @param fi    An instance of an interface statemachine.
1075  * @param event The event, just happened.
1076  * @param arg   Generic pointer, casted from struct net_device * upon call.
1077  */
1078 static void
1079 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1080 {
1081 	IUCV_DBF_TEXT(trace, 3, __func__);
1082 
1083 	switch (fsm_getstate(fi)) {
1084 		case DEV_STATE_RUNNING:
1085 			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1086 			break;
1087 		case DEV_STATE_STOPWAIT:
1088 			fsm_newstate(fi, DEV_STATE_STOPPED);
1089 			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1090 			break;
1091 	}
1092 }
1093 
1094 static const fsm_node dev_fsm[] = {
1095 	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1096 
1097 	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1098 	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1099 
1100 	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1101 	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1102 
1103 	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1104 	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1105 	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1106 };
1107 
1108 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1109 
1110 /**
1111  * Transmit a packet.
1112  * This is a helper function for netiucv_tx().
1113  *
1114  * @param conn Connection to be used for sending.
1115  * @param skb Pointer to struct sk_buff of packet to send.
1116  *            The linklevel header has already been set up
1117  *            by netiucv_tx().
1118  *
1119  * @return 0 on success, -ERRNO on failure. (Never fails.)
1120  */
1121 static int netiucv_transmit_skb(struct iucv_connection *conn,
1122 				struct sk_buff *skb)
1123 {
1124 	struct iucv_message msg;
1125 	unsigned long saveflags;
1126 	struct ll_header header;
1127 	int rc;
1128 
1129 	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1130 		int l = skb->len + NETIUCV_HDRLEN;
1131 
1132 		spin_lock_irqsave(&conn->collect_lock, saveflags);
1133 		if (conn->collect_len + l >
1134 		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1135 			rc = -EBUSY;
1136 			IUCV_DBF_TEXT(data, 2,
1137 				      "EBUSY from netiucv_transmit_skb\n");
1138 		} else {
1139 			refcount_inc(&skb->users);
1140 			skb_queue_tail(&conn->collect_queue, skb);
1141 			conn->collect_len += l;
1142 			rc = 0;
1143 		}
1144 		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1145 	} else {
1146 		struct sk_buff *nskb = skb;
1147 		/**
1148 		 * Copy the skb to a new allocated skb in lowmem only if the
1149 		 * data is located above 2G in memory or tailroom is < 2.
1150 		 */
1151 		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1152 				    NETIUCV_HDRLEN)) >> 31;
1153 		int copied = 0;
1154 		if (hi || (skb_tailroom(skb) < 2)) {
1155 			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1156 					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1157 			if (!nskb) {
1158 				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1159 				rc = -ENOMEM;
1160 				return rc;
1161 			} else {
1162 				skb_reserve(nskb, NETIUCV_HDRLEN);
1163 				skb_put_data(nskb, skb->data, skb->len);
1164 			}
1165 			copied = 1;
1166 		}
1167 		/**
1168 		 * skb now is below 2G and has enough room. Add headers.
1169 		 */
1170 		header.next = nskb->len + NETIUCV_HDRLEN;
1171 		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1172 		header.next = 0;
1173 		skb_put_data(nskb, &header, NETIUCV_HDRLEN);
1174 
1175 		fsm_newstate(conn->fsm, CONN_STATE_TX);
1176 		conn->prof.send_stamp = jiffies;
1177 
1178 		msg.tag = 1;
1179 		msg.class = 0;
1180 		rc = iucv_message_send(conn->path, &msg, 0, 0,
1181 				       nskb->data, nskb->len);
1182 		conn->prof.doios_single++;
1183 		conn->prof.txlen += skb->len;
1184 		conn->prof.tx_pending++;
1185 		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1186 			conn->prof.tx_max_pending = conn->prof.tx_pending;
1187 		if (rc) {
1188 			struct netiucv_priv *privptr;
1189 			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1190 			conn->prof.tx_pending--;
1191 			privptr = netdev_priv(conn->netdev);
1192 			if (privptr)
1193 				privptr->stats.tx_errors++;
1194 			if (copied)
1195 				dev_kfree_skb(nskb);
1196 			else {
1197 				/**
1198 				 * Remove our headers. They get added
1199 				 * again on retransmit.
1200 				 */
1201 				skb_pull(skb, NETIUCV_HDRLEN);
1202 				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1203 			}
1204 			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1205 		} else {
1206 			if (copied)
1207 				dev_kfree_skb(skb);
1208 			refcount_inc(&nskb->users);
1209 			skb_queue_tail(&conn->commit_queue, nskb);
1210 		}
1211 	}
1212 
1213 	return rc;
1214 }
1215 
1216 /*
1217  * Interface API for upper network layers
1218  */
1219 
1220 /**
1221  * Open an interface.
1222  * Called from generic network layer when ifconfig up is run.
1223  *
1224  * @param dev Pointer to interface struct.
1225  *
1226  * @return 0 on success, -ERRNO on failure. (Never fails.)
1227  */
1228 static int netiucv_open(struct net_device *dev)
1229 {
1230 	struct netiucv_priv *priv = netdev_priv(dev);
1231 
1232 	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1233 	return 0;
1234 }
1235 
1236 /**
1237  * Close an interface.
1238  * Called from generic network layer when ifconfig down is run.
1239  *
1240  * @param dev Pointer to interface struct.
1241  *
1242  * @return 0 on success, -ERRNO on failure. (Never fails.)
1243  */
1244 static int netiucv_close(struct net_device *dev)
1245 {
1246 	struct netiucv_priv *priv = netdev_priv(dev);
1247 
1248 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1249 	return 0;
1250 }
1251 
1252 /**
1253  * Start transmission of a packet.
1254  * Called from generic network device layer.
1255  */
1256 static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1257 {
1258 	struct netiucv_priv *privptr = netdev_priv(dev);
1259 	int rc;
1260 
1261 	IUCV_DBF_TEXT(trace, 4, __func__);
1262 	/**
1263 	 * Some sanity checks ...
1264 	 */
1265 	if (skb == NULL) {
1266 		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1267 		privptr->stats.tx_dropped++;
1268 		return NETDEV_TX_OK;
1269 	}
1270 	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1271 		IUCV_DBF_TEXT(data, 2,
1272 			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1273 		dev_kfree_skb(skb);
1274 		privptr->stats.tx_dropped++;
1275 		return NETDEV_TX_OK;
1276 	}
1277 
1278 	/**
1279 	 * If connection is not running, try to restart it
1280 	 * and throw away packet.
1281 	 */
1282 	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1283 		dev_kfree_skb(skb);
1284 		privptr->stats.tx_dropped++;
1285 		privptr->stats.tx_errors++;
1286 		privptr->stats.tx_carrier_errors++;
1287 		return NETDEV_TX_OK;
1288 	}
1289 
1290 	if (netiucv_test_and_set_busy(dev)) {
1291 		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1292 		return NETDEV_TX_BUSY;
1293 	}
1294 	netif_trans_update(dev);
1295 	rc = netiucv_transmit_skb(privptr->conn, skb);
1296 	netiucv_clear_busy(dev);
1297 	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1298 }
1299 
1300 /**
1301  * netiucv_stats
1302  * @dev: Pointer to interface struct.
1303  *
1304  * Returns interface statistics of a device.
1305  *
1306  * Returns pointer to stats struct of this interface.
1307  */
1308 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1309 {
1310 	struct netiucv_priv *priv = netdev_priv(dev);
1311 
1312 	IUCV_DBF_TEXT(trace, 5, __func__);
1313 	return &priv->stats;
1314 }
1315 
1316 /*
1317  * attributes in sysfs
1318  */
1319 
1320 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1321 			 char *buf)
1322 {
1323 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1324 
1325 	IUCV_DBF_TEXT(trace, 5, __func__);
1326 	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1327 }
1328 
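/*
 * Parse a "userid[.userdata]" string written to the sysfs 'user' attribute
 * into a blank-padded user ID and an (EBCDIC-converted) userdata field.
 */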
1329 static int netiucv_check_user(const char *buf, size_t count, char *username,
1330 			      char *userdata)
1331 {
1332 	const char *p;
1333 	int i;
1334 
1335 	p = strchr(buf, '.');
1336 	if ((p && ((count > 26) ||
1337 		   ((p - buf) > 8) ||
1338 		   (buf + count - p > 18))) ||
1339 	    (!p && (count > 9))) {
1340 		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1341 		return -EINVAL;
1342 	}
1343 
1344 	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1345 		if (isalnum(*p) || *p == '$') {
1346 			username[i] = toupper(*p);
1347 			continue;
1348 		}
1349 		if (*p == '\n')
1350 			/* trailing lf, grr */
1351 			break;
1352 		IUCV_DBF_TEXT_(setup, 2,
1353 			       "conn_write: invalid character %02x\n", *p);
1354 		return -EINVAL;
1355 	}
1356 	while (i < 8)
1357 		username[i++] = ' ';
1358 	username[8] = '\0';
1359 
1360 	if (*p == '.') {
1361 		p++;
1362 		for (i = 0; i < 16 && *p; i++, p++) {
1363 			if (*p == '\n')
1364 				break;
1365 			userdata[i] = toupper(*p);
1366 		}
1367 		while (i > 0 && i < 16)
1368 			userdata[i++] = ' ';
1369 	} else
1370 		memcpy(userdata, iucvMagic_ascii, 16);
1371 	userdata[16] = '\0';
1372 	ASCEBC(userdata, 16);
1373 
1374 	return 0;
1375 }
1376 
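/*
 * sysfs store for the 'user' attribute: validate the new peer and reject
 * the change while the interface is active or the peer is already in use.
 */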
1377 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1378 			  const char *buf, size_t count)
1379 {
1380 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1381 	struct net_device *ndev = priv->conn->netdev;
1382 	char	username[9];
1383 	char	userdata[17];
1384 	int	rc;
1385 	struct iucv_connection *cp;
1386 
1387 	IUCV_DBF_TEXT(trace, 3, __func__);
1388 	rc = netiucv_check_user(buf, count, username, userdata);
1389 	if (rc)
1390 		return rc;
1391 
1392 	if (memcmp(username, priv->conn->userid, 9) &&
1393 	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1394 		/* username changed while the interface is active. */
1395 		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1396 		return -EPERM;
1397 	}
1398 	read_lock_bh(&iucv_connection_rwlock);
1399 	list_for_each_entry(cp, &iucv_connection_list, list) {
1400 		if (!strncmp(username, cp->userid, 9) &&
1401 		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1402 			read_unlock_bh(&iucv_connection_rwlock);
1403 			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1404 				"already exists\n", netiucv_printuser(cp));
1405 			return -EEXIST;
1406 		}
1407 	}
1408 	read_unlock_bh(&iucv_connection_rwlock);
1409 	memcpy(priv->conn->userid, username, 9);
1410 	memcpy(priv->conn->userdata, userdata, 17);
1411 	return count;
1412 }
1413 
1414 static DEVICE_ATTR(user, 0644, user_show, user_write);
1415 
1416 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1417 			    char *buf)
1418 {
1419 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1420 
1421 	IUCV_DBF_TEXT(trace, 5, __func__);
1422 	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1423 }
1424 
1425 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1426 			     const char *buf, size_t count)
1427 {
1428 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1429 	struct net_device *ndev = priv->conn->netdev;
1430 	unsigned int bs1;
1431 	int rc;
1432 
1433 	IUCV_DBF_TEXT(trace, 3, __func__);
1434 	if (count >= 39)
1435 		return -EINVAL;
1436 
1437 	rc = kstrtouint(buf, 0, &bs1);
1438 
1439 	if (rc == -EINVAL) {
1440 		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
1441 			buf);
1442 		return -EINVAL;
1443 	}
1444 	if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
1445 		IUCV_DBF_TEXT_(setup, 2,
1446 			"buffer_write: buffer size %d too large\n",
1447 			bs1);
1448 		return -EINVAL;
1449 	}
1450 	if ((ndev->flags & IFF_RUNNING) &&
1451 	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1452 		IUCV_DBF_TEXT_(setup, 2,
1453 			"buffer_write: buffer size %d too small\n",
1454 			bs1);
1455 		return -EINVAL;
1456 	}
1457 	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1458 		IUCV_DBF_TEXT_(setup, 2,
1459 			"buffer_write: buffer size %d too small\n",
1460 			bs1);
1461 		return -EINVAL;
1462 	}
1463 
1464 	priv->conn->max_buffsize = bs1;
1465 	if (!(ndev->flags & IFF_RUNNING))
1466 		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1467 
1468 	return count;
1469 
1470 }
1471 
1472 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1473 
1474 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1475 			     char *buf)
1476 {
1477 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1478 
1479 	IUCV_DBF_TEXT(trace, 5, __func__);
1480 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1481 }
1482 
1483 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1484 
1485 static ssize_t conn_fsm_show (struct device *dev,
1486 			      struct device_attribute *attr, char *buf)
1487 {
1488 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1489 
1490 	IUCV_DBF_TEXT(trace, 5, __func__);
1491 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1492 }
1493 
1494 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1495 
1496 static ssize_t maxmulti_show (struct device *dev,
1497 			      struct device_attribute *attr, char *buf)
1498 {
1499 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1500 
1501 	IUCV_DBF_TEXT(trace, 5, __func__);
1502 	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1503 }
1504 
1505 static ssize_t maxmulti_write (struct device *dev,
1506 			       struct device_attribute *attr,
1507 			       const char *buf, size_t count)
1508 {
1509 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1510 
1511 	IUCV_DBF_TEXT(trace, 4, __func__);
1512 	priv->conn->prof.maxmulti = 0;
1513 	return count;
1514 }
1515 
1516 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1517 
1518 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1519 			   char *buf)
1520 {
1521 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1522 
1523 	IUCV_DBF_TEXT(trace, 5, __func__);
1524 	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1525 }
1526 
1527 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1528 			    const char *buf, size_t count)
1529 {
1530 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1531 
1532 	IUCV_DBF_TEXT(trace, 4, __func__);
1533 	priv->conn->prof.maxcqueue = 0;
1534 	return count;
1535 }
1536 
1537 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1538 
1539 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1540 			   char *buf)
1541 {
1542 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1543 
1544 	IUCV_DBF_TEXT(trace, 5, __func__);
1545 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1546 }
1547 
1548 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1549 			    const char *buf, size_t count)
1550 {
1551 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1552 
1553 	IUCV_DBF_TEXT(trace, 4, __func__);
1554 	priv->conn->prof.doios_single = 0;
1555 	return count;
1556 }
1557 
1558 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1559 
1560 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1561 			   char *buf)
1562 {
1563 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1564 
1565 	IUCV_DBF_TEXT(trace, 5, __func__);
1566 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1567 }
1568 
1569 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1570 			    const char *buf, size_t count)
1571 {
1572 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1573 
1574 	IUCV_DBF_TEXT(trace, 5, __func__);
1575 	priv->conn->prof.doios_multi = 0;
1576 	return count;
1577 }
1578 
1579 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1580 
1581 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1582 			   char *buf)
1583 {
1584 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1585 
1586 	IUCV_DBF_TEXT(trace, 5, __func__);
1587 	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1588 }
1589 
1590 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1591 			    const char *buf, size_t count)
1592 {
1593 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1594 
1595 	IUCV_DBF_TEXT(trace, 4, __func__);
1596 	priv->conn->prof.txlen = 0;
1597 	return count;
1598 }
1599 
1600 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1601 
1602 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1603 			    char *buf)
1604 {
1605 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1606 
1607 	IUCV_DBF_TEXT(trace, 5, __func__);
1608 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1609 }
1610 
1611 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1612 			     const char *buf, size_t count)
1613 {
1614 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1615 
1616 	IUCV_DBF_TEXT(trace, 4, __func__);
1617 	priv->conn->prof.tx_time = 0;
1618 	return count;
1619 }
1620 
1621 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1622 
1623 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1624 			    char *buf)
1625 {
1626 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1627 
1628 	IUCV_DBF_TEXT(trace, 5, __func__);
1629 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1630 }
1631 
1632 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1633 			     const char *buf, size_t count)
1634 {
1635 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1636 
1637 	IUCV_DBF_TEXT(trace, 4, __func__);
1638 	priv->conn->prof.tx_pending = 0;
1639 	return count;
1640 }
1641 
1642 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1643 
1644 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1645 			    char *buf)
1646 {
1647 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1648 
1649 	IUCV_DBF_TEXT(trace, 5, __func__);
1650 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1651 }
1652 
1653 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1654 			     const char *buf, size_t count)
1655 {
1656 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1657 
1658 	IUCV_DBF_TEXT(trace, 4, __func__);
1659 	priv->conn->prof.tx_max_pending = 0;
1660 	return count;
1661 }
1662 
1663 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1664 
1665 static struct attribute *netiucv_attrs[] = {
1666 	&dev_attr_buffer.attr,
1667 	&dev_attr_user.attr,
1668 	NULL,
1669 };
1670 
1671 static struct attribute_group netiucv_attr_group = {
1672 	.attrs = netiucv_attrs,
1673 };
1674 
1675 static struct attribute *netiucv_stat_attrs[] = {
1676 	&dev_attr_device_fsm_state.attr,
1677 	&dev_attr_connection_fsm_state.attr,
1678 	&dev_attr_max_tx_buffer_used.attr,
1679 	&dev_attr_max_chained_skbs.attr,
1680 	&dev_attr_tx_single_write_ops.attr,
1681 	&dev_attr_tx_multi_write_ops.attr,
1682 	&dev_attr_netto_bytes.attr,
1683 	&dev_attr_max_tx_io_time.attr,
1684 	&dev_attr_tx_pending.attr,
1685 	&dev_attr_tx_max_pending.attr,
1686 	NULL,
1687 };
1688 
1689 static struct attribute_group netiucv_stat_attr_group = {
1690 	.name  = "stats",
1691 	.attrs = netiucv_stat_attrs,
1692 };
1693 
1694 static const struct attribute_group *netiucv_attr_groups[] = {
1695 	&netiucv_stat_attr_group,
1696 	&netiucv_attr_group,
1697 	NULL,
1698 };
1699 
1700 static int netiucv_register_device(struct net_device *ndev)
1701 {
1702 	struct netiucv_priv *priv = netdev_priv(ndev);
1703 	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1704 	int ret;
1705 
1706 	IUCV_DBF_TEXT(trace, 3, __func__);
1707 
1708 	if (dev) {
1709 		dev_set_name(dev, "net%s", ndev->name);
1710 		dev->bus = &iucv_bus;
1711 		dev->parent = iucv_root;
1712 		dev->groups = netiucv_attr_groups;
1713 		/*
1714 		 * The release function could be called after the
1715 		 * module has been unloaded. Its _only_ task is to
1716 		 * free the struct. Therefore, we specify kfree()
1717 		 * directly here. (Probably a little bit obfuscating
1718 		 * but legitimate ...).
1719 		 */
1720 		dev->release = (void (*)(struct device *))kfree;
1721 		dev->driver = &netiucv_driver;
1722 	} else
1723 		return -ENOMEM;
1724 
1725 	ret = device_register(dev);
1726 	if (ret) {
1727 		put_device(dev);
1728 		return ret;
1729 	}
1730 	priv->dev = dev;
1731 	dev_set_drvdata(dev, priv);
1732 	return 0;
1733 }
1734 
1735 static void netiucv_unregister_device(struct device *dev)
1736 {
1737 	IUCV_DBF_TEXT(trace, 3, __func__);
1738 	device_unregister(dev);
1739 }
1740 
1741 /**
1742  * Allocate and initialize a new connection structure.
1743  * Add it to the list of netiucv connections;
1744  */
1745 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1746 						      char *username,
1747 						      char *userdata)
1748 {
1749 	struct iucv_connection *conn;
1750 
1751 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1752 	if (!conn)
1753 		goto out;
1754 	skb_queue_head_init(&conn->collect_queue);
1755 	skb_queue_head_init(&conn->commit_queue);
1756 	spin_lock_init(&conn->collect_lock);
1757 	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1758 	conn->netdev = dev;
1759 
1760 	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1761 	if (!conn->rx_buff)
1762 		goto out_conn;
1763 	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1764 	if (!conn->tx_buff)
1765 		goto out_rx;
1766 	conn->fsm = init_fsm("netiucvconn", conn_state_names,
1767 			     conn_event_names, NR_CONN_STATES,
1768 			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1769 			     GFP_KERNEL);
1770 	if (!conn->fsm)
1771 		goto out_tx;
1772 
1773 	fsm_settimer(conn->fsm, &conn->timer);
1774 	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1775 
1776 	if (userdata)
1777 		memcpy(conn->userdata, userdata, 17);
1778 	if (username) {
1779 		memcpy(conn->userid, username, 9);
1780 		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1781 	}
1782 
1783 	write_lock_bh(&iucv_connection_rwlock);
1784 	list_add_tail(&conn->list, &iucv_connection_list);
1785 	write_unlock_bh(&iucv_connection_rwlock);
1786 	return conn;
1787 
1788 out_tx:
1789 	kfree_skb(conn->tx_buff);
1790 out_rx:
1791 	kfree_skb(conn->rx_buff);
1792 out_conn:
1793 	kfree(conn);
1794 out:
1795 	return NULL;
1796 }
1797 
1798 /**
1799  * Release a connection structure and remove it from the
1800  * list of netiucv connections.
1801  */
1802 static void netiucv_remove_connection(struct iucv_connection *conn)
1803 {
1804 
1805 	IUCV_DBF_TEXT(trace, 3, __func__);
1806 	write_lock_bh(&iucv_connection_rwlock);
1807 	list_del_init(&conn->list);
1808 	write_unlock_bh(&iucv_connection_rwlock);
1809 	fsm_deltimer(&conn->timer);
1810 	netiucv_purge_skb_queue(&conn->collect_queue);
1811 	if (conn->path) {
1812 		iucv_path_sever(conn->path, conn->userdata);
1813 		kfree(conn->path);
1814 		conn->path = NULL;
1815 	}
1816 	netiucv_purge_skb_queue(&conn->commit_queue);
1817 	kfree_fsm(conn->fsm);
1818 	kfree_skb(conn->rx_buff);
1819 	kfree_skb(conn->tx_buff);
1820 }
1821 
1822 /**
1823  * Release all resources held by a net device.
1824  */
1825 static void netiucv_free_netdevice(struct net_device *dev)
1826 {
1827 	struct netiucv_priv *privptr = netdev_priv(dev);
1828 
1829 	IUCV_DBF_TEXT(trace, 3, __func__);
1830 
1831 	if (!dev)
1832 		return;
1833 
1834 	if (privptr) {
1835 		if (privptr->conn)
1836 			netiucv_remove_connection(privptr->conn);
1837 		if (privptr->fsm)
1838 			kfree_fsm(privptr->fsm);
1839 		privptr->conn = NULL; privptr->fsm = NULL;
1840 		/* privptr gets freed by free_netdev() */
1841 	}
1842 }
1843 
1844 /**
1845  * Initialize a net device. (Called from kernel in alloc_netdev())
1846  */
1847 static const struct net_device_ops netiucv_netdev_ops = {
1848 	.ndo_open		= netiucv_open,
1849 	.ndo_stop		= netiucv_close,
1850 	.ndo_get_stats		= netiucv_stats,
1851 	.ndo_start_xmit		= netiucv_tx,
1852 };
1853 
1854 static void netiucv_setup_netdevice(struct net_device *dev)
1855 {
1856 	dev->mtu	         = NETIUCV_MTU_DEFAULT;
1857 	dev->min_mtu		 = 576;
1858 	dev->max_mtu		 = NETIUCV_MTU_MAX;
1859 	dev->needs_free_netdev   = true;
1860 	dev->priv_destructor     = netiucv_free_netdevice;
1861 	dev->hard_header_len     = NETIUCV_HDRLEN;
1862 	dev->addr_len            = 0;
1863 	dev->type                = ARPHRD_SLIP;
1864 	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
1865 	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
1866 	dev->netdev_ops		 = &netiucv_netdev_ops;
1867 }
1868 
1869 /**
1870  * Allocate and initialize a net device together with its connection and FSM.
1871  */
1872 static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
1873 {
1874 	struct netiucv_priv *privptr;
1875 	struct net_device *dev;
1876 
1877 	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1878 			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
1879 	if (!dev)
1880 		return NULL;
1881 	rtnl_lock();
1882 	if (dev_alloc_name(dev, dev->name) < 0)
1883 		goto out_netdev;
1884 
1885 	privptr = netdev_priv(dev);
1886 	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1887 				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1888 				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1889 	if (!privptr->fsm)
1890 		goto out_netdev;
1891 
1892 	privptr->conn = netiucv_new_connection(dev, username, userdata);
1893 	if (!privptr->conn) {
1894 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1895 		goto out_fsm;
1896 	}
1897 	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1898 	return dev;
1899 
1900 out_fsm:
1901 	kfree_fsm(privptr->fsm);
1902 out_netdev:
1903 	rtnl_unlock();
1904 	free_netdev(dev);
1905 	return NULL;
1906 }
1907 
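/**
 * Driver attribute "connection": parse the IUCV peer (user ID and
 * optional userdata) written to the attribute, allocate a new iucv
 * interface for it and register it, unless a connection to that peer
 * already exists.
 */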
1908 static ssize_t connection_store(struct device_driver *drv, const char *buf,
1909 				size_t count)
1910 {
1911 	char username[9];
1912 	char userdata[17];
1913 	int rc;
1914 	struct net_device *dev;
1915 	struct netiucv_priv *priv;
1916 	struct iucv_connection *cp;
1917 
1918 	IUCV_DBF_TEXT(trace, 3, __func__);
1919 	rc = netiucv_check_user(buf, count, username, userdata);
1920 	if (rc)
1921 		return rc;
1922 
1923 	read_lock_bh(&iucv_connection_rwlock);
1924 	list_for_each_entry(cp, &iucv_connection_list, list) {
1925 		if (!strncmp(username, cp->userid, 9) &&
1926 		    !strncmp(userdata, cp->userdata, 17)) {
1927 			read_unlock_bh(&iucv_connection_rwlock);
1928 			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
1929 				"already exists\n", netiucv_printuser(cp));
1930 			return -EEXIST;
1931 		}
1932 	}
1933 	read_unlock_bh(&iucv_connection_rwlock);
1934 
1935 	dev = netiucv_init_netdevice(username, userdata);
1936 	if (!dev) {
1937 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
1938 		return -ENODEV;
1939 	}
1940 
1941 	rc = netiucv_register_device(dev);
1942 	if (rc) {
1943 		rtnl_unlock();
1944 		IUCV_DBF_TEXT_(setup, 2,
1945 			"ret %d from netiucv_register_device\n", rc);
1946 		goto out_free_ndev;
1947 	}
1948 
1949 	/* sysfs magic */
1950 	priv = netdev_priv(dev);
1951 	SET_NETDEV_DEV(dev, priv->dev);
1952 
1953 	rc = register_netdevice(dev);
1954 	rtnl_unlock();
1955 	if (rc)
1956 		goto out_unreg;
1957 
1958 	dev_info(priv->dev, "The IUCV interface to %s has been established "
1959 			    "successfully\n",
1960 		netiucv_printuser(priv->conn));
1961 
1962 	return count;
1963 
1964 out_unreg:
1965 	netiucv_unregister_device(priv->dev);
1966 out_free_ndev:
1967 	netiucv_free_netdevice(dev);
1968 	return rc;
1969 }
1970 static DRIVER_ATTR_WO(connection);
1971 
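/**
 * Driver attribute "remove": take an interface name, look up the
 * matching connection and remove the interface, provided it is not
 * currently up and running.
 */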
1972 static ssize_t remove_store(struct device_driver *drv, const char *buf,
1973 			    size_t count)
1974 {
1975 	struct iucv_connection *cp;
1976 	struct net_device *ndev;
1977 	struct netiucv_priv *priv;
1978 	struct device *dev;
1979 	char name[IFNAMSIZ];
1980 	const char *p;
1981 	int i;
1982 
1983 	IUCV_DBF_TEXT(trace, 3, __func__);
1984 
1985 	if (count >= IFNAMSIZ)
1986 		count = IFNAMSIZ - 1;
1987 
1988 	for (i = 0, p = buf; i < count && *p; i++, p++) {
1989 		if (*p == '\n' || *p == ' ')
1990 			/* trailing lf, grr */
1991 			break;
1992 		name[i] = *p;
1993 	}
1994 	name[i] = '\0';
1995 
1996 	read_lock_bh(&iucv_connection_rwlock);
1997 	list_for_each_entry(cp, &iucv_connection_list, list) {
1998 		ndev = cp->netdev;
1999 		priv = netdev_priv(ndev);
2000 		dev = priv->dev;
2001 		if (strncmp(name, ndev->name, count))
2002 			continue;
2003 		read_unlock_bh(&iucv_connection_rwlock);
2004 		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2005 			dev_warn(dev, "The IUCV device is connected"
2006 				" to %s and cannot be removed\n",
2007 				priv->conn->userid);
2008 			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2009 			return -EPERM;
2010 		}
2011 		unregister_netdev(ndev);
2012 		netiucv_unregister_device(dev);
2013 		return count;
2014 	}
2015 	read_unlock_bh(&iucv_connection_rwlock);
2016 	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2017 	return -EINVAL;
2018 }
2019 static DRIVER_ATTR_WO(remove);
2020 
2021 static struct attribute *netiucv_drv_attrs[] = {
2022 	&driver_attr_connection.attr,
2023 	&driver_attr_remove.attr,
2024 	NULL,
2025 };
2026 
2027 static struct attribute_group netiucv_drv_attr_group = {
2028 	.attrs = netiucv_drv_attrs,
2029 };
2030 
2031 static const struct attribute_group *netiucv_drv_attr_groups[] = {
2032 	&netiucv_drv_attr_group,
2033 	NULL,
2034 };
2035 
2036 static void netiucv_banner(void)
2037 {
2038 	pr_info("driver initialized\n");
2039 }
2040 
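/**
 * Module exit: tear down all remaining interfaces and their
 * connections, then unregister the driver, the IUCV handler and the
 * debug facility views.
 */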
2041 static void __exit netiucv_exit(void)
2042 {
2043 	struct iucv_connection *cp;
2044 	struct net_device *ndev;
2045 	struct netiucv_priv *priv;
2046 	struct device *dev;
2047 
2048 	IUCV_DBF_TEXT(trace, 3, __func__);
2049 	while (!list_empty(&iucv_connection_list)) {
2050 		cp = list_entry(iucv_connection_list.next,
2051 				struct iucv_connection, list);
2052 		ndev = cp->netdev;
2053 		priv = netdev_priv(ndev);
2054 		dev = priv->dev;
2055 
2056 		unregister_netdev(ndev);
2057 		netiucv_unregister_device(dev);
2058 	}
2059 
2060 	driver_unregister(&netiucv_driver);
2061 	iucv_unregister(&netiucv_handler, 1);
2062 	iucv_unregister_dbf_views();
2063 
2064 	pr_info("driver unloaded\n");
2065 	return;
2066 }
2067 
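/**
 * Module init: register the debug facility views, the IUCV handler
 * and the driver (including its sysfs attribute groups); undo the
 * earlier steps if a later one fails.
 */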
2068 static int __init netiucv_init(void)
2069 {
2070 	int rc;
2071 
2072 	rc = iucv_register_dbf_views();
2073 	if (rc)
2074 		goto out;
2075 	rc = iucv_register(&netiucv_handler, 1);
2076 	if (rc)
2077 		goto out_dbf;
2078 	IUCV_DBF_TEXT(trace, 3, __func__);
2079 	netiucv_driver.groups = netiucv_drv_attr_groups;
2080 	rc = driver_register(&netiucv_driver);
2081 	if (rc) {
2082 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2083 		goto out_iucv;
2084 	}
2085 
2086 	netiucv_banner();
2087 	return rc;
2088 
2089 out_iucv:
2090 	iucv_unregister(&netiucv_handler, 1);
2091 out_dbf:
2092 	iucv_unregister_dbf_views();
2093 out:
2094 	return rc;
2095 }
2096 
2097 module_init(netiucv_init);
2098 module_exit(netiucv_exit);
2099 MODULE_LICENSE("GPL");
2100