/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);

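/*
 * Illustrative userspace sketch (not part of this driver): the line
 * discipline is normally attached to an already-open serial port with the
 * standard TIOCSETD ioctl, after which ldisc_open() below registers a
 * "cf<ttyname>" CAIF network interface.  The device path is only an example:
 *
 *	int ldisc = N_CAIF;
 *	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
 *	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("failed to attach CAIF line discipline");
 */
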
#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING	        1 /* Bit 1 = 0x02 */
#define CAIF_FLOW_OFF_SENT	4 /* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK	     4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096

static DEFINE_SPINLOCK(ser_lock);
static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);

static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");

static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");

static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");

static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, S_IRUGO);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

static struct dentry *debugfsdir;

static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);

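/*
 * Per-tty driver state, stored as netdev private data.  "head" queues
 * outbound skbs toward the tty, "state" holds the CAIF_SENDING and
 * CAIF_FLOW_OFF_SENT bits, and the debugfs members snapshot the last
 * transmitted/received bytes when CONFIG_DEBUG_FS is enabled.
 */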
struct ser_device {
	struct caif_dev_common common;
	struct list_head node;
	struct net_device *dev;
	struct sk_buff_head head;
	struct tty_struct *tty;
	bool tx_started;
	unsigned long state;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_tty_dir;
	struct debugfs_blob_wrapper tx_blob;
	struct debugfs_blob_wrapper rx_blob;
	u8 rx_data[128];
	u8 tx_data[128];
	u8 tty_status;
#endif
};

static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);

#ifdef CONFIG_DEBUG_FS
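/*
 * Pack a few tty flow-control/packet-mode flags into a single byte so they
 * can be inspected via the "tty_status" debugfs file.
 */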
static inline void update_tty_status(struct ser_device *ser)
{
	ser->tty_status =
		ser->tty->stopped << 5 |
		ser->tty->flow_stopped << 3 |
		ser->tty->packet << 2 |
		ser->tty->port->low_latency << 1;
}

static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
	ser->debugfs_tty_dir =
			debugfs_create_dir(tty->name, debugfsdir);
	if (!IS_ERR(ser->debugfs_tty_dir)) {
		debugfs_create_blob("last_tx_msg", S_IRUSR,
				ser->debugfs_tty_dir,
				&ser->tx_blob);

		debugfs_create_blob("last_rx_msg", S_IRUSR,
				ser->debugfs_tty_dir,
				&ser->rx_blob);

		debugfs_create_x32("ser_state", S_IRUSR,
				ser->debugfs_tty_dir,
				(u32 *)&ser->state);

		debugfs_create_x8("tty_status", S_IRUSR,
				ser->debugfs_tty_dir,
				&ser->tty_status);
	}
	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = 0;
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = 0;
}

static inline void debugfs_deinit(struct ser_device *ser)
{
	debugfs_remove_recursive(ser->debugfs_tty_dir);
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->rx_data))
		size = sizeof(ser->rx_data);
	memcpy(ser->rx_data, data, size);
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = size;
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->tx_data))
		size = sizeof(ser->tx_data);
	memcpy(ser->tx_data, data, size);
	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = size;
}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}

#endif

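/*
 * Called by the tty layer when data arrives from the modem.  The bytes are
 * copied into a new skb tagged as ETH_P_CAIF and handed to the network
 * stack with netif_rx_ni().  The per-byte "flags" (break/overrun) are not
 * examined yet; see the note in the function body.
 */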
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
			char *flags, int count)
{
	struct sk_buff *skb = NULL;
	struct ser_device *ser;
	int ret;
	u8 *p;

	ser = tty->disc_data;

	/*
	 * NOTE: flags may contain information about break or overrun.
	 * This is not yet handled.
	 */

	/*
	 * Workaround for garbage at start of transmission,
	 * only enable if STX handling is not enabled.
	 */
	if (!ser->common.use_stx && !ser->tx_started) {
		dev_info(&ser->dev->dev,
			"Bytes received before initial transmission - "
			"bytes discarded.\n");
		return;
	}

	BUG_ON(ser->dev == NULL);

	/* Get a suitable caif packet and copy in data. */
	skb = netdev_alloc_skb(ser->dev, count + 1);
	if (skb == NULL)
		return;
	p = skb_put(skb, count);
	memcpy(p, data, count);

	skb->protocol = htons(ETH_P_CAIF);
	skb_reset_mac_header(skb);
	debugfs_rx(ser, data, count);
	/* Push received packet up the stack. */
	ret = netif_rx_ni(skb);
	if (!ret) {
		ser->dev->stats.rx_packets++;
		ser->dev->stats.rx_bytes += count;
	} else
		++ser->dev->stats.rx_dropped;
	update_tty_status(ser);
}

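/*
 * Drain the transmit queue to the tty (or loop the data straight back to
 * ldisc_receive() in loopback mode).  The CAIF_SENDING bit ensures only one
 * context writes at a time; partially written skbs stay at the head of the
 * queue via skb_pull().  Flow toward the CAIF stack is switched back on once
 * the queue has drained to SEND_QUEUE_LOW.
 */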
static int handle_tx(struct ser_device *ser)
{
	struct tty_struct *tty;
	struct sk_buff *skb;
	int tty_wr, len, room;

	tty = ser->tty;
	ser->tx_started = true;

	/* Enter critical section */
	if (test_and_set_bit(CAIF_SENDING, &ser->state))
		return 0;

	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
	while ((skb = skb_peek(&ser->head)) != NULL) {

		/* Make sure you don't write too much */
		len = skb->len;
		room = tty_write_room(tty);
		if (!room)
			break;
		if (room > ser_write_chunk)
			room = ser_write_chunk;
		if (len > room)
			len = room;

		/* Write to tty or loopback */
		if (!ser_loop) {
			tty_wr = tty->ops->write(tty, skb->data, len);
			update_tty_status(ser);
		} else {
			tty_wr = len;
			ldisc_receive(tty, skb->data, NULL, len);
		}
		ser->dev->stats.tx_packets++;
		ser->dev->stats.tx_bytes += tty_wr;

		/* Error on TTY ?! */
		if (tty_wr < 0)
			goto error;
		/* Reduce buffer written, and discard if empty */
		skb_pull(skb, tty_wr);
		if (skb->len == 0) {
			struct sk_buff *tmp = skb_dequeue(&ser->head);
			WARN_ON(tmp != skb);
			if (in_interrupt())
				dev_kfree_skb_irq(skb);
			else
				kfree_skb(skb);
		}
	}
	/* Turn flow on again once the queue has drained below the low water mark */
	if (ser->head.qlen <= SEND_QUEUE_LOW &&
		test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
		ser->common.flowctrl != NULL)
				ser->common.flowctrl(ser->dev, ON);
	clear_bit(CAIF_SENDING, &ser->state);
	return 0;
error:
	clear_bit(CAIF_SENDING, &ser->state);
	return tty_wr;
}

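/*
 * ndo_start_xmit callback: queue the skb toward the tty and ask the CAIF
 * stack to stop sending (flow off) once the queue exceeds SEND_QUEUE_HIGH,
 * then try to push data out immediately via handle_tx().
 */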
static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ser_device *ser;

	BUG_ON(dev == NULL);
	ser = netdev_priv(dev);

	/* Send flow off once, on high water mark */
	if (ser->head.qlen > SEND_QUEUE_HIGH &&
		!test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
		ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, OFF);

	skb_queue_tail(&ser->head, skb);
	return handle_tx(ser);
}

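/*
 * Called by the tty layer when more write room becomes available
 * (TTY_DO_WRITE_WAKEUP is set in ldisc_open()); resume transmission.
 */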
static void ldisc_tx_wakeup(struct tty_struct *tty)
{
	struct ser_device *ser;

	ser = tty->disc_data;
	BUG_ON(ser == NULL);
	WARN_ON(ser->tty != tty);
	handle_tx(ser);
}

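/*
 * Tear down net devices queued on ser_release_list.  Runs both as the
 * ser_release_work work item and synchronously (with a NULL argument) from
 * ldisc_open() and module exit; unregistering requires the rtnl lock.
 */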
static void ser_release(struct work_struct *work)
{
	struct list_head list;
	struct ser_device *ser, *tmp;

	spin_lock(&ser_lock);
	list_replace_init(&ser_release_list, &list);
	spin_unlock(&ser_lock);

	if (!list_empty(&list)) {
		rtnl_lock();
		list_for_each_entry_safe(ser, tmp, &list, node) {
			dev_close(ser->dev);
			unregister_netdevice(ser->dev);
			debugfs_deinit(ser);
		}
		rtnl_unlock();
	}
}

static DECLARE_WORK(ser_release_work, ser_release);

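/*
 * Line-discipline open: allocate a CAIF net device named "cf<ttyname>",
 * bind it to the tty and register it.  Devices pending release are flushed
 * first so the interface name can be reused.
 */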
static int ldisc_open(struct tty_struct *tty)
{
	struct ser_device *ser;
	struct net_device *dev;
	char name[64];
	int result;

	/* No write no play */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
		return -EPERM;

	/* Release devices to avoid name collision */
	ser_release(NULL);

	result = snprintf(name, sizeof(name), "cf%s", tty->name);
	if (result >= IFNAMSIZ)
		return -EINVAL;
	dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
			   caifdev_setup);
	if (!dev)
		return -ENOMEM;

	ser = netdev_priv(dev);
	ser->tty = tty_kref_get(tty);
	ser->dev = dev;
	debugfs_init(ser, tty);
	tty->receive_room = N_TTY_BUF_SIZE;
	tty->disc_data = ser;
	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	rtnl_lock();
	result = register_netdevice(dev);
	if (result) {
		rtnl_unlock();
		free_netdev(dev);
		return -ENODEV;
	}

	spin_lock(&ser_lock);
	list_add(&ser->node, &ser_list);
	spin_unlock(&ser_lock);
	rtnl_unlock();
	netif_stop_queue(dev);
	update_tty_status(ser);
	return 0;
}

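/*
 * Line-discipline close: drop the tty reference and move the device to
 * ser_release_list; the actual unregister/free is deferred to the
 * ser_release() work item.
 */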
static void ldisc_close(struct tty_struct *tty)
{
	struct ser_device *ser = tty->disc_data;

	tty_kref_put(ser->tty);

	spin_lock(&ser_lock);
	list_move(&ser->node, &ser_release_list);
	spin_unlock(&ser_lock);
	schedule_work(&ser_release_work);
}

/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
	.owner =	THIS_MODULE,
	.magic =	TTY_LDISC_MAGIC,
	.name =		"n_caif",
	.open =		ldisc_open,
	.close =	ldisc_close,
	.receive_buf =	ldisc_receive,
	.write_wakeup =	ldisc_tx_wakeup
};

static int register_ldisc(void)
{
	int result;

	result = tty_register_ldisc(N_CAIF, &caif_ldisc);
	if (result < 0) {
		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
			result);
		return result;
	}
	return result;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = caif_net_open,
	.ndo_stop = caif_net_close,
	.ndo_start_xmit = caif_xmit
};

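/*
 * Setup callback passed to alloc_netdev(): configure the interface as a
 * point-to-point, no-ARP CAIF device, enable CAIF fragmentation and set the
 * STX/FCS options from the module parameters.
 */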
static void caifdev_setup(struct net_device *dev)
{
	struct ser_device *serdev = netdev_priv(dev);

	dev->features = 0;
	dev->netdev_ops = &netdev_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CAIF_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->destructor = free_netdev;
	skb_queue_head_init(&serdev->head);
	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
	serdev->common.use_frag = true;
	serdev->common.use_stx = ser_use_stx;
	serdev->common.use_fcs = ser_use_fcs;
	serdev->dev = dev;
}

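/* Start/stop the transmit queue when the interface is brought up or down. */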
static int caif_net_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}

static int caif_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

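/*
 * Module init/exit: register the N_CAIF line discipline and create the
 * "caif_serial" debugfs directory; on exit, release every remaining device
 * before unregistering the line discipline.
 */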
static int __init caif_ser_init(void)
{
	int ret;

	ret = register_ldisc();
	debugfsdir = debugfs_create_dir("caif_serial", NULL);
	return ret;
}

static void __exit caif_ser_exit(void)
{
	spin_lock(&ser_lock);
	list_splice(&ser_list, &ser_release_list);
	spin_unlock(&ser_lock);
	ser_release(NULL);
	cancel_work_sync(&ser_release_work);
	tty_unregister_ldisc(N_CAIF);
	debugfs_remove_recursive(debugfsdir);
}

module_init(caif_ser_init);
module_exit(caif_ser_exit);