/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * Based on sh_sir.c
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * CAUTION
 *
 * This driver is very simple, so the following features are not
 * supported yet:
 *  - MIR/FIR support
 *  - DMA transfer support
 *  - FIFO mode support
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#define DRIVER_NAME "sh_irda"

#define __IRDARAM_LEN	0x1039

#define IRTMR		0x1F00 /* Transfer mode */
#define IRCFR		0x1F02 /* Configuration */
#define IRCTR		0x1F04 /* IR control */
#define IRTFLR		0x1F20 /* Transmit frame length */
#define IRTCTR		0x1F22 /* Transmit control */
#define IRRFLR		0x1F40 /* Receive frame length */
#define IRRCTR		0x1F42 /* Receive control */
#define SIRISR		0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR		0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR		0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR		0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR		0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR		0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR		0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR		0x1F80 /* CRC engine control */
#define CRCIR		0x1F86 /* CRC engine input data */
#define CRCCR		0x1F8A /* CRC engine calculation */
#define CRCOR		0x1F8E /* CRC engine output data */
#define FIFOCP		0x1FC0 /* FIFO current pointer */
#define FIFOFP		0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK	0x1FC4 /* FIFO receive status mask */
#define FIFORSOR	0x1FC6 /* FIFO receive status OR */
#define FIFOSEL		0x1FC8 /* FIFO select */
#define FIFORS		0x1FCA /* FIFO receive status */
#define FIFORFL		0x1FCC /* FIFO receive frame length */
#define FIFORAMCP	0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP	0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL		0x1FD2 /* BUS interface control */
#define IRDARAM		0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN	__IRDARAM_LEN /* - 8/16/32 (read-only for 32) */

/* IRTMR */
#define TMD_MASK	(0x3 << 14) /* Transfer Mode */
#define TMD_SIR		(0x0 << 14)
#define TMD_MIR		(0x3 << 14)
#define TMD_FIR		(0x2 << 14)

#define FIFORIM		(1 << 8) /* FIFO receive interrupt mask */
#define MIM		(1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM		(1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK	(FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT	8 /* shift for Receive Timeout */
#define RTO		(0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD		(1 << 15) /* Auto-Receive Mode */
#define TE		(1 <<  0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK	(0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE		(1 <<  0) /* Receive Enable */

/*
 * SIRISR,  SIRIMR,  SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE		(1 << 15) /* Frame Receive End */
#define TROV		(1 << 11) /* Transfer Area Overflow */
#define xIR_9		(1 << 9)
#define TOT		xIR_9     /* for SIR     Timeout */
#define ABTD		xIR_9     /* for MIR/FIR Abort Detection */
#define xIR_8		(1 << 8)
#define FER		xIR_8     /* for SIR     Framing Error */
#define CRCER		xIR_8     /* for MIR/FIR CRC error */
#define FTE		(1 << 7)  /* Frame Transmit End */
#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK	(0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF    /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK	0x0FFF    /* mask for CRC Engine Input Data */

/************************************************************************


			enum / structure


************************************************************************/
enum sh_irda_mode {
	SH_IRDA_NONE = 0,
	SH_IRDA_SIR,
	SH_IRDA_MIR,
	SH_IRDA_FIR,
};

struct sh_irda_self;
struct sh_irda_xir_func {
	int (*xir_fre)	(struct sh_irda_self *self);
	int (*xir_trov)	(struct sh_irda_self *self);
	int (*xir_9)	(struct sh_irda_self *self);
	int (*xir_8)	(struct sh_irda_self *self);
	int (*xir_fte)	(struct sh_irda_self *self);
};

struct sh_irda_self {
	void __iomem		*membase;
	unsigned int		irq;
	struct platform_device	*pdev;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	enum sh_irda_mode	mode;
	spinlock_t		lock;

	struct sh_irda_xir_func	*xir_func;
};

/************************************************************************


			common function


************************************************************************/
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	iowrite16(data, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}

static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&self->lock, flags);
	ret = ioread16(self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);

	return ret;
}

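/*
 * Read-modify-write helper: only the bits selected by @mask are replaced
 * with @data, and the register is rewritten only when the merged value
 * differs from the current contents.
 */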
static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
			       u16 mask, u16 data)
{
	unsigned long flags;
	u16 old, new;

	spin_lock_irqsave(&self->lock, flags);
	old = ioread16(self->membase + offset);
	new = (old & ~mask) | data;
	if (old != new)
		iowrite16(new, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}

/************************************************************************


			mode function


************************************************************************/
/*=====================================
 *
 *		common
 *
 *=====================================*/
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
	struct device *dev = &self->ndev->dev;

	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
	struct device *dev = &self->ndev->dev;

	if (SH_IRDA_SIR != self->mode)
		interval = 0;

	if (interval < 0 || interval > 2) {
		dev_err(dev, "unsupported timeout interval\n");
		return -EINVAL;
	}

	sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
	return 0;
}

static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
	struct device *dev = &self->ndev->dev;
	u16 val;

	if (baudrate < 0)
		return 0;

	if (SH_IRDA_SIR != self->mode) {
		dev_err(dev, "baud rate can only be set in SIR mode\n");
		return -EINVAL;
	}

	/*
	 * Baud rate (bits/s) =
	 *   (48 MHz / 26) / 16 / (baud rate counter value + 1)
	 */
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d,  val = 0x%02x\n", baudrate, val);

	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

	return 0;
}

static int sh_irda_get_rcv_length(struct sh_irda_self *self)
{
	return RFL_MASK & sh_irda_read(self, IRRFLR);
}

/*=====================================
 *
 *		NONE MODE
 *
 *=====================================*/
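/*
 * The handlers below only report the event.  They are used while no
 * transfer mode is selected and double as placeholders for the not yet
 * supported MIR/FIR modes.
 */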
static int sh_irda_xir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame recv\n");
	return 0;
}

static int sh_irda_xir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: buffer ram over\n");
	return 0;
}

static int sh_irda_xir_9(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: time over\n");
	return 0;
}

static int sh_irda_xir_8(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: framing error\n");
	return 0;
}

static int sh_irda_xir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame transmit end\n");
	return 0;
}

static struct sh_irda_xir_func sh_irda_xir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};

/*=====================================
 *
 *		MIR/FIR MODE
 *
 * MIR/FIR are not supported now
 *=====================================*/
static struct sh_irda_xir_func sh_irda_mfir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};

/*=====================================
 *
 *		SIR MODE
 *
 *=====================================*/
static int sh_irda_sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8  *data = (u8 *)&data16;
	int len = sh_irda_get_rcv_length(self);
	int i, j;

	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

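	/*
	 * IRDARAM is accessed 16 bits at a time, so each read supplies two
	 * received bytes; hand them to the SIR unwrapper one byte at a time.
	 */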
	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	sh_irda_rcv_ctrl(self, 1);

	return 0;
}

static int sh_irda_sir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "buffer ram over\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_tot(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "time over\n");
	sh_irda_set_baudrate(self, 9600);
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_fer(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "framing error\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_dbg(dev, "frame transmit end\n");
	netif_wake_queue(self->ndev);

	return 0;
}

static struct sh_irda_xir_func sh_irda_sir_func = {
	.xir_fre	= sh_irda_sir_fre,
	.xir_trov	= sh_irda_sir_trov,
	.xir_9		= sh_irda_sir_tot,
	.xir_8		= sh_irda_sir_fer,
	.xir_fte	= sh_irda_sir_fte,
};

static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
	struct device *dev = &self->ndev->dev;
	struct sh_irda_xir_func	*func;
	const char *name;
	u16 data;

	switch (mode) {
	case SH_IRDA_SIR:
		name	= "SIR";
		data	= TMD_SIR;
		func	= &sh_irda_sir_func;
		break;
	case SH_IRDA_MIR:
		name	= "MIR";
		data	= TMD_MIR;
		func	= &sh_irda_mfir_func;
		break;
	case SH_IRDA_FIR:
		name	= "FIR";
		data	= TMD_FIR;
		func	= &sh_irda_mfir_func;
		break;
	default:
		name	= "NONE";
		data	= 0;
		func	= &sh_irda_xir_func;
		break;
	}

	self->mode = mode;
	self->xir_func = func;
	sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

	dev_dbg(dev, "switch to %s mode\n", name);
}

/************************************************************************


			irq function


************************************************************************/
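/*
 * Mask and acknowledge every IrDA interrupt source, then unmask only the
 * sources that belong to the currently selected transfer mode.
 */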
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;
	u16 xir_reg;

	/* mask all interrupt sources */
	sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear pending interrupts */
	sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* unmask the sources used by the current mode */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}

static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func	*func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR);

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	if (isr & FRE)
		func->xir_fre(self);
	if (isr & TROV)
		func->xir_trov(self);
	if (isr & xIR_9)
		func->xir_9(self);
	if (isr & xIR_8)
		func->xir_8(self);
	if (isr & FTE)
		func->xir_fte(self);

	return IRQ_HANDLED;
}

/************************************************************************


			CRC function


************************************************************************/
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
	sh_irda_write(self, CRCCTR, CRC_RST);
}

static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}

static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}

static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
	return sh_irda_read(self, CRCOR);
}

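/*
 * Self-test for the CRC engine: feed a fixed four-byte pattern and check
 * that the input counter and the CRC output match the expected values.
 */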
static int sh_irda_crc_init(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_irda_crc_reset(self);

	sh_irda_crc_add(self, 0xCC);
	sh_irda_crc_add(self, 0xF5);
	sh_irda_crc_add(self, 0xF1);
	sh_irda_crc_add(self, 0xA7);

	val = sh_irda_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_irda_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:

	sh_irda_crc_reset(self);
	return ret;
}

/************************************************************************


			iobuf function


************************************************************************/
static void sh_irda_remove_iobuf(struct sh_irda_self *self)
{
	kfree(self->rx_buff.head);

	self->tx_buff.head = NULL;
	self->tx_buff.data = NULL;
	self->rx_buff.head = NULL;
	self->rx_buff.data = NULL;
}

static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff already exists\n");
		return -EINVAL;
	}

	/* rx_buff */
	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize	= rxsize;
	self->rx_buff.in_frame	= FALSE;
	self->rx_buff.state	= OUTSIDE_FRAME;
	self->rx_buff.data	= self->rx_buff.head;

	/* tx_buff */
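	/*
	 * The transmit buffer points directly at the device's IRDARAM, so
	 * async_wrap_skb() in the xmit path writes the wrapped frame
	 * straight into the chip's buffer RAM.
	 */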
	self->tx_buff.head	= self->membase + IRDARAM;
	self->tx_buff.truesize	= IRDARAM_LEN;

	return 0;
}

/************************************************************************


			net_device_ops function


************************************************************************/
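/*
 * Transmit path: stop the queue, disable the receiver, wrap the frame into
 * IRDARAM and start the transmitter.  The queue is woken again from the
 * "frame transmit end" interrupt handler (sh_irda_sir_fte()).
 */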
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	int ret;

	dev_dbg(dev, "hard xmit\n");

	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		goto sh_irda_hard_xmit_end;

	self->tx_buff.len = 0;
	if (skb->len) {
		unsigned long flags;

		spin_lock_irqsave(&self->lock, flags);
		self->tx_buff.len = async_wrap_skb(skb,
						   self->tx_buff.head,
						   self->tx_buff.truesize);
		spin_unlock_irqrestore(&self->lock, flags);

		if (self->tx_buff.len > self->tx_buff.truesize)
			self->tx_buff.len = self->tx_buff.truesize;

		sh_irda_write(self, IRTFLR, self->tx_buff.len);
		sh_irda_write(self, IRTCTR, ARMOD | TE);
	} else
		goto sh_irda_hard_xmit_end;

	dev_kfree_skb(skb);

	return 0;

sh_irda_hard_xmit_end:
	sh_irda_set_baudrate(self, 9600);
	netif_wake_queue(self->ndev);
	sh_irda_rcv_ctrl(self, 1);
	dev_kfree_skb(skb);

	return ret;
}

static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * The IrDA framework requires this callback,
	 * but there is nothing to do here yet.
	 */
	return 0;
}

static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}

static int sh_irda_open(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	int err;

	pm_runtime_get_sync(&self->pdev->dev);
	err = sh_irda_crc_init(self);
	if (err)
		goto open_err;

	sh_irda_set_mode(self, SH_IRDA_SIR);
	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);

	dev_info(&ndev->dev, "opened\n");

	return 0;

open_err:
	pm_runtime_put_sync(&self->pdev->dev);

	return err;
}

static int sh_irda_stop(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(ndev);
	pm_runtime_put_sync(&self->pdev->dev);

	dev_info(&ndev->dev, "stopped\n");

	return 0;
}

static const struct net_device_ops sh_irda_ndo = {
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};

/************************************************************************


			platform_driver function


************************************************************************/
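/*
 * Probe: map the register window, allocate the SIR I/O buffers, enable
 * runtime PM, fill in the IrDA QoS parameters, register the net_device
 * and finally request the interrupt.
 */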
static int sh_irda_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_irda_self *self;
	struct resource *res;
	int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	self->pdev = pdev;
	pm_runtime_enable(&pdev->dev);

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops	= &sh_irda_ndo;
	ndev->irq		= irq;

	self->ndev			= ndev;
	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */
	spin_lock_init(&self->lock);

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);
	err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
	if (err) {
		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
		goto err_mem_4;
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");

	goto exit;

err_mem_4:
	pm_runtime_disable(&pdev->dev);
	sh_irda_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}

static int sh_irda_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_irda_self *self = netdev_priv(ndev);

	if (!self)
		return 0;

	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	sh_irda_remove_iobuf(self);
	iounmap(self->membase);
	free_netdev(ndev);

	return 0;
}

static int sh_irda_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_irda_pm_ops = {
	.runtime_suspend	= sh_irda_runtime_nop,
	.runtime_resume		= sh_irda_runtime_nop,
};

static struct platform_driver sh_irda_driver = {
	.probe	= sh_irda_probe,
	.remove	= sh_irda_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &sh_irda_pm_ops,
	},
};

module_platform_driver(sh_irda_driver);

MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");