/*
 *  linux/drivers/net/irda/sa1100_ir.c
 *
 *  Copyright (C) 2000-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Infra-red driver for the StrongARM SA1100 embedded microprocessor
 *
 *  Note that we don't have to worry about the SA1111's DMA bugs in here,
 *  so we use the straightforward dma_map_* functions with a null pointer.
 *
 *  This driver takes one kernel command line parameter, sa1100ir=, with
 *  the following options:
 *	max_rate:baudrate	- set the maximum baud rate
 *	power_level:level	- set the transmitter power level
 *	tx_lpm:0|1		- set transmit low power mode
 */
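/*
 * Illustrative example only: when built as a module, the same options are
 * exposed as module parameters, e.g.
 *	modprobe sa1100_ir max_rate=115200 power_level=3 tx_lpm=0
 */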
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sa11x0-dma.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include <mach/hardware.h>
#include <asm/mach/irda.h>

static int power_level = 3;
static int tx_lpm;
static int max_rate = 4000000;

struct sa1100_buf {
	struct device		*dev;
	struct sk_buff		*skb;
	struct scatterlist	sg;
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
};

struct sa1100_irda {
	unsigned char		utcr4;
	unsigned char		power;
	unsigned char		open;

	int			speed;
	int			newspeed;

	struct sa1100_buf	dma_rx;
	struct sa1100_buf	dma_tx;

	struct device		*dev;
	struct irda_platform_data *pdata;
	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
	irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *);
};

static int sa1100_irda_set_speed(struct sa1100_irda *, int);

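/* 4Mbps and above uses the FIR (HSSP) datapath; lower speeds use HP-SIR. */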
#define IS_FIR(si)		((si)->speed >= 4000000)

#define HPSIR_MAX_RXLEN		2047

static struct dma_slave_config sa1100_irda_sir_tx = {
	.direction	= DMA_TO_DEVICE,
	.dst_addr	= __PREG(Ser2UTDR),
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_maxburst	= 4,
};

static struct dma_slave_config sa1100_irda_fir_rx = {
	.direction	= DMA_FROM_DEVICE,
	.src_addr	= __PREG(Ser2HSDR),
	.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_maxburst	= 8,
};

static struct dma_slave_config sa1100_irda_fir_tx = {
	.direction	= DMA_TO_DEVICE,
	.dst_addr	= __PREG(Ser2HSDR),
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_maxburst	= 8,
};

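/*
 * Number of bytes transferred so far on a *paused* DMA channel: the
 * scatterlist length minus the dmaengine-reported residue.
 */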
static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf)
{
	struct dma_chan *chan = buf->chan;
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, buf->cookie, &state);
	if (status != DMA_PAUSED)
		return 0;

	return sg_dma_len(&buf->sg) - state.residue;
}

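/*
 * Request a SA11x0 DMA channel by name and apply the appropriate slave
 * configuration.  A slave_config failure is reported but not treated as
 * fatal here.
 */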
static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf,
	const char *name, struct dma_slave_config *cfg)
{
	dma_cap_mask_t m;
	int ret;

	dma_cap_zero(m);
	dma_cap_set(DMA_SLAVE, m);

	buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
	if (!buf->chan) {
		dev_err(dev, "unable to request DMA channel for %s\n",
			name);
		return -ENOENT;
	}

	ret = dmaengine_slave_config(buf->chan, cfg);
	if (ret)
		dev_warn(dev, "DMA slave_config for %s returned %d\n",
			name, ret);

	buf->dev = buf->chan->device->dev;

	return 0;
}

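/*
 * Prepare, submit and issue a single-entry scatterlist transfer on the
 * given channel, optionally with a completion callback.
 */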
static void sa1100_irda_dma_start(struct sa1100_buf *buf,
	enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = buf->chan;

	desc = dmaengine_prep_slave_sg(chan, &buf->sg, 1, dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (desc) {
		desc->callback = cb;
		desc->callback_param = cb_p;
		buf->cookie = dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}
}

/*
 * Allocate and map the receive buffer, unless it is already allocated.
 */
static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
{
	if (si->dma_rx.skb)
		return 0;

	si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
	if (!si->dma_rx.skb) {
		printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n");
		return -ENOMEM;
	}

	/*
	 * Align any IP headers that may be contained
	 * within the frame.
	 */
	skb_reserve(si->dma_rx.skb, 1);

	sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN);
	if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) {
		dev_kfree_skb_any(si->dma_rx.skb);
		return -ENOMEM;
	}

	return 0;
}

/*
 * We want to get here as soon as possible, and get the receiver set up.
 * We use the existing buffer.
 */
static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
{
	if (!si->dma_rx.skb) {
		printk(KERN_ERR "sa1100_ir: rx buffer went missing\n");
		return;
	}

	/*
	 * First empty receive FIFO
	 */
	Ser2HSCR0 = HSCR0_HSSP;

	/*
	 * Enable the DMA, receiver and receive interrupt.
	 */
	dmaengine_terminate_all(si->dma_rx.chan);
	sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL);

	Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE;
}

static void sa1100_irda_check_speed(struct sa1100_irda *si)
{
	if (si->newspeed) {
		sa1100_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	}
}

/*
 * HP-SIR format support.
 */
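/*
 * DMA completion callback for SIR transmit: unmap and free the skb, wait
 * for the UART transmitter to drain, re-enable the receiver, then apply
 * any pending speed change and wake the queue.
 */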
static void sa1100_irda_sirtxdma_irq(void *id)
{
	struct net_device *dev = id;
	struct sa1100_irda *si = netdev_priv(dev);

	dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE);
	dev_kfree_skb(si->dma_tx.skb);
	si->dma_tx.skb = NULL;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg);

	/* We need to ensure that the transmitter has finished. */
	do
		rmb();
	while (Ser2UTSR1 & UTSR1_TBY);

	/*
	 * Ok, we've finished transmitting.  Now enable the receiver.
	 * Sometimes we get a receive IRQ immediately after a transmit...
	 */
	Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
	Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;

	sa1100_irda_check_speed(si);

	/* I'm hungry! */
	netif_wake_queue(dev);
}

static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
	struct sa1100_irda *si)
{
	si->tx_buff.data = si->tx_buff.head;
	si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data,
					  si->tx_buff.truesize);

	si->dma_tx.skb = skb;
	sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len);
	if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
		si->dma_tx.skb = NULL;
		netif_wake_queue(dev);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev);

	/*
	 * The mean turn-around time is enforced by XBOF padding,
	 * so we don't have to do anything special here.
	 */
	Ser2UTCR3 = UTCR3_TXE;

	return NETDEV_TX_OK;
}

static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si)
{
	int status;

	status = Ser2UTSR0;

	/*
	 * Deal with any receive errors first.  The bytes in error may be
	 * the only bytes in the receive FIFO, so we do this first.
	 */
	while (status & UTSR0_EIF) {
		int stat, data;

		stat = Ser2UTSR1;
		data = Ser2UTDR;

		if (stat & (UTSR1_FRE | UTSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & UTSR1_FRE)
				dev->stats.rx_frame_errors++;
			if (stat & UTSR1_ROR)
				dev->stats.rx_fifo_errors++;
		} else
			async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);

		status = Ser2UTSR0;
	}

	/*
	 * We must clear certain bits.
	 */
	Ser2UTSR0 = status & (UTSR0_RID | UTSR0_RBB | UTSR0_REB);

	if (status & UTSR0_RFS) {
		/*
		 * There are at least 4 bytes in the FIFO.  Read 3 bytes
		 * and leave the rest to the block below.
		 */
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
	}

	if (status & (UTSR0_RFS | UTSR0_RID)) {
		/*
		 * FIFO contains more than 1 character.
		 */
		do {
			async_unwrap_char(dev, &dev->stats, &si->rx_buff,
					  Ser2UTDR);
		} while (Ser2UTSR1 & UTSR1_RNE);
	}

	return IRQ_HANDLED;
}

/*
 * FIR format support.
 */
static void sa1100_irda_firtxdma_irq(void *id)
{
	struct net_device *dev = id;
	struct sa1100_irda *si = netdev_priv(dev);
	struct sk_buff *skb;

	/*
	 * Wait for the transmission to complete.  Unfortunately,
	 * the hardware doesn't give us an interrupt to indicate
	 * "end of frame".
	 */
	do
		rmb();
	while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);

	/*
	 * Clear the transmit underrun bit.
	 */
	Ser2HSSR0 = HSSR0_TUR;

	/*
	 * Do we need to change speed?  Note that we're lazy
	 * here - we don't free the old dma_rx.skb.  We don't need
	 * to allocate a buffer either.
	 */
	sa1100_irda_check_speed(si);

	/*
	 * Start reception.  This disables the transmitter for
	 * us.  This will be using the existing RX buffer.
	 */
	sa1100_irda_rx_dma_start(si);

	/* Account and free the packet. */
	skb = si->dma_tx.skb;
	if (skb) {
		dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
			     DMA_TO_DEVICE);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);
		si->dma_tx.skb = NULL;
	}

	/*
	 * Make sure that the TX queue is available for sending
	 * (for retries).  TX has priority over RX at all times.
	 */
	netif_wake_queue(dev);
}

static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev,
	struct sa1100_irda *si)
{
	int mtt = irda_get_mtt(skb);

	si->dma_tx.skb = skb;
	sg_set_buf(&si->dma_tx.sg, skb->data, skb->len);
	if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
		si->dma_tx.skb = NULL;
		netif_wake_queue(dev);
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_firtxdma_irq, dev);

	/*
	 * If we have a mean turn-around time, impose the specified
	 * delay.  We could shorten this by timing from the point we
	 * received the packet.
	 */
	if (mtt)
		udelay(mtt);

	Ser2HSCR0 = HSCR0_HSSP | HSCR0_TXE;

	return NETDEV_TX_OK;
}

static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
{
	struct sk_buff *skb = si->dma_rx.skb;
	unsigned int len, stat, data;

	if (!skb) {
		printk(KERN_ERR "sa1100_ir: SKB is NULL!\n");
		return;
	}

	/*
	 * Get the current data position.
	 */
	len = sa1100_irda_dma_xferred(&si->dma_rx);
	if (len > HPSIR_MAX_RXLEN)
		len = HPSIR_MAX_RXLEN;
	dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);

	do {
		/*
		 * Read Status, and then Data.
		 */
		stat = Ser2HSSR1;
		rmb();
		data = Ser2HSDR;

		if (stat & (HSSR1_CRE | HSSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & HSSR1_CRE)
				dev->stats.rx_crc_errors++;
			if (stat & HSSR1_ROR)
				dev->stats.rx_frame_errors++;
		} else
			skb->data[len++] = data;

		/*
		 * If we hit the end of frame, there's
		 * no point in continuing.
		 */
		if (stat & HSSR1_EOF)
			break;
	} while (Ser2HSSR0 & HSSR0_EIF);

	if (stat & HSSR1_EOF) {
		si->dma_rx.skb = NULL;

		skb_put(skb, len);
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

		/*
		 * Before we pass the buffer up, allocate a new one.
		 */
		sa1100_irda_rx_alloc(si);

		netif_rx(skb);
	} else {
		/*
		 * Remap the buffer - it was previously mapped, and we
		 * hope that this succeeds.
		 */
		dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
	}
}

/*
 * We only have to handle RX events here; transmit events go via the TX
 * DMA handler.  We disable RX, process, and then restart RX.
 */
static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si)
{
	/*
	 * Stop RX DMA
	 */
	dmaengine_pause(si->dma_rx.chan);

	/*
	 * Framing error - we throw away the packet completely.
	 * Clearing RXE flushes the error conditions and data
	 * from the fifo.
	 */
	if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
		dev->stats.rx_errors++;

		if (Ser2HSSR0 & HSSR0_FRE)
			dev->stats.rx_frame_errors++;

		/*
		 * Clear out the DMA...
		 */
		Ser2HSCR0 = HSCR0_HSSP;

		/*
		 * Clear selected status bits now, so we
		 * don't miss them next time around.
		 */
		Ser2HSSR0 = HSSR0_FRE | HSSR0_RAB;
	}

	/*
	 * Deal with any receive errors.  Any of the lowest 8 bytes
	 * in the FIFO may contain an error.  We must read them one
	 * by one.  The "error" could even be the end of packet!
	 */
	if (Ser2HSSR0 & HSSR0_EIF)
		sa1100_irda_fir_error(si, dev);

	/*
	 * No matter what happens, we must restart reception.
	 */
	sa1100_irda_rx_dma_start(si);

	return IRQ_HANDLED;
}

/*
 * Set the IrDA communications speed.
 */
static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
{
	unsigned long flags;
	int brd, ret = -EINVAL;

	switch (speed) {
	case 9600:	case 19200:	case 38400:
	case 57600:	case 115200:
		brd = 3686400 / (16 * speed) - 1;
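		/*
		 * Divisor for the 3.6864MHz UART clock; for example,
		 * 115200 baud gives 3686400 / (16 * 115200) - 1 = 1,
		 * and 9600 baud gives 23.
		 */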

		/* Stop the receive DMA, and configure transmit. */
		if (IS_FIR(si)) {
			dmaengine_terminate_all(si->dma_rx.chan);
			dmaengine_slave_config(si->dma_tx.chan,
						&sa1100_irda_sir_tx);
		}

		local_irq_save(flags);

		Ser2UTCR3 = 0;
		Ser2HSCR0 = HSCR0_UART;

		Ser2UTCR1 = brd >> 8;
		Ser2UTCR2 = brd;

		/*
		 * Clear status register
		 */
		Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
		Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;

		if (si->pdata->set_speed)
			si->pdata->set_speed(si->dev, speed);

		si->speed = speed;
		si->tx_start = sa1100_irda_sir_tx_start;
		si->irq = sa1100_irda_sir_irq;

		local_irq_restore(flags);
		ret = 0;
		break;

	case 4000000:
		if (!IS_FIR(si))
			dmaengine_slave_config(si->dma_tx.chan,
						&sa1100_irda_fir_tx);

		local_irq_save(flags);

		Ser2HSSR0 = 0xff;
		Ser2HSCR0 = HSCR0_HSSP;
		Ser2UTCR3 = 0;

		si->speed = speed;
		si->tx_start = sa1100_irda_fir_tx_start;
		si->irq = sa1100_irda_fir_irq;

		if (si->pdata->set_speed)
			si->pdata->set_speed(si->dev, speed);

		sa1100_irda_rx_alloc(si);
		sa1100_irda_rx_dma_start(si);

		local_irq_restore(flags);
		ret = 0;
		break;

	default:
		break;
	}

	return ret;
}

/*
 * Control the power state of the IrDA transmitter.
 * State:
 *  0 - off
 *  1 - short range, lowest power
 *  2 - medium range, medium power
 *  3 - maximum range, high power
 *
 * Currently, only assabet is known to support this.
 */
static int
__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
{
	int ret = 0;
	if (si->pdata->set_power)
		ret = si->pdata->set_power(si->dev, state);
	return ret;
}

static inline int
sa1100_set_power(struct sa1100_irda *si, unsigned int state)
{
	int ret;

	ret = __sa1100_irda_set_power(si, state);
	if (ret == 0)
		si->power = state;

	return ret;
}

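/*
 * Top-level interrupt handler: dispatch to the SIR or FIR handler
 * installed for the current speed.
 */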
static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sa1100_irda *si = netdev_priv(dev);

	return si->irq(dev, si);
}

static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sa1100_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/* If this is an empty frame, we can bypass a lot. */
	if (skb->len == 0) {
		sa1100_irda_check_speed(si);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	netif_stop_queue(dev);

	/* We must not already have a skb to transmit... */
	BUG_ON(si->dma_tx.skb);

	return si->tx_start(skb, dev, si);
}

static int
sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct sa1100_irda *si = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (si->open) {
				ret = sa1100_irda_set_speed(si,
						rq->ifr_baudrate);
			} else {
				printk("sa1100_irda_ioctl: SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		rq->ifr_receiving = IS_FIR(si) ? 0
					: si->rx_buff.state != OUTSIDE_FRAME;
		break;

	default:
		break;
	}

	return ret;
}

static int sa1100_irda_startup(struct sa1100_irda *si)
{
	int ret;

	/*
	 * Ensure that the ports for this device are set up correctly.
	 */
	if (si->pdata->startup) {
		ret = si->pdata->startup(si->dev);
		if (ret)
			return ret;
	}

	/*
	 * Configure PPC for IRDA - we want to drive TXD2 low.
	 * We also want to drive this pin low during sleep.
	 */
	PPSR &= ~PPC_TXD2;
	PSDR &= ~PPC_TXD2;
	PPDR |= PPC_TXD2;

	/*
	 * Enable HP-SIR modulation, and ensure that the port is disabled.
	 */
	Ser2UTCR3 = 0;
	Ser2HSCR0 = HSCR0_UART;
	Ser2UTCR4 = si->utcr4;
	Ser2UTCR0 = UTCR0_8BitData;
	Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;

	/*
	 * Clear status register
	 */
	Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;

	ret = sa1100_irda_set_speed(si, si->speed = 9600);
	if (ret) {
		Ser2UTCR3 = 0;
		Ser2HSCR0 = 0;

		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
	}

	return ret;
}

static void sa1100_irda_shutdown(struct sa1100_irda *si)
{
	/*
	 * Stop all DMA activity.
	 */
	dmaengine_terminate_all(si->dma_rx.chan);
	dmaengine_terminate_all(si->dma_tx.chan);

	/* Disable the port. */
	Ser2UTCR3 = 0;
	Ser2HSCR0 = 0;

	if (si->pdata->shutdown)
		si->pdata->shutdown(si->dev);
}

static int sa1100_irda_start(struct net_device *dev)
{
	struct sa1100_irda *si = netdev_priv(dev);
	int err;

	si->speed = 9600;

	err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc",
				&sa1100_irda_fir_rx);
	if (err)
		goto err_rx_dma;

	err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr",
				&sa1100_irda_sir_tx);
	if (err)
		goto err_tx_dma;

	/*
	 * Setup the serial port for the specified speed.
	 */
	err = sa1100_irda_startup(si);
	if (err)
		goto err_startup;

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "sa1100");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
	if (err)
		goto err_irq;

	/*
	 * Now enable the interrupt and start the queue
	 */
	si->open = 1;
	sa1100_set_power(si, power_level); /* set transmitter power level */

	netif_start_queue(dev);
	return 0;

err_irq:
	irlap_close(si->irlap);
err_irlap:
	si->open = 0;
	sa1100_irda_shutdown(si);
err_startup:
	dma_release_channel(si->dma_tx.chan);
err_tx_dma:
	dma_release_channel(si->dma_rx.chan);
err_rx_dma:
	return err;
}

static int sa1100_irda_stop(struct net_device *dev)
{
	struct sa1100_irda *si = netdev_priv(dev);
	struct sk_buff *skb;

	netif_stop_queue(dev);

	si->open = 0;
	sa1100_irda_shutdown(si);

	/*
	 * If we have been doing any DMA activity, make sure we
	 * tidy that up cleanly.
	 */
	skb = si->dma_rx.skb;
	if (skb) {
		dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1,
			     DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		si->dma_rx.skb = NULL;
	}

	skb = si->dma_tx.skb;
	if (skb) {
		dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
			     DMA_TO_DEVICE);
		dev_kfree_skb(skb);
		si->dma_tx.skb = NULL;
	}

	/* Stop IrLAP */
	if (si->irlap) {
		irlap_close(si->irlap);
		si->irlap = NULL;
	}

	/*
	 * Free resources
	 */
	dma_release_channel(si->dma_tx.chan);
	dma_release_channel(si->dma_rx.chan);
	free_irq(dev->irq, dev);

	sa1100_set_power(si, 0);

	return 0;
}

static int sa1100_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state    = OUTSIDE_FRAME;
		io->data     = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}

static const struct net_device_ops sa1100_irda_netdev_ops = {
	.ndo_open		= sa1100_irda_start,
	.ndo_stop		= sa1100_irda_stop,
	.ndo_start_xmit		= sa1100_irda_hard_xmit,
	.ndo_do_ioctl		= sa1100_irda_ioctl,
};

static int sa1100_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct sa1100_irda *si;
	unsigned int baudrate_mask;
	int err, irq;

	if (!pdev->dev.platform_data)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq < 0 ? irq : -ENXIO;

	err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_1;
	err = request_mem_region(__PREG(Ser2HSCR0), 0x1c, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_2;
	err = request_mem_region(__PREG(Ser2HSCR2), 0x04, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_3;

	dev = alloc_irdadev(sizeof(struct sa1100_irda));
	if (!dev) {
		err = -ENOMEM;
		goto err_mem_4;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	si = netdev_priv(dev);
	si->dev = &pdev->dev;
	si->pdata = pdev->dev.platform_data;

	sg_init_table(&si->dma_rx.sg, 1);
	sg_init_table(&si->dma_tx.sg, 1);

	/*
	 * Initialise the HP-SIR buffers
	 */
	err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
	if (err)
		goto err_mem_5;
	err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_5;

	dev->netdev_ops	= &sa1100_irda_netdev_ops;
	dev->irq	= irq;

	irda_init_max_qos_capabilies(&si->qos);

	/*
	 * We support original IrDA up to 115k2, plus 4Mbps FIR when
	 * max_rate allows it.  Min Turn Time set to 1ms or greater.
	 */
	baudrate_mask = IR_9600;

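	/* Deliberate fall-through: each max_rate also enables every lower rate. */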
	switch (max_rate) {
	case 4000000:		baudrate_mask |= IR_4000000 << 8;
	case 115200:		baudrate_mask |= IR_115200;
	case 57600:		baudrate_mask |= IR_57600;
	case 38400:		baudrate_mask |= IR_38400;
	case 19200:		baudrate_mask |= IR_19200;
	}

	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;

	irda_qos_bits_to_value(&si->qos);

	si->utcr4 = UTCR4_HPSIR;
	if (tx_lpm)
		si->utcr4 |= UTCR4_Z1_6us;

	/*
	 * Initially enable HP-SIR modulation, and ensure that the port
	 * is disabled.
	 */
	Ser2UTCR3 = 0;
	Ser2UTCR4 = si->utcr4;
	Ser2HSCR0 = HSCR0_UART;

	err = register_netdev(dev);
	if (err == 0)
		platform_set_drvdata(pdev, dev);

	if (err) {
 err_mem_5:
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		free_netdev(dev);
 err_mem_4:
		release_mem_region(__PREG(Ser2HSCR2), 0x04);
 err_mem_3:
		release_mem_region(__PREG(Ser2HSCR0), 0x1c);
 err_mem_2:
		release_mem_region(__PREG(Ser2UTCR0), 0x24);
	}
 err_mem_1:
	return err;
}

static int sa1100_irda_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct sa1100_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		free_netdev(dev);
	}

	release_mem_region(__PREG(Ser2HSCR2), 0x04);
	release_mem_region(__PREG(Ser2HSCR0), 0x1c);
	release_mem_region(__PREG(Ser2UTCR0), 0x24);

	return 0;
}

#ifdef CONFIG_PM
/*
 * Suspend the IrDA interface.
 */
static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sa1100_irda *si;

	if (!dev)
		return 0;

	si = netdev_priv(dev);
	if (si->open) {
		/*
		 * Stop the transmit queue
		 */
		netif_device_detach(dev);
		disable_irq(dev->irq);
		sa1100_irda_shutdown(si);
		__sa1100_irda_set_power(si, 0);
	}

	return 0;
}

/*
 * Resume the IrDA interface.
 */
static int sa1100_irda_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sa1100_irda *si;

	if (!dev)
		return 0;

	si = netdev_priv(dev);
	if (si->open) {
		/*
		 * If we missed a speed change, initialise at the new speed
		 * directly.  It is debatable whether this is actually
		 * required, but in the interests of continuing from where
		 * we left off it is desirable.  The converse argument is
		 * that we should re-negotiate at 9600 baud again.
		 */
		if (si->newspeed) {
			si->speed = si->newspeed;
			si->newspeed = 0;
		}

		sa1100_irda_startup(si);
		__sa1100_irda_set_power(si, si->power);
		enable_irq(dev->irq);

		/*
		 * This automatically wakes up the queue
		 */
		netif_device_attach(dev);
	}

	return 0;
}
#else
#define sa1100_irda_suspend	NULL
#define sa1100_irda_resume	NULL
#endif

static struct platform_driver sa1100ir_driver = {
	.probe		= sa1100_irda_probe,
	.remove		= sa1100_irda_remove,
	.suspend	= sa1100_irda_suspend,
	.resume		= sa1100_irda_resume,
	.driver		= {
		.name	= "sa11x0-ir",
		.owner	= THIS_MODULE,
	},
};

static int __init sa1100_irda_init(void)
{
	/*
	 * Limit power level to a sensible range.
	 */
	if (power_level < 1)
		power_level = 1;
	if (power_level > 3)
		power_level = 3;

	return platform_driver_register(&sa1100ir_driver);
}

static void __exit sa1100_irda_exit(void)
{
	platform_driver_unregister(&sa1100ir_driver);
}

module_init(sa1100_irda_init);
module_exit(sa1100_irda_exit);
module_param(power_level, int, 0);
module_param(tx_lpm, int, 0);
module_param(max_rate, int, 0);

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("StrongARM SA1100 IrDA driver");
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
MODULE_ALIAS("platform:sa11x0-ir");