/*
 * SH SPI bus driver
 *
 * Copyright (C) 2011  Renesas Solutions Corp.
 *
 * Based on pxa2xx_spi.c:
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>

#define SPI_SH_TBR		0x00
#define SPI_SH_RBR		0x00
#define SPI_SH_CR1		0x08
#define SPI_SH_CR2		0x10
#define SPI_SH_CR3		0x18
#define SPI_SH_CR4		0x20
#define SPI_SH_CR5		0x28

/* CR1 */
#define SPI_SH_TBE		0x80
#define SPI_SH_TBF		0x40
#define SPI_SH_RBE		0x20
#define SPI_SH_RBF		0x10
#define SPI_SH_PFONRD		0x08
#define SPI_SH_SSDB		0x04
#define SPI_SH_SSD		0x02
#define SPI_SH_SSA		0x01

/* CR2 */
#define SPI_SH_RSTF		0x80
#define SPI_SH_LOOPBK		0x40
#define SPI_SH_CPOL		0x20
#define SPI_SH_CPHA		0x10
#define SPI_SH_L1M0		0x08

/* CR3 */
#define SPI_SH_MAX_BYTE		0xFF

/* CR4 */
#define SPI_SH_TBEI		0x80
#define SPI_SH_TBFI		0x40
#define SPI_SH_RBEI		0x20
#define SPI_SH_RBFI		0x10
#define SPI_SH_WPABRT		0x04
#define SPI_SH_SSS		0x01

/* CR8 */
#define SPI_SH_P1L0		0x80
#define SPI_SH_PP1L0		0x40
#define SPI_SH_MUXI		0x20
#define SPI_SH_MUXIRQ		0x10

#define SPI_SH_FIFO_SIZE	32
#define SPI_SH_SEND_TIMEOUT	(3 * HZ)
#define SPI_SH_RECEIVE_TIMEOUT	(HZ >> 3)

#undef DEBUG

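/*
 * Per-controller state, stored as the spi_master's driver data.
 * 'cr1' caches the CR1 status bits latched by the interrupt handler,
 * so the workqueue can sleep on them via 'wait'.
 */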
struct spi_sh_data {
	void __iomem *addr;
	int irq;
	struct spi_master *master;
	struct list_head queue;
	struct workqueue_struct *workqueue;
	struct work_struct ws;
	unsigned long cr1;
	wait_queue_head_t wait;
	spinlock_t lock;
	int width;
};

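/*
 * Register accessors.  On an 8-bit bus the registers are packed at one
 * quarter of their nominal offsets (hence "offset >> 2"); on a 32-bit
 * bus the nominal offsets are used directly.
 */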
static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
			     unsigned long offset)
{
	if (ss->width == 8)
		iowrite8(data, ss->addr + (offset >> 2));
	else if (ss->width == 32)
		iowrite32(data, ss->addr + offset);
}

static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
{
	if (ss->width == 8)
		return ioread8(ss->addr + (offset >> 2));
	else if (ss->width == 32)
		return ioread32(ss->addr + offset);
	else
		return 0;
}

static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
				unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp |= val;
	spi_sh_write(ss, tmp, offset);
}

static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
				unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp &= ~val;
	spi_sh_write(ss, tmp, offset);
}

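/* Pulse the FIFO-reset bit in CR2 to flush the controller's FIFOs. */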
static void clear_fifo(struct spi_sh_data *ss)
{
	spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
	spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
}

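/*
 * Busy-wait helpers: poll CR1 every 10us, up to 100000 times (roughly
 * one second), for receive data to arrive or the transmit buffer to
 * drain.
 */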
static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
{
	int timeout = 100000;

	while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
		udelay(10);
		if (timeout-- < 0)
			return -ETIMEDOUT;
	}
	return 0;
}

static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
{
	int timeout = 100000;

	while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
		udelay(10);
		if (timeout-- < 0)
			return -ETIMEDOUT;
	}
	return 0;
}

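/*
 * Transmit one transfer: assert the slave select (SSA), feed the TX
 * FIFO in chunks of up to SPI_SH_FIFO_SIZE bytes, and between chunks
 * enable the transmit-buffer-empty interrupt and sleep until the
 * handler reports it.  On the last transfer of a message, also wait
 * for the FIFO to drain completely.
 */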
static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
			struct spi_transfer *t)
{
	int i, retval = 0;
	int remain = t->len;
	int cur_len;
	unsigned char *data;
	unsigned long tmp;
	long ret;

	if (t->len)
		spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	data = (unsigned char *)t->tx_buf;
	while (remain > 0) {
		cur_len = min(SPI_SH_FIFO_SIZE, remain);
		for (i = 0; i < cur_len &&
				!(spi_sh_read(ss, SPI_SH_CR4) &
							SPI_SH_WPABRT) &&
				!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
				i++)
			spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);

		if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
			/* Abort SPI operation */
			spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
			retval = -EIO;
			break;
		}

		cur_len = i;

		remain -= cur_len;
		data += cur_len;

		if (remain > 0) {
			ss->cr1 &= ~SPI_SH_TBE;
			spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
			ret = wait_event_interruptible_timeout(ss->wait,
						 ss->cr1 & SPI_SH_TBE,
						 SPI_SH_SEND_TIMEOUT);
			if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
				printk(KERN_ERR "%s: timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}
	}

	if (list_is_last(&t->transfer_list, &mesg->transfers)) {
		tmp = spi_sh_read(ss, SPI_SH_CR1);
		tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
		spi_sh_write(ss, tmp, SPI_SH_CR1);
		spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

		ss->cr1 &= ~SPI_SH_TBE;
		spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
		ret = wait_event_interruptible_timeout(ss->wait,
					 ss->cr1 & SPI_SH_TBE,
					 SPI_SH_SEND_TIMEOUT);
		/* timed out and TBE never became set: report the timeout */
		if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
			printk(KERN_ERR "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return retval;
}

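/*
 * Receive one transfer: program the byte count into CR3 (capped at
 * SPI_SH_MAX_BYTE), start the transfer, then drain the RX FIFO in
 * chunks, sleeping on the receive-buffer-full interrupt while a full
 * FIFO's worth of data is still expected.
 */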
static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
			  struct spi_transfer *t)
{
	int i;
	int remain = t->len;
	int cur_len;
	unsigned char *data;
	unsigned long tmp;
	long ret;

	if (t->len > SPI_SH_MAX_BYTE)
		spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
	else
		spi_sh_write(ss, t->len, SPI_SH_CR3);

	tmp = spi_sh_read(ss, SPI_SH_CR1);
	tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB);
	spi_sh_write(ss, tmp, SPI_SH_CR1);
	spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	spi_sh_wait_write_buffer_empty(ss);

	data = (unsigned char *)t->rx_buf;
	while (remain > 0) {
		if (remain >= SPI_SH_FIFO_SIZE) {
			ss->cr1 &= ~SPI_SH_RBF;
			spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
			ret = wait_event_interruptible_timeout(ss->wait,
						 ss->cr1 & SPI_SH_RBF,
						 SPI_SH_RECEIVE_TIMEOUT);
			if (ret == 0 &&
			    spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
				printk(KERN_ERR "%s: timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}

		cur_len = min(SPI_SH_FIFO_SIZE, remain);
		for (i = 0; i < cur_len; i++) {
			if (spi_sh_wait_receive_buffer(ss))
				break;
			data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
		}

		remain -= cur_len;
		data += cur_len;
	}

	/* deassert CS when SPI is receiving. */
	if (t->len > SPI_SH_MAX_BYTE) {
		clear_fifo(ss);
		spi_sh_write(ss, 1, SPI_SH_CR3);
	} else {
		spi_sh_write(ss, 0, SPI_SH_CR3);
	}

	return 0;
}

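/*
 * Workqueue handler: drain the message queue, running the TX and/or RX
 * half of each transfer, complete every message, and finally deassert
 * the chip select and clear the FIFOs.
 */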
static void spi_sh_work(struct work_struct *work)
{
	struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
	struct spi_message *mesg;
	struct spi_transfer *t;
	unsigned long flags;
	int ret;

	pr_debug("%s: enter\n", __func__);

	spin_lock_irqsave(&ss->lock, flags);
	while (!list_empty(&ss->queue)) {
		mesg = list_entry(ss->queue.next, struct spi_message, queue);
		list_del_init(&mesg->queue);

		spin_unlock_irqrestore(&ss->lock, flags);
		list_for_each_entry(t, &mesg->transfers, transfer_list) {
			pr_debug("tx_buf = %p, rx_buf = %p\n",
					t->tx_buf, t->rx_buf);
			pr_debug("len = %d, delay_usecs = %d\n",
					t->len, t->delay_usecs);

			if (t->tx_buf) {
				ret = spi_sh_send(ss, mesg, t);
				if (ret < 0)
					goto error;
			}
			if (t->rx_buf) {
				ret = spi_sh_receive(ss, mesg, t);
				if (ret < 0)
					goto error;
			}
			mesg->actual_length += t->len;
		}
		spin_lock_irqsave(&ss->lock, flags);

		mesg->status = 0;
		mesg->complete(mesg->context);
	}

	clear_fifo(ss);
	spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
	udelay(100);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);

	clear_fifo(ss);

	spin_unlock_irqrestore(&ss->lock, flags);

	return;

 error:
	mesg->status = ret;
	mesg->complete(mesg->context);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);
	clear_fifo(ss);

}

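/*
 * Per-device setup: stop any cycle in progress, reinitialize CR1/CR3
 * and the FIFOs, and program the 1/8 clock setting in CR2.
 */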
static int spi_sh_setup(struct spi_device *spi)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	pr_debug("%s: enter\n", __func__);

	spi_sh_write(ss, 0xfe, SPI_SH_CR1);	/* SPI cycle stop */
	spi_sh_write(ss, 0x00, SPI_SH_CR1);	/* CR1 init */
	spi_sh_write(ss, 0x00, SPI_SH_CR3);	/* CR3 init */

	clear_fifo(ss);

	/* 1/8 clock */
	spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
	udelay(10);

	return 0;
}

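/*
 * ->transfer hook: just queue the message and schedule the workqueue;
 * the actual I/O is done later in spi_sh_work().
 */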
static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
	unsigned long flags;

	pr_debug("%s: enter\n", __func__);
	pr_debug("\tmode = %02x\n", spi->mode);

	spin_lock_irqsave(&ss->lock, flags);

	mesg->actual_length = 0;
	mesg->status = -EINPROGRESS;

	spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	list_add_tail(&mesg->queue, &ss->queue);
	queue_work(ss->workqueue, &ss->ws);

	spin_unlock_irqrestore(&ss->lock, flags);

	return 0;
}

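/* ->cleanup hook: make sure the chip select control bits are cleared. */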
static void spi_sh_cleanup(struct spi_device *spi)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

	pr_debug("%s: enter\n", __func__);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);
}

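/*
 * Interrupt handler: latch the CR1 status bits into ss->cr1, disable
 * the corresponding interrupt enables in CR4, and wake up any waiter.
 */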
static irqreturn_t spi_sh_irq(int irq, void *_ss)
{
	struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
	unsigned long cr1;

	cr1 = spi_sh_read(ss, SPI_SH_CR1);
	if (cr1 & SPI_SH_TBE)
		ss->cr1 |= SPI_SH_TBE;
	if (cr1 & SPI_SH_TBF)
		ss->cr1 |= SPI_SH_TBF;
	if (cr1 & SPI_SH_RBE)
		ss->cr1 |= SPI_SH_RBE;
	if (cr1 & SPI_SH_RBF)
		ss->cr1 |= SPI_SH_RBF;

	if (ss->cr1) {
		spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
		wake_up(&ss->wait);
	}

	return IRQ_HANDLED;
}

static int __devexit spi_sh_remove(struct platform_device *pdev)
{
	struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);

	spi_unregister_master(ss->master);
	destroy_workqueue(ss->workqueue);
	free_irq(ss->irq, ss);
	iounmap(ss->addr);

	return 0;
}

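/*
 * The width of register accesses (8 or 32 bit) is taken from the
 * IORESOURCE_MEM_TYPE_MASK flags of the memory resource supplied by
 * the platform device.
 */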
static int __devinit spi_sh_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct spi_master *master;
	struct spi_sh_data *ss;
	int ret, irq;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "platform_get_irq error\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "spi_alloc_master error.\n");
		return -ENOMEM;
	}

	ss = spi_master_get_devdata(master);
	dev_set_drvdata(&pdev->dev, ss);

	switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
	case IORESOURCE_MEM_8BIT:
		ss->width = 8;
		break;
	case IORESOURCE_MEM_32BIT:
		ss->width = 32;
		break;
	default:
		dev_err(&pdev->dev, "unsupported resource width\n");
		ret = -ENODEV;
		goto error1;
	}
	ss->irq = irq;
	ss->master = master;
	ss->addr = ioremap(res->start, resource_size(res));
	if (ss->addr == NULL) {
		dev_err(&pdev->dev, "ioremap error.\n");
		ret = -ENOMEM;
		goto error1;
	}
	INIT_LIST_HEAD(&ss->queue);
	spin_lock_init(&ss->lock);
	INIT_WORK(&ss->ws, spi_sh_work);
	init_waitqueue_head(&ss->wait);
	ss->workqueue = create_singlethread_workqueue(
					dev_name(master->dev.parent));
	if (ss->workqueue == NULL) {
		dev_err(&pdev->dev, "create workqueue error\n");
		ret = -EBUSY;
		goto error2;
	}

	ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error\n");
		goto error3;
	}

	master->num_chipselect = 2;
	master->bus_num = pdev->id;
	master->setup = spi_sh_setup;
	master->transfer = spi_sh_transfer;
	master->cleanup = spi_sh_cleanup;

	ret = spi_register_master(master);
	if (ret < 0) {
		printk(KERN_ERR "spi_register_master error.\n");
		goto error4;
	}

	return 0;

 error4:
	free_irq(irq, ss);
 error3:
	destroy_workqueue(ss->workqueue);
 error2:
	iounmap(ss->addr);
 error1:
	spi_master_put(master);

	return ret;
}

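/*
 * The driver binds by name to a platform device called "sh_spi".  A
 * minimal sketch of how board code might provide one is shown below;
 * the base address, size and IRQ number are hypothetical and board
 * specific, and the resource type flag selects 8- or 32-bit register
 * access.  Client devices then attach to the resulting bus (bus_num ==
 * pdev->id) through the usual spi_board_info mechanism.
 *
 *	static struct resource spi_sh_resources[] = {
 *		[0] = {
 *			.start	= 0xfe002000,
 *			.end	= 0xfe002000 + 0x30 - 1,
 *			.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
 *		},
 *		[1] = {
 *			.start	= 86,
 *			.end	= 86,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device spi_sh_device = {
 *		.name		= "sh_spi",
 *		.id		= 0,
 *		.resource	= spi_sh_resources,
 *		.num_resources	= ARRAY_SIZE(spi_sh_resources),
 *	};
 */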
static struct platform_driver spi_sh_driver = {
	.probe = spi_sh_probe,
	.remove = __devexit_p(spi_sh_remove),
	.driver = {
		.name = "sh_spi",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(spi_sh_driver);

MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");