/*
 * Designware SPI core controller driver (refer to pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_dev related */
struct chip_data {
	u8 cs;			/* chip select pin */
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u8 enable_dma;
	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	void (*cs_control)(u32 command);
};

#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE	1024
static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dw_spi *dws = file->private_data;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"%s registers:\n", dev_name(&dws->master->dev));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}

static const struct file_operations dw_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= dw_spi_show_regs,
	.llseek		= default_llseek,
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[128];

	snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &dw_spi_regs_ops);
	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

static void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_master_get_devdata(spi->master);
	struct chip_data *chip = spi_get_ctldata(spi);

	/* Chip select logic is inverted from spi_set_cs() */
	if (chip && chip->cs_control)
		chip->cs_control(!enable);

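	/* !enable means the CS is being asserted: select the slave in SER */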
	if (!enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
}

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch. We could use
	 * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but it
	 * doesn't cover the data that has already left the FIFOs and is
	 * still in the shift registers, so the gap is tracked from the
	 * software point of view instead.
	 */
	rxtx_gap =  ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;

	return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
}

static void dw_writer(struct dw_spi *dws)
{
	u32 max;
	u16 txw = 0;

	spin_lock(&dws->buf_lock);
	max = tx_max(dws);
	while (max--) {
		/*
		 * Set the tx word only if the transfer's original "tx" is
		 * not null; (tx_end - len) points back at the original
		 * tx_buf, so it is non-zero only for a real tx buffer.
		 */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		dws->tx += dws->n_bytes;
	}
	spin_unlock(&dws->buf_lock);
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max;
	u16 rxw;

	spin_lock(&dws->buf_lock);
	max = rx_max(dws);
	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		/*
		 * Care about rx only if the transfer's original "rx" is
		 * not null; (rx_end - len) points back at the original
		 * rx_buf.
		 */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;
		}
		dws->rx += dws->n_bytes;
	}
	spin_unlock(&dws->buf_lock);
}

static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
}

static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
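		/* Reading ICR clears the overrun/underrun interrupt sources */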
		dw_readl(dws, DW_SPI_ICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
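	/* All expected rx data has been drained, so the transfer is complete */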
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Enable TX irq always, it will be disabled when RX finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct dw_spi *dws = spi_master_get_devdata(master);
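	/* Only the lower six bits of ISR carry valid interrupt status */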
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

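	/* No message in flight: just silence the TX empty interrupt */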
	if (!master->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

/* Must be called from dw_spi_transfer_one() */
static int poll_transfer(struct dw_spi *dws)
{
	do {
		dw_writer(dws);
		dw_reader(dws);
		cpu_relax();
	} while (dws->rx_end > dws->rx);

	return 0;
}

static int dw_spi_transfer_one(struct spi_master *master,
		struct spi_device *spi, struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_master_get_devdata(master);
	struct chip_data *chip = spi_get_ctldata(spi);
	unsigned long flags;
	u8 imask = 0;
	u16 txlevel = 0;
	u16 clk_div;
	u32 cr0;
	int ret;

	dws->dma_mapped = 0;
	spin_lock_irqsave(&dws->buf_lock, flags);
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->len = transfer->len;
	spin_unlock_irqrestore(&dws->buf_lock, flags);

	/* Ensure dw->rx and dw->rx_end are visible */
	smp_mb();

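	/* The controller must be disabled while CTRL0 and BAUDR are changed */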
	spi_enable_chip(dws, 0);

	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz != chip->speed_hz) {
		/* clk_div doesn't support odd numbers, so force it even */
		clk_div = (dws->max_freq / transfer->speed_hz + 1) & 0xfffe;

		chip->speed_hz = transfer->speed_hz;
		chip->clk_div = clk_div;

		spi_set_clk(dws, chip->clk_div);
	}
	if (transfer->bits_per_word == 8) {
		dws->n_bytes = 1;
		dws->dma_width = 1;
	} else if (transfer->bits_per_word == 16) {
		dws->n_bytes = 2;
		dws->dma_width = 2;
	} else {
		return -EINVAL;
	}
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	cr0 = (transfer->bits_per_word - 1)
		| (chip->type << SPI_FRF_OFFSET)
		| (spi->mode << SPI_MODE_OFFSET)
		| (chip->tmode << SPI_TMOD_OFFSET);

	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (chip->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	dw_writel(dws, DW_SPI_CTRL0, cr0);

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	/*
	 * Interrupt mode:
	 * only the TXEI IRQ needs to be set, as TX and RX always happen
	 * synchronously.
	 */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret < 0) {
			spi_enable_chip(dws, 1);
			return ret;
		}
	} else if (!chip->poll_mode) {
		txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
		dw_writel(dws, DW_SPI_TXFLTR, txlevel);

		/* Set the interrupt mask */
		imask |= SPI_INT_TXEI | SPI_INT_TXOI |
			 SPI_INT_RXUI | SPI_INT_RXOI;
		spi_umask_intr(dws, imask);

		dws->transfer_handler = interrupt_transfer;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);

	if (chip->poll_mode)
		return poll_transfer(dws);

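	/*
	 * Interrupt mode: a positive return tells the SPI core that the
	 * transfer is still in flight and will be finalized from the
	 * interrupt handler via spi_finalize_current_transfer().
	 */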
	return 1;
}

static void dw_spi_handle_err(struct spi_master *master,
		struct spi_message *msg)
{
	struct dw_spi *dws = spi_master_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	int ret;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	/*
	 * Protocol drivers may change the chip settings, so if chip_info
	 * exists, use it.
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;
	}

	chip->tmode = 0; /* Tx & Rx */

	if (gpio_is_valid(spi->cs_gpio)) {
		ret = gpio_direction_output(spi->cs_gpio,
				!(spi->mode & SPI_CS_HIGH));
		if (ret)
			return ret;
	}

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if not set by the interface driver;
	 * per the HW spec the depth can be from 2 to 256.
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFLTR, 0);

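		/*
		 * TXFLTR only holds values up to depth - 1, so the first
		 * value that fails to read back equals the FIFO depth; if
		 * even the first write failed, leave the depth unknown (0).
		 */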
		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}
}

int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_master *master;
	int ret;

	BUG_ON(dws == NULL);

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->dma_inited = 0;
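	/* Physical address of the data register, used for DMA transfers */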
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
	snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
	spin_lock_init(&dws->buf_lock);

	spi_master_set_devdata(master, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master);
	if (ret < 0) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;

	/* Basic HW init */
	spi_hw_init(dev, dws);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
			dws->dma_inited = 0;
		} else {
			master->can_dma = dws->dma_ops->can_dma;
		}
	}

	ret = spi_register_master(master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_master_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_master(dws->master);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_master_suspend(dws->master);
	if (ret)
		return ret;

	spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
	int ret;

	spi_hw_init(&dws->master->dev, dws);
	ret = spi_master_resume(dws->master);
	if (ret)
		dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");