1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 // Copyright (C) 2008 Juergen Beisert
4
5 #include <linux/clk.h>
6 #include <linux/completion.h>
7 #include <linux/delay.h>
8 #include <linux/dmaengine.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/err.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/irq.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/pinctrl/consumer.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi_bitbang.h>
22 #include <linux/types.h>
23 #include <linux/of.h>
24 #include <linux/of_device.h>
25 #include <linux/property.h>
26
27 #include <linux/platform_data/dma-imx.h>
28
29 #define DRIVER_NAME "spi_imx"
30
31 static bool use_dma = true;
32 module_param(use_dma, bool, 0644);
33 MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
34
35 #define MXC_RPM_TIMEOUT 2000 /* 2000ms */
36
37 #define MXC_CSPIRXDATA 0x00
38 #define MXC_CSPITXDATA 0x04
39 #define MXC_CSPICTRL 0x08
40 #define MXC_CSPIINT 0x0c
41 #define MXC_RESET 0x1c
42
43 /* generic defines to abstract from the different register layouts */
44 #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
45 #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
46 #define MXC_INT_RDR BIT(4) /* Receive date threshold interrupt */
47
48 /* The maximum bytes that a sdma BD can transfer. */
49 #define MAX_SDMA_BD_BYTES (1 << 15)
50 #define MX51_ECSPI_CTRL_MAX_BURST 512
51 /* The maximum bytes that IMX53_ECSPI can transfer in slave mode.*/
52 #define MX53_MAX_TRANSFER_BYTES 512
53
/* Supported controller flavours; selects register layout, ops and quirks. */
enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};
63
struct spi_imx_data;

/*
 * Per-devtype operations and capabilities.  One static instance exists per
 * supported controller flavour (see the *_devtype_data tables below).
 */
struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);	/* enable/disable IRQ sources */
	int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
	int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *);
	void (*trigger)(struct spi_imx_data *);		/* start the exchange */
	int (*rx_available)(struct spi_imx_data *);	/* non-zero if RX data pending */
	void (*reset)(struct spi_imx_data *);
	void (*setup_wml)(struct spi_imx_data *);	/* program DMA watermarks */
	void (*disable)(struct spi_imx_data *);
	void (*disable_dma)(struct spi_imx_data *);
	bool has_dmamode;
	bool has_slavemode;
	unsigned int fifo_size;		/* FIFO depth in words */
	bool dynamic_burst;
	enum spi_imx_devtype devtype;
};
82
/* Driver state for one CSPI/eCSPI controller instance. */
struct spi_imx_data {
	struct spi_bitbang bitbang;
	struct device *dev;

	struct completion xfer_done;	/* PIO transfer finished */
	void __iomem *base;
	unsigned long base_phys;

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;		/* input (per) clock rate in Hz */
	unsigned int spi_bus_clk;	/* achieved SCLK rate in Hz */

	unsigned int bits_per_word;
	unsigned int spi_drctl;		/* DRCTL field value for SPI_RDY mode */

	/* count: TX bytes left; remainder: RX bytes left (see buf helpers) */
	unsigned int count, remainder;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;

	/* Slave mode */
	bool slave_mode;
	bool slave_aborted;
	unsigned int slave_burst;	/* bytes remaining in current slave burst */

	/* DMA */
	bool usedma;
	u32 wml;			/* watermark level, see mx51_setup_wml() */
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};
120
is_imx27_cspi(struct spi_imx_data * d)121 static inline int is_imx27_cspi(struct spi_imx_data *d)
122 {
123 return d->devtype_data->devtype == IMX27_CSPI;
124 }
125
is_imx35_cspi(struct spi_imx_data * d)126 static inline int is_imx35_cspi(struct spi_imx_data *d)
127 {
128 return d->devtype_data->devtype == IMX35_CSPI;
129 }
130
is_imx51_ecspi(struct spi_imx_data * d)131 static inline int is_imx51_ecspi(struct spi_imx_data *d)
132 {
133 return d->devtype_data->devtype == IMX51_ECSPI;
134 }
135
is_imx53_ecspi(struct spi_imx_data * d)136 static inline int is_imx53_ecspi(struct spi_imx_data *d)
137 {
138 return d->devtype_data->devtype == IMX53_ECSPI;
139 }
140
/*
 * Generate PIO FIFO accessors for 1/2/4-byte word sizes.  The RX flavour
 * pops one word from the RX FIFO and, if a receive buffer is set, stores
 * it and advances rx_buf; it always decrements 'remainder'.  The TX
 * flavour pushes the next word from tx_buf (or zero when transmitting
 * dummy data), advances tx_buf and decrements 'count'.
 */
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->remainder -= sizeof(type);				\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)
175
/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static const int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96,
	128, 192, 256, 384, 512, 768, 1024};

/* MX21, MX27 */
/*
 * Pick the smallest usable divider (index >= 2, see table comment above)
 * such that fin / divider does not exceed the requested rate fspi.  If even
 * the largest permitted divider (index max - 1 ... max) is too small, the
 * last one scanned is used anyway, i.e. the resulting rate may exceed fspi
 * only when no divider can satisfy it.
 *
 * Returns the register index of the chosen divider and stores the achieved
 * bus frequency in *fres.
 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	*fres = fin / mxc_clkdivs[i];
	return i;
}
195
/* MX1, MX31, MX35, MX51 CSPI */
/*
 * Pick a power-of-two divider (4 << i, i in 0..6) so that fin divided by
 * it does not exceed fspi; if none is large enough the biggest divider is
 * used.  Returns the divider index i and stores the achieved rate in *fres.
 */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	int div = 4;
	int i;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			break;
		div <<= 1;
	}

	*fres = fin / div;
	return i;
}
212
/* Number of buffer bytes consumed per FIFO word for a given word length. */
static int spi_imx_bytes_per_word(const int bits_per_word)
{
	if (bits_per_word > 16)
		return 4;

	return (bits_per_word > 8) ? 2 : 1;
}
222
spi_imx_can_dma(struct spi_master * master,struct spi_device * spi,struct spi_transfer * transfer)223 static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
224 struct spi_transfer *transfer)
225 {
226 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
227
228 if (!use_dma || master->fallback)
229 return false;
230
231 if (!master->dma_rx)
232 return false;
233
234 if (spi_imx->slave_mode)
235 return false;
236
237 if (transfer->len < spi_imx->devtype_data->fifo_size)
238 return false;
239
240 spi_imx->dynamic_burst = 0;
241
242 return true;
243 }
244
245 /*
246 * Note the number of natively supported chip selects for MX51 is 4. Some
247 * devices may have less actual SS pins but the register map supports 4. When
248 * using gpio chip selects the cs values passed into the macros below can go
249 * outside the range 0 - 3. We therefore need to limit the cs value to avoid
250 * corrupting bits outside the allocated locations.
251 *
252 * The simplest way to do this is to just mask the cs bits to 2 bits. This
253 * still allows all 4 native chip selects to work as well as gpio chip selects
254 * (which can use any of the 4 chip select configurations).
255 */
256
257 #define MX51_ECSPI_CTRL 0x08
258 #define MX51_ECSPI_CTRL_ENABLE (1 << 0)
259 #define MX51_ECSPI_CTRL_XCH (1 << 2)
260 #define MX51_ECSPI_CTRL_SMC (1 << 3)
261 #define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
262 #define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
263 #define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
264 #define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
265 #define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18)
266 #define MX51_ECSPI_CTRL_BL_OFFSET 20
267 #define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
268
269 #define MX51_ECSPI_CONFIG 0x0c
270 #define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0))
271 #define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
272 #define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
273 #define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
274 #define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
275
276 #define MX51_ECSPI_INT 0x10
277 #define MX51_ECSPI_INT_TEEN (1 << 0)
278 #define MX51_ECSPI_INT_RREN (1 << 3)
279 #define MX51_ECSPI_INT_RDREN (1 << 4)
280
281 #define MX51_ECSPI_DMA 0x14
282 #define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
283 #define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
284 #define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
285
286 #define MX51_ECSPI_DMA_TEDEN (1 << 7)
287 #define MX51_ECSPI_DMA_RXDEN (1 << 23)
288 #define MX51_ECSPI_DMA_RXTDEN (1 << 31)
289
290 #define MX51_ECSPI_STAT 0x18
291 #define MX51_ECSPI_STAT_RR (1 << 3)
292
293 #define MX51_ECSPI_TESTREG 0x20
294 #define MX51_ECSPI_TESTREG_LBC BIT(31)
295
/*
 * Pop one 32-bit word from the RX FIFO for a dynamic-burst transfer.
 * On little-endian hosts the word is byte-swapped (8-bit words) or
 * half-word-swapped (16-bit words) before being stored — presumably
 * because the controller packs burst data MSB-first; confirm against
 * the eCSPI chapter of the reference manual.
 */
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			val = cpu_to_be32(val);
		else if (bytes_per_word == 2)
			val = (val << 16) | (val >> 16);
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	spi_imx->remainder -= sizeof(u32);
}
317
/*
 * RX dispatcher for dynamic-burst transfers: while the remaining byte
 * count is a multiple of 4, read whole swapped u32 words; the final
 * unaligned tail is read as a single u16 (16-bit words) or unpacked
 * byte by byte from one last FIFO word (8-bit words).
 */
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	val = readl(spi_imx->base + MXC_CSPIRXDATA);

	/* Tail bytes arrive packed high-to-low within the last word. */
	while (unaligned--) {
		if (spi_imx->rx_buf) {
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}
345
/*
 * Push one 32-bit word to the TX FIFO for a dynamic-burst transfer,
 * applying the same little-endian byte/half-word swap as the RX side
 * (see spi_imx_buf_rx_swap_u32).  Transmits zero when no TX buffer
 * is set.
 */
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		val = cpu_to_be32(val);
	else if (bytes_per_word == 2)
		val = (val << 16) | (val >> 16);
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
369
/*
 * TX dispatcher for dynamic-burst transfers, mirror of
 * spi_imx_buf_rx_swap(): whole swapped u32 words while count is a
 * multiple of 4, a single u16 for a 16-bit-word tail, otherwise the
 * remaining 1-3 bytes packed high-to-low into one final FIFO word.
 */
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	while (unaligned--) {
		if (spi_imx->tx_buf) {
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
397
/*
 * Slave-mode RX for the i.MX53 eCSPI: read one FIFO word (big-endian on
 * the wire) and copy only the bytes still expected in the current burst.
 * The last word of a burst may be partial and is right-justified in the
 * register, hence the offset into &val.
 */
static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));

	if (spi_imx->rx_buf) {
		int n_bytes = spi_imx->slave_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);

		spi_imx->rx_buf += n_bytes;
		spi_imx->slave_burst -= n_bytes;
	}

	spi_imx->remainder -= sizeof(u32);
}
417
/*
 * Slave-mode TX for the i.MX53 eCSPI, mirror of mx53_ecspi_rx_slave():
 * pack the next 1-4 bytes right-justified into one FIFO word, convert
 * to wire (big-endian) order and push it.  Transmits zero when no TX
 * buffer is set.
 */
static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	int n_bytes = spi_imx->count % sizeof(val);

	if (!n_bytes)
		n_bytes = sizeof(val);

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		val = cpu_to_be32(val);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
437
/* MX51 eCSPI */
/*
 * Compute the pre/post divider pair that gets the bus clock as close to
 * (but not above) fspi as possible.  Returns the two fields ready to be
 * OR-ed into the CTRL register, or 0xff when the rate is unreachable
 * (post divider would overflow its 4-bit field).  The achieved rate is
 * stored in *fres.
 */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	fspi = min(fspi, fin);

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
				fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
475
mx51_ecspi_intctrl(struct spi_imx_data * spi_imx,int enable)476 static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
477 {
478 unsigned val = 0;
479
480 if (enable & MXC_INT_TE)
481 val |= MX51_ECSPI_INT_TEEN;
482
483 if (enable & MXC_INT_RR)
484 val |= MX51_ECSPI_INT_RREN;
485
486 if (enable & MXC_INT_RDR)
487 val |= MX51_ECSPI_INT_RDREN;
488
489 writel(val, spi_imx->base + MX51_ECSPI_INT);
490 }
491
mx51_ecspi_trigger(struct spi_imx_data * spi_imx)492 static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
493 {
494 u32 reg;
495
496 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
497 reg |= MX51_ECSPI_CTRL_XCH;
498 writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
499 }
500
/* Clear all DMA watermarks and request enables in one write. */
static void mx51_disable_dma(struct spi_imx_data *spi_imx)
{
	writel(0, spi_imx->base + MX51_ECSPI_DMA);
}
505
mx51_ecspi_disable(struct spi_imx_data * spi_imx)506 static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
507 {
508 u32 ctrl;
509
510 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
511 ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
512 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
513 }
514
/*
 * Per-message setup for the eCSPI: program CTRL (mode, SPI_RDY handling,
 * chip select), TESTREG (loopback) and CONFIG (clock polarity/phase, SS
 * polarity, burst completion), then wait for the CONFIG changes to
 * propagate.  The register write order is significant: CTRL with ENABLE
 * set must be written before any other register.  Always returns 0.
 */
static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
				      struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 min_speed_hz = ~0U;
	u32 testreg, delay;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);

	/* set Master or Slave mode */
	if (spi_imx->slave_mode)
		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
	else
		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);

	/*
	 * The ctrl register must be written first, with the EN bit set other
	 * registers must not be written to.
	 */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		testreg |= MX51_ECSPI_TESTREG_LBC;
	else
		testreg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);

	/*
	 * eCSPI burst completion by Chip Select signal in Slave mode
	 * is not functional for imx53 Soc, config SPI burst completed when
	 * BURST_LENGTH + 1 bits are received
	 */
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
	else
		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);

	if (spi->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	}

	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK clock just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticable if the SCLK clock run very slow. In such a case, if
	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
	 * be asserted before the SCLK polarity changes, which would disrupt
	 * the SPI communication as the device on the other end would consider
	 * the change of SCLK polarity as a clock tick already.
	 *
	 * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
	 * callback, iterate over all the transfers in spi_message, find the
	 * one with lowest bus frequency, and use that bus frequency for the
	 * delay calculation. In case all transfers have speed_hz == 0, then
	 * min_speed_hz is ~0 and the resulting delay is zero.
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->speed_hz)
			continue;
		min_speed_hz = min(xfer->speed_hz, min_speed_hz);
	}

	delay = (2 * 1000000) / min_speed_hz;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	return 0;
}
614
/*
 * Per-transfer CTRL setup: burst length (in bits per burst — the whole
 * slave burst for imx53 slave mode, one word otherwise), clock dividers
 * for the requested bus clock, and SMC (start on TX-FIFO write) when the
 * DMA path is used.  Updates spi_bus_clk with the achieved rate.
 * Always returns 0.
 */
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u32 clk;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		ctrl |= (spi_imx->slave_burst * 8 - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	else
		ctrl |= (spi_imx->bits_per_word - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
	spi_imx->spi_bus_clk = clk;

	if (spi_imx->usedma)
		ctrl |= MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	return 0;
}
643
/*
 * Program the DMA watermark levels and enable the TX/RX/RXT DMA requests.
 * NOTE(review): the RX watermark is set one word below the TX watermark;
 * this asymmetry looks deliberate (likely an SDMA errata workaround) —
 * confirm against the relevant i.MX errata before changing.
 */
static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}
656
/* Non-zero when the RX FIFO holds at least one word (STAT.RR set). */
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}
661
/* Reset state between transfers by draining any stale RX FIFO words. */
static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
668
669 #define MX31_INTREG_TEEN (1 << 0)
670 #define MX31_INTREG_RREN (1 << 3)
671
672 #define MX31_CSPICTRL_ENABLE (1 << 0)
673 #define MX31_CSPICTRL_MASTER (1 << 1)
674 #define MX31_CSPICTRL_XCH (1 << 2)
675 #define MX31_CSPICTRL_SMC (1 << 3)
676 #define MX31_CSPICTRL_POL (1 << 4)
677 #define MX31_CSPICTRL_PHA (1 << 5)
678 #define MX31_CSPICTRL_SSCTL (1 << 6)
679 #define MX31_CSPICTRL_SSPOL (1 << 7)
680 #define MX31_CSPICTRL_BC_SHIFT 8
681 #define MX35_CSPICTRL_BL_SHIFT 20
682 #define MX31_CSPICTRL_CS_SHIFT 24
683 #define MX35_CSPICTRL_CS_SHIFT 12
684 #define MX31_CSPICTRL_DR_SHIFT 16
685
686 #define MX31_CSPI_DMAREG 0x10
687 #define MX31_DMAREG_RH_DEN (1<<4)
688 #define MX31_DMAREG_TH_DEN (1<<1)
689
690 #define MX31_CSPISTATUS 0x14
691 #define MX31_STATUS_RR (1 << 3)
692
693 #define MX31_CSPI_TESTREG 0x1C
694 #define MX31_TEST_LBC (1 << 14)
695
696 /* These functions also work for the i.MX35, but be aware that
697 * the i.MX35 has a slightly different register layout for bits
698 * we do not use here.
699 */
mx31_intctrl(struct spi_imx_data * spi_imx,int enable)700 static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
701 {
702 unsigned int val = 0;
703
704 if (enable & MXC_INT_TE)
705 val |= MX31_INTREG_TEEN;
706 if (enable & MXC_INT_RR)
707 val |= MX31_INTREG_RREN;
708
709 writel(val, spi_imx->base + MXC_CSPIINT);
710 }
711
mx31_trigger(struct spi_imx_data * spi_imx)712 static void mx31_trigger(struct spi_imx_data *spi_imx)
713 {
714 unsigned int reg;
715
716 reg = readl(spi_imx->base + MXC_CSPICTRL);
717 reg |= MX31_CSPICTRL_XCH;
718 writel(reg, spi_imx->base + MXC_CSPICTRL);
719 }
720
/* No per-message setup needed on MX31/MX35; everything is per-transfer. */
static int mx31_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
726
/*
 * Build and write the MX31/MX35 CSPI control word: clock divider, burst
 * length (field layout differs between i.MX31 and i.MX35), SPI mode bits,
 * native chip select (only when no GPIO CS is used), loopback, and — when
 * DMA is used — the half-FIFO DMA request thresholds.  Updates
 * spi_bus_clk with the achieved rate.  Always returns 0.
 */
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (!spi->cs_gpiod)
		reg |= (spi->chip_select) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}
778
/* Non-zero when the RX FIFO holds at least one word (STATUS.RR set). */
static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}
783
/* Reset state between transfers by draining any stale RX FIFO words. */
static void mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
790
791 #define MX21_INTREG_RR (1 << 4)
792 #define MX21_INTREG_TEEN (1 << 9)
793 #define MX21_INTREG_RREN (1 << 13)
794
795 #define MX21_CSPICTRL_POL (1 << 5)
796 #define MX21_CSPICTRL_PHA (1 << 6)
797 #define MX21_CSPICTRL_SSPOL (1 << 8)
798 #define MX21_CSPICTRL_XCH (1 << 9)
799 #define MX21_CSPICTRL_ENABLE (1 << 10)
800 #define MX21_CSPICTRL_MASTER (1 << 11)
801 #define MX21_CSPICTRL_DR_SHIFT 14
802 #define MX21_CSPICTRL_CS_SHIFT 19
803
mx21_intctrl(struct spi_imx_data * spi_imx,int enable)804 static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
805 {
806 unsigned int val = 0;
807
808 if (enable & MXC_INT_TE)
809 val |= MX21_INTREG_TEEN;
810 if (enable & MXC_INT_RR)
811 val |= MX21_INTREG_RREN;
812
813 writel(val, spi_imx->base + MXC_CSPIINT);
814 }
815
mx21_trigger(struct spi_imx_data * spi_imx)816 static void mx21_trigger(struct spi_imx_data *spi_imx)
817 {
818 unsigned int reg;
819
820 reg = readl(spi_imx->base + MXC_CSPICTRL);
821 reg |= MX21_CSPICTRL_XCH;
822 writel(reg, spi_imx->base + MXC_CSPICTRL);
823 }
824
/* No per-message setup needed on MX21/MX27; everything is per-transfer. */
static int mx21_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
830
/*
 * Build and write the MX21/MX27 CSPI control word: clock divider (the
 * usable divider table is shorter on i.MX27: 16 entries vs 18), burst
 * length, SPI mode bits and the native chip select (only when no GPIO CS
 * is used).  Updates spi_bus_clk with the achieved rate.  Always
 * returns 0.
 */
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (!spi->cs_gpiod)
		reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
857
/* Non-zero when the RX FIFO holds at least one word (INTREG.RR set). */
static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}
862
/* Trigger the controller's soft reset via the RESET register. */
static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
867
868 #define MX1_INTREG_RR (1 << 3)
869 #define MX1_INTREG_TEEN (1 << 8)
870 #define MX1_INTREG_RREN (1 << 11)
871
872 #define MX1_CSPICTRL_POL (1 << 4)
873 #define MX1_CSPICTRL_PHA (1 << 5)
874 #define MX1_CSPICTRL_XCH (1 << 8)
875 #define MX1_CSPICTRL_ENABLE (1 << 9)
876 #define MX1_CSPICTRL_MASTER (1 << 10)
877 #define MX1_CSPICTRL_DR_SHIFT 13
878
mx1_intctrl(struct spi_imx_data * spi_imx,int enable)879 static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
880 {
881 unsigned int val = 0;
882
883 if (enable & MXC_INT_TE)
884 val |= MX1_INTREG_TEEN;
885 if (enable & MXC_INT_RR)
886 val |= MX1_INTREG_RREN;
887
888 writel(val, spi_imx->base + MXC_CSPIINT);
889 }
890
mx1_trigger(struct spi_imx_data * spi_imx)891 static void mx1_trigger(struct spi_imx_data *spi_imx)
892 {
893 unsigned int reg;
894
895 reg = readl(spi_imx->base + MXC_CSPICTRL);
896 reg |= MX1_CSPICTRL_XCH;
897 writel(reg, spi_imx->base + MXC_CSPICTRL);
898 }
899
/* No per-message setup needed on MX1; everything is per-transfer. */
static int mx1_prepare_message(struct spi_imx_data *spi_imx,
			       struct spi_message *msg)
{
	return 0;
}
905
/*
 * Build and write the MX1 CSPI control word: clock divider, burst length
 * and clock polarity/phase.  The MX1 register has no SS-polarity or
 * native-CS fields, so those modes are not handled here.  Updates
 * spi_bus_clk with the achieved rate.  Always returns 0.
 */
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
927
/* Non-zero when the RX FIFO holds at least one word (INTREG.RR set). */
static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}
932
/* Trigger the controller's soft reset via the RESET register. */
static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
937
/*
 * Per-SoC devtype tables: wire the ops above to each controller flavour's
 * capabilities (FIFO depth, DMA/slave-mode/dynamic-burst support).
 */
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.prepare_message = mx1_prepare_message,
	.prepare_transfer = mx1_prepare_transfer,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = true,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.disable_dma = mx51_disable_dma,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};

static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.disable_dma = mx51_disable_dma,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX53_ECSPI,
};
1041
/*
 * Platform-bus (non-DT, board-file) device id table; driver_data carries
 * the per-SoC devtype_data used when of_match_device() finds no match.
 */
static const struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		.name = "imx53-ecspi",
		.driver_data = (kernel_ulong_t) &imx53_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};
1068
/* Device-tree match table; .data points at the per-SoC devtype_data. */
static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
1080
spi_imx_set_burst_len(struct spi_imx_data * spi_imx,int n_bits)1081 static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
1082 {
1083 u32 ctrl;
1084
1085 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
1086 ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
1087 ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
1088 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
1089 }
1090
/*
 * Fill the TX FIFO for PIO mode and, in master mode, kick off the burst.
 * With dynamic burst the hardware burst length is reprogrammed per chunk
 * (unaligned remainder first, then MX51_ECSPI_CTRL_MAX_BURST chunks);
 * ->remainder tracks how much of the current burst is still outstanding.
 */
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	unsigned int burst_len, fifo_words;

	/* Dynamic burst always feeds the FIFO in 32-bit words. */
	if (spi_imx->dynamic_burst)
		fifo_words = 4;
	else
		fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
	/*
	 * Reload the FIFO when the remaining bytes to be transferred in the
	 * current burst is 0. This only applies when bits_per_word is a
	 * multiple of 8.
	 */
	if (!spi_imx->remainder) {
		if (spi_imx->dynamic_burst) {

			/* We need to deal unaligned data first */
			burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;

			if (!burst_len)
				burst_len = MX51_ECSPI_CTRL_MAX_BURST;

			/* Burst length register is in bits, not bytes. */
			spi_imx_set_burst_len(spi_imx, burst_len * 8);

			spi_imx->remainder = burst_len;
		} else {
			spi_imx->remainder = fifo_words;
		}
	}

	while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
		if (!spi_imx->count)
			break;
		/* Don't push more FIFO entries than the current burst needs. */
		if (spi_imx->dynamic_burst &&
		    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
						    fifo_words))
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	/* In slave mode the remote master clocks the data out. */
	if (!spi_imx->slave_mode)
		spi_imx->devtype_data->trigger(spi_imx);
}
1135
spi_imx_isr(int irq,void * dev_id)1136 static irqreturn_t spi_imx_isr(int irq, void *dev_id)
1137 {
1138 struct spi_imx_data *spi_imx = dev_id;
1139
1140 while (spi_imx->txfifo &&
1141 spi_imx->devtype_data->rx_available(spi_imx)) {
1142 spi_imx->rx(spi_imx);
1143 spi_imx->txfifo--;
1144 }
1145
1146 if (spi_imx->count) {
1147 spi_imx_push(spi_imx);
1148 return IRQ_HANDLED;
1149 }
1150
1151 if (spi_imx->txfifo) {
1152 /* No data left to push, but still waiting for rx data,
1153 * enable receive data available interrupt.
1154 */
1155 spi_imx->devtype_data->intctrl(
1156 spi_imx, MXC_INT_RR);
1157 return IRQ_HANDLED;
1158 }
1159
1160 spi_imx->devtype_data->intctrl(spi_imx, 0);
1161 complete(&spi_imx->xfer_done);
1162
1163 return IRQ_HANDLED;
1164 }
1165
spi_imx_dma_configure(struct spi_master * master)1166 static int spi_imx_dma_configure(struct spi_master *master)
1167 {
1168 int ret;
1169 enum dma_slave_buswidth buswidth;
1170 struct dma_slave_config rx = {}, tx = {};
1171 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1172
1173 switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
1174 case 4:
1175 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
1176 break;
1177 case 2:
1178 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
1179 break;
1180 case 1:
1181 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
1182 break;
1183 default:
1184 return -EINVAL;
1185 }
1186
1187 tx.direction = DMA_MEM_TO_DEV;
1188 tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
1189 tx.dst_addr_width = buswidth;
1190 tx.dst_maxburst = spi_imx->wml;
1191 ret = dmaengine_slave_config(master->dma_tx, &tx);
1192 if (ret) {
1193 dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
1194 return ret;
1195 }
1196
1197 rx.direction = DMA_DEV_TO_MEM;
1198 rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
1199 rx.src_addr_width = buswidth;
1200 rx.src_maxburst = spi_imx->wml;
1201 ret = dmaengine_slave_config(master->dma_rx, &rx);
1202 if (ret) {
1203 dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
1204 return ret;
1205 }
1206
1207 return 0;
1208 }
1209
spi_imx_setupxfer(struct spi_device * spi,struct spi_transfer * t)1210 static int spi_imx_setupxfer(struct spi_device *spi,
1211 struct spi_transfer *t)
1212 {
1213 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1214
1215 if (!t)
1216 return 0;
1217
1218 if (!t->speed_hz) {
1219 if (!spi->max_speed_hz) {
1220 dev_err(&spi->dev, "no speed_hz provided!\n");
1221 return -EINVAL;
1222 }
1223 dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
1224 spi_imx->spi_bus_clk = spi->max_speed_hz;
1225 } else
1226 spi_imx->spi_bus_clk = t->speed_hz;
1227
1228 spi_imx->bits_per_word = t->bits_per_word;
1229
1230 /*
1231 * Initialize the functions for transfer. To transfer non byte-aligned
1232 * words, we have to use multiple word-size bursts, we can't use
1233 * dynamic_burst in that case.
1234 */
1235 if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
1236 (spi_imx->bits_per_word == 8 ||
1237 spi_imx->bits_per_word == 16 ||
1238 spi_imx->bits_per_word == 32)) {
1239
1240 spi_imx->rx = spi_imx_buf_rx_swap;
1241 spi_imx->tx = spi_imx_buf_tx_swap;
1242 spi_imx->dynamic_burst = 1;
1243
1244 } else {
1245 if (spi_imx->bits_per_word <= 8) {
1246 spi_imx->rx = spi_imx_buf_rx_u8;
1247 spi_imx->tx = spi_imx_buf_tx_u8;
1248 } else if (spi_imx->bits_per_word <= 16) {
1249 spi_imx->rx = spi_imx_buf_rx_u16;
1250 spi_imx->tx = spi_imx_buf_tx_u16;
1251 } else {
1252 spi_imx->rx = spi_imx_buf_rx_u32;
1253 spi_imx->tx = spi_imx_buf_tx_u32;
1254 }
1255 spi_imx->dynamic_burst = 0;
1256 }
1257
1258 if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
1259 spi_imx->usedma = true;
1260 else
1261 spi_imx->usedma = false;
1262
1263 if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
1264 spi_imx->rx = mx53_ecspi_rx_slave;
1265 spi_imx->tx = mx53_ecspi_tx_slave;
1266 spi_imx->slave_burst = t->len;
1267 }
1268
1269 spi_imx->devtype_data->prepare_transfer(spi_imx, spi);
1270
1271 return 0;
1272 }
1273
spi_imx_sdma_exit(struct spi_imx_data * spi_imx)1274 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
1275 {
1276 struct spi_master *master = spi_imx->bitbang.master;
1277
1278 if (master->dma_rx) {
1279 dma_release_channel(master->dma_rx);
1280 master->dma_rx = NULL;
1281 }
1282
1283 if (master->dma_tx) {
1284 dma_release_channel(master->dma_tx);
1285 master->dma_tx = NULL;
1286 }
1287 }
1288
spi_imx_sdma_init(struct device * dev,struct spi_imx_data * spi_imx,struct spi_master * master)1289 static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
1290 struct spi_master *master)
1291 {
1292 int ret;
1293
1294 /* use pio mode for i.mx6dl chip TKT238285 */
1295 if (of_machine_is_compatible("fsl,imx6dl"))
1296 return 0;
1297
1298 spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
1299
1300 /* Prepare for TX DMA: */
1301 master->dma_tx = dma_request_chan(dev, "tx");
1302 if (IS_ERR(master->dma_tx)) {
1303 ret = PTR_ERR(master->dma_tx);
1304 dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
1305 master->dma_tx = NULL;
1306 goto err;
1307 }
1308
1309 /* Prepare for RX : */
1310 master->dma_rx = dma_request_chan(dev, "rx");
1311 if (IS_ERR(master->dma_rx)) {
1312 ret = PTR_ERR(master->dma_rx);
1313 dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
1314 master->dma_rx = NULL;
1315 goto err;
1316 }
1317
1318 init_completion(&spi_imx->dma_rx_completion);
1319 init_completion(&spi_imx->dma_tx_completion);
1320 master->can_dma = spi_imx_can_dma;
1321 master->max_dma_len = MAX_SDMA_BD_BYTES;
1322 spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
1323 SPI_MASTER_MUST_TX;
1324
1325 return 0;
1326 err:
1327 spi_imx_sdma_exit(spi_imx);
1328 return ret;
1329 }
1330
spi_imx_dma_rx_callback(void * cookie)1331 static void spi_imx_dma_rx_callback(void *cookie)
1332 {
1333 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1334
1335 complete(&spi_imx->dma_rx_completion);
1336 }
1337
spi_imx_dma_tx_callback(void * cookie)1338 static void spi_imx_dma_tx_callback(void *cookie)
1339 {
1340 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1341
1342 complete(&spi_imx->dma_tx_completion);
1343 }
1344
spi_imx_calculate_timeout(struct spi_imx_data * spi_imx,int size)1345 static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
1346 {
1347 unsigned long timeout = 0;
1348
1349 /* Time with actual data transfer and CS change delay related to HW */
1350 timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
1351
1352 /* Add extra second for scheduler related activities */
1353 timeout += 1;
1354
1355 /* Double calculated timeout */
1356 return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
1357 }
1358
/*
 * Run one transfer via SDMA. Picks a watermark level that divides the last
 * RX scatterlist entry evenly (so no tail bytes are left in the FIFO),
 * configures both channels, submits RX before TX (TX starts the wire
 * traffic), then waits for both completions with a calculated timeout.
 *
 * Returns transfer->len on success, -ETIMEDOUT on a stuck DMA, or a
 * negative errno with SPI_TRANS_FAIL_NO_START set when the transfer never
 * started (caller may fall back to PIO in that case).
 */
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
	unsigned int bytes_per_word, i;
	int ret;

	/* Get the right burst length from the last sg to ensure no tail data */
	bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
	for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
		if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
			break;
	}
	/* Use 1 as wml in case no available burst length got */
	if (i == 0)
		i = 1;

	spi_imx->wml =  i;

	ret = spi_imx_dma_configure(master);
	if (ret)
		goto dma_failure_no_start;

	/* DMA requires the devtype to program the watermark into hardware. */
	if (!spi_imx->devtype_data->setup_wml) {
		dev_err(spi_imx->dev, "No setup_wml()?\n");
		ret = -EINVAL;
		goto dma_failure_no_start;
	}
	spi_imx->devtype_data->setup_wml(spi_imx);

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		ret = -EINVAL;
		goto dma_failure_no_start;
	}

	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(master->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		/*
		 * RX is already issued, so no PIO fallback here: tear both
		 * channels down and fail the transfer outright.
		 */
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -EINVAL;
	}

	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_tx);
	reinit_completion(&spi_imx->dma_tx_completion);
	dma_async_issue_pending(master->dma_tx);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	/* Wait SDMA to finish the data transfer.*/
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
						transfer_timeout);
	if (!timeout) {
		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&master->dev, "I/O Error in DMA RX\n");
		/* Reset the controller so the FIFOs are in a known state. */
		spi_imx->devtype_data->reset(spi_imx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	return transfer->len;
/* fallback to pio */
dma_failure_no_start:
	transfer->error |= SPI_TRANS_FAIL_NO_START;
	return ret;
}
1454
spi_imx_pio_transfer(struct spi_device * spi,struct spi_transfer * transfer)1455 static int spi_imx_pio_transfer(struct spi_device *spi,
1456 struct spi_transfer *transfer)
1457 {
1458 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1459 unsigned long transfer_timeout;
1460 unsigned long timeout;
1461
1462 spi_imx->tx_buf = transfer->tx_buf;
1463 spi_imx->rx_buf = transfer->rx_buf;
1464 spi_imx->count = transfer->len;
1465 spi_imx->txfifo = 0;
1466 spi_imx->remainder = 0;
1467
1468 reinit_completion(&spi_imx->xfer_done);
1469
1470 spi_imx_push(spi_imx);
1471
1472 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
1473
1474 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
1475
1476 timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
1477 transfer_timeout);
1478 if (!timeout) {
1479 dev_err(&spi->dev, "I/O Error in PIO\n");
1480 spi_imx->devtype_data->reset(spi_imx);
1481 return -ETIMEDOUT;
1482 }
1483
1484 return transfer->len;
1485 }
1486
spi_imx_pio_transfer_slave(struct spi_device * spi,struct spi_transfer * transfer)1487 static int spi_imx_pio_transfer_slave(struct spi_device *spi,
1488 struct spi_transfer *transfer)
1489 {
1490 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1491 int ret = transfer->len;
1492
1493 if (is_imx53_ecspi(spi_imx) &&
1494 transfer->len > MX53_MAX_TRANSFER_BYTES) {
1495 dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
1496 MX53_MAX_TRANSFER_BYTES);
1497 return -EMSGSIZE;
1498 }
1499
1500 spi_imx->tx_buf = transfer->tx_buf;
1501 spi_imx->rx_buf = transfer->rx_buf;
1502 spi_imx->count = transfer->len;
1503 spi_imx->txfifo = 0;
1504 spi_imx->remainder = 0;
1505
1506 reinit_completion(&spi_imx->xfer_done);
1507 spi_imx->slave_aborted = false;
1508
1509 spi_imx_push(spi_imx);
1510
1511 spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
1512
1513 if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
1514 spi_imx->slave_aborted) {
1515 dev_dbg(&spi->dev, "interrupted\n");
1516 ret = -EINTR;
1517 }
1518
1519 /* ecspi has a HW issue when works in Slave mode,
1520 * after 64 words writtern to TXFIFO, even TXFIFO becomes empty,
1521 * ECSPI_TXDATA keeps shift out the last word data,
1522 * so we have to disable ECSPI when in slave mode after the
1523 * transfer completes
1524 */
1525 if (spi_imx->devtype_data->disable)
1526 spi_imx->devtype_data->disable(spi_imx);
1527
1528 return ret;
1529 }
1530
spi_imx_transfer(struct spi_device * spi,struct spi_transfer * transfer)1531 static int spi_imx_transfer(struct spi_device *spi,
1532 struct spi_transfer *transfer)
1533 {
1534 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1535
1536 transfer->effective_speed_hz = spi_imx->spi_bus_clk;
1537
1538 /* flush rxfifo before transfer */
1539 while (spi_imx->devtype_data->rx_available(spi_imx))
1540 readl(spi_imx->base + MXC_CSPIRXDATA);
1541
1542 if (spi_imx->slave_mode)
1543 return spi_imx_pio_transfer_slave(spi, transfer);
1544
1545 if (spi_imx->usedma)
1546 return spi_imx_dma_transfer(spi_imx, transfer);
1547
1548 return spi_imx_pio_transfer(spi, transfer);
1549 }
1550
spi_imx_setup(struct spi_device * spi)1551 static int spi_imx_setup(struct spi_device *spi)
1552 {
1553 dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
1554 spi->mode, spi->bits_per_word, spi->max_speed_hz);
1555
1556 return 0;
1557 }
1558
/* Per-device cleanup hook: no per-device state is allocated, so no-op. */
static void spi_imx_cleanup(struct spi_device *spi)
{
}
1562
1563 static int
spi_imx_prepare_message(struct spi_master * master,struct spi_message * msg)1564 spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
1565 {
1566 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1567 int ret;
1568
1569 ret = pm_runtime_resume_and_get(spi_imx->dev);
1570 if (ret < 0) {
1571 dev_err(spi_imx->dev, "failed to enable clock\n");
1572 return ret;
1573 }
1574
1575 ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
1576 if (ret) {
1577 pm_runtime_mark_last_busy(spi_imx->dev);
1578 pm_runtime_put_autosuspend(spi_imx->dev);
1579 }
1580
1581 return ret;
1582 }
1583
1584 static int
spi_imx_unprepare_message(struct spi_master * master,struct spi_message * msg)1585 spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
1586 {
1587 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1588
1589 pm_runtime_mark_last_busy(spi_imx->dev);
1590 pm_runtime_put_autosuspend(spi_imx->dev);
1591 return 0;
1592 }
1593
spi_imx_slave_abort(struct spi_master * master)1594 static int spi_imx_slave_abort(struct spi_master *master)
1595 {
1596 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1597
1598 spi_imx->slave_aborted = true;
1599 complete(&spi_imx->xfer_done);
1600
1601 return 0;
1602 }
1603
/*
 * Probe: allocate a master (or slave controller when the DT "spi-slave"
 * property is set and the devtype supports it), map registers, request the
 * IRQ and clocks, enable runtime PM with autosuspend, optionally set up
 * SDMA, reset the controller and register via spi_bitbang_start().
 */
static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int ret, irq, spi_drctl;
	/* DT match wins; fall back to platform id_entry for board files. */
	const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
	bool slave_mode;
	u32 val;

	slave_mode = devtype_data->has_slavemode &&
			of_property_read_bool(np, "spi-slave");
	if (slave_mode)
		master = spi_alloc_slave(&pdev->dev,
					 sizeof(struct spi_imx_data));
	else
		master = spi_alloc_master(&pdev->dev,
					  sizeof(struct spi_imx_data));
	if (!master)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
	if ((ret < 0) || (spi_drctl >= 0x3)) {
		/* '11' is reserved */
		spi_drctl = 0;
	}

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = np ? -1 : pdev->id;
	master->use_gpio_descriptors = true;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;
	spi_imx->dev = &pdev->dev;
	spi_imx->slave_mode = slave_mode;

	spi_imx->devtype_data = devtype_data;

	/*
	 * Get number of chip selects from device properties. This can be
	 * coming from device tree or boardfiles, if it is not defined,
	 * a default value of 3 chip selects will be used, as all the legacy
	 * board files have <= 3 chip selects.
	 */
	if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
		master->num_chipselect = val;
	else
		master->num_chipselect = 3;

	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
					     | SPI_NO_CS;
	/* Only these IP revisions support loopback and the DRCTL/ready pin. */
	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
	    is_imx53_ecspi(spi_imx))
		spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;

	spi_imx->spi_drctl = spi_drctl;

	init_completion(&spi_imx->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}
	spi_imx->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	/*
	 * Clocks are already on, so mark the device active before enabling
	 * runtime PM; the noresume reference is dropped at the end of probe.
	 */
	pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(spi_imx->dev);
	pm_runtime_get_noresume(spi_imx->dev);
	pm_runtime_set_active(spi_imx->dev);
	pm_runtime_enable(spi_imx->dev);

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx35 and i.mx6 now, can remove the constraint
	 * if validated on other chips.
	 */
	if (spi_imx->devtype_data->has_dmamode) {
		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
		if (ret == -EPROBE_DEFER)
			goto out_runtime_pm_put;

		/* Any other DMA setup failure just means PIO-only operation. */
		if (ret < 0)
			dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "bitbang start failed\n");
		goto out_bitbang_start;
	}

	pm_runtime_mark_last_busy(spi_imx->dev);
	pm_runtime_put_autosuspend(spi_imx->dev);

	return ret;

out_bitbang_start:
	if (spi_imx->devtype_data->has_dmamode)
		spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
	/* Undo runtime PM state, then drop the still-held clock references. */
	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(spi_imx->dev);

	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}
1770
/*
 * Remove: unregister the bitbang controller, quiesce the hardware (with
 * the clocks resumed if possible), tear down runtime PM and DMA, and drop
 * the master reference.
 */
static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	spi_bitbang_stop(&spi_imx->bitbang);

	/* Need the clocks on to touch MXC_CSPICTRL; skip if resume fails. */
	ret = pm_runtime_get_sync(spi_imx->dev);
	if (ret >= 0)
		writel(0, spi_imx->base + MXC_CSPICTRL);
	else
		dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");

	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_put_sync(spi_imx->dev);
	pm_runtime_disable(spi_imx->dev);

	/* Safe even when DMA was never set up: exit checks for NULL. */
	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}
1794
spi_imx_runtime_resume(struct device * dev)1795 static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
1796 {
1797 struct spi_master *master = dev_get_drvdata(dev);
1798 struct spi_imx_data *spi_imx;
1799 int ret;
1800
1801 spi_imx = spi_master_get_devdata(master);
1802
1803 ret = clk_prepare_enable(spi_imx->clk_per);
1804 if (ret)
1805 return ret;
1806
1807 ret = clk_prepare_enable(spi_imx->clk_ipg);
1808 if (ret) {
1809 clk_disable_unprepare(spi_imx->clk_per);
1810 return ret;
1811 }
1812
1813 return 0;
1814 }
1815
spi_imx_runtime_suspend(struct device * dev)1816 static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
1817 {
1818 struct spi_master *master = dev_get_drvdata(dev);
1819 struct spi_imx_data *spi_imx;
1820
1821 spi_imx = spi_master_get_devdata(master);
1822
1823 clk_disable_unprepare(spi_imx->clk_per);
1824 clk_disable_unprepare(spi_imx->clk_ipg);
1825
1826 return 0;
1827 }
1828
/* System suspend: switch pins to their sleep pinctrl state. */
static int __maybe_unused spi_imx_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}
1834
/* System resume: restore the default pinctrl state. */
static int __maybe_unused spi_imx_resume(struct device *dev)
{
	pinctrl_pm_select_default_state(dev);
	return 0;
}
1840
/* Runtime PM gates the clocks; system sleep only toggles pinctrl states. */
static const struct dev_pm_ops imx_spi_pm = {
	SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
				spi_imx_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};
1846
/* Platform driver registration: matches via DT or the platform id table. */
static struct platform_driver spi_imx_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = spi_imx_dt_ids,
		   .pm = &imx_spi_pm,
	},
	.id_table = spi_imx_devtype,
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
1863